// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
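/* Every other compilation unit that fires the tracepoints includes
 * ice_trace.h *without* defining CREATE_TRACE_POINTS (illustrative sketch):
 *
 *	#include "ice_trace.h"
 *	...
 *	trace_ice_<event>(...);
 *
 * Defining CREATE_TRACE_POINTS in a second unit would emit duplicate
 * tracepoint definitions and fail at link time.
 */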
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
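/* With the default search path this resolves to
 * /lib/firmware/intel/ice/ddp/ice.pkg; request_firmware() also honors the
 * other configured firmware directories (e.g. /usr/lib/firmware).
 */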

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBIE");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
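/* Example usage (illustrative, not prescriptive): enable all netif message
 * types at load time, or flip the level at runtime via sysfs thanks to the
 * 0644 permissions above:
 *
 *	modprobe ice debug=16
 *	echo 16 > /sys/module/ice/parameters/debug
 */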

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}
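/* Minimal usage sketch: a unit that only sees struct ice_hw can still log
 * against the right device without pulling in ice.h:
 *
 *	dev_warn(ice_hw_to_dev(hw), "example message\n");
 *
 * container_of() works here because struct ice_pf embeds its ice_hw by value.
 */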

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops ||
		       dev->netdev_ops == &ice_netdev_safe_mode_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
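/* Worked example of the wrap-around arithmetic above: with ring->count = 512,
 * next_to_clean = 500 and next_to_use = 10, head > tail, so pending =
 * 10 + 512 - 500 = 22 descriptors still outstanding.
 */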

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). It only populates the
 * tmp_sync_list, which ice_vsi_sync_fltr() later hands to
 * ice_fltr_add_mac_list() to add the MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). It only populates
 * the tmp_unsync_list, which ice_vsi_sync_fltr() later hands to
 * ice_fltr_remove_mac_list() to delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
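/* The two callbacks above are wired up in ice_vsi_sync_fltr() below via the
 * core address-list helpers, roughly:
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *
 * The core walks the netdev's address lists, invoking the sync callback for
 * new entries and the unsync callback for stale ones.
 */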

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 *
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 *
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* Filters that were added successfully or that already exist are not
	 * errors: don't take the error path below for -EEXIST; continue
	 * processing the rest of the function instead.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	synchronize_irq(pf->oicr_irq.virq);

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		rtnl_lock();
		ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
		rtnl_unlock();
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}

	if (vsi->netdev)
		netif_device_detach(vsi->netdev);
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw, false);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what
	 * type of reset is pending and sets bits in pf->state indicating the
	 * reset type and ICE_RESET_OICR_RECV. If that latter bit is set,
	 * prepare for the pending reset unless the driver already has: for PF
	 * software-initiated global resets, ICE_PREPARED_FOR_RESET indicates
	 * the software is already prepared; for global resets initiated by
	 * firmware or by software on other PFs, that bit is not set, so
	 * prepare now. Then poll for reset completion, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		speed = "200 G";
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
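/* Worked example of the offset accounting above, assuming the IEEE 802.1Qaz
 * information-string lengths ICE_IEEE_ETS_TLV_LEN = 25 and
 * ICE_IEEE_PFC_TLV_LEN = 6 octets: each TLV contributes len + 2 bytes (the
 * 2-byte type/length header plus the information string), so the LLDPDU
 * handed to ice_aq_set_lldp_mib() spans (25 + 2) + (25 + 2) + (6 + 2) = 62
 * bytes.
 */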

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail on failure, since other bookkeeping below still
	 * needs to run
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_get_fwlog_data - copy the FW log data from ARQ event
 * @pf: PF that the FW log event is associated with
 * @event: event structure containing FW log data
 */
static void
ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_fwlog_data *fwlog;
	struct ice_hw *hw = &pf->hw;

	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];

	memset(fwlog->data, 0, PAGE_SIZE);
	fwlog->data_size = le16_to_cpu(event->desc.datalen);

	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);

	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
		/* the rings are full so bump the head to create room */
		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
					 hw->fwlog_ring.size);
	}
}
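/* The FW log ring is a fixed-size circular buffer: tail marks the slot the
 * next event is copied into, head marks the oldest unread entry. When the
 * ring fills, advancing head alongside tail silently drops the oldest log
 * entry rather than stalling control queue processing.
 */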

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
 *
 * Calls are separated to allow caller registering for event before sending
 * the command, which mitigates a race between registering and FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a NULL
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}
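/* Typical call pattern for the two-phase API above (a hedged sketch; the
 * opcode, descriptor setup and timeout are illustrative only):
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);
 *	err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, 30 * HZ);
 *
 * Registering before the send closes the window in which firmware could
 * respond before the caller starts listening.
 */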

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
				ice_vc_process_vf_msg(pf, &event, NULL);
				ice_mbx_vf_dec_trig_e830(hw, &event);
			} else {
				u16 val = hw->mailboxq.num_rq_entries;

				data.max_num_msgs_mbx = val;
				val = ICE_MBX_OVERFLOW_WATERMARK;
				data.async_watermark_val = val;
				data.num_msg_proc = i;
				data.num_pending_arq = pending;

				ice_vc_process_vf_msg(pf, &event, &data);
			}
			break;
		case ice_aqc_opc_fw_logs_event:
			ice_get_fwlog_data(pf, &event);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
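/* Illustrative reading of the check above: if the head register reports
 * next-to-use = 7 while the driver's next_to_clean is still 4, descriptors
 * 4, 5 and 6 hold firmware messages that have not yet been processed, so
 * the function returns true.
 */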

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* if mac_type is not generic, sideband is not supported
	 * and there's nothing to do here
	 */
	if (!ice_is_generic_mac(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for the suspend and resume flows (e.g. the WoL
 * scenario).
1730 */
1731static void ice_service_task_restart(struct ice_pf *pf)
1732{
1733 clear_bit(ICE_SERVICE_DIS, pf->state);
1734 ice_service_task_schedule(pf);
1735}
1736
1737/**
1738 * ice_service_timer - timer callback to schedule service task
1739 * @t: pointer to timer_list
1740 */
1741static void ice_service_timer(struct timer_list *t)
1742{
1743 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1744
1745 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1746 ice_service_task_schedule(pf);
1747}
1748
1749/**
1750 * ice_mdd_maybe_reset_vf - reset VF after MDD event
1751 * @pf: pointer to the PF structure
1752 * @vf: pointer to the VF structure
1753 * @reset_vf_tx: whether Tx MDD has occurred
1754 * @reset_vf_rx: whether Rx MDD has occurred
1755 *
1756 * Since the queue can get stuck on VF MDD events, the PF can be configured to
1757 * automatically reset the VF by enabling the private ethtool flag
1758 * mdd-auto-reset-vf.
1759 */
1760static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
1761 bool reset_vf_tx, bool reset_vf_rx)
1762{
1763 struct device *dev = ice_pf_to_dev(pf);
1764
1765 if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
1766 return;
1767
1768 /* VF MDD event counters will be cleared by reset, so print the event
1769 * prior to reset.
1770 */
1771 if (reset_vf_tx)
1772 ice_print_vf_tx_mdd_event(vf);
1773
1774 if (reset_vf_rx)
1775 ice_print_vf_rx_mdd_event(vf);
1776
1777 dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
1778 pf->hw.pf_id, vf->vf_id);
1779 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1780}
1781
1782/**
1783 * ice_handle_mdd_event - handle malicious driver detect event
1784 * @pf: pointer to the PF structure
1785 *
1786 * Called from service task. OICR interrupt handler indicates MDD event.
1787 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1788 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1789 * disable the queue, the PF can be configured to reset the VF using ethtool
1790 * private flag mdd-auto-reset-vf.
1791 */
1792static void ice_handle_mdd_event(struct ice_pf *pf)
1793{
1794 struct device *dev = ice_pf_to_dev(pf);
1795 struct ice_hw *hw = &pf->hw;
1796 struct ice_vf *vf;
1797 unsigned int bkt;
1798 u32 reg;
1799
1800 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1801 /* Since the VF MDD event logging is rate limited, check if
1802 * there are pending MDD events.
1803 */
1804 ice_print_vfs_mdd_events(pf);
1805 return;
1806 }
1807
1808 /* find what triggered an MDD event */
1809 reg = rd32(hw, GL_MDET_TX_PQM);
1810 if (reg & GL_MDET_TX_PQM_VALID_M) {
1811 u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1812 u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1813 u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1814 u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1815
1816 if (netif_msg_tx_err(pf))
1817 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1818 event, queue, pf_num, vf_num);
1819 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1820 }
1821
1822 reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1823 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1824 u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1825 u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1826 u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1827 u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1828
1829 if (netif_msg_tx_err(pf))
1830 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1831 event, queue, pf_num, vf_num);
1832 wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1833 }
1834
1835 reg = rd32(hw, GL_MDET_RX);
1836 if (reg & GL_MDET_RX_VALID_M) {
1837 u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1838 u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1839 u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1840 u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1841
1842 if (netif_msg_rx_err(pf))
1843 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1844 event, queue, pf_num, vf_num);
1845 wr32(hw, GL_MDET_RX, 0xffffffff);
1846 }
1847
1848 /* check to see if this PF caused an MDD event */
1849 reg = rd32(hw, PF_MDET_TX_PQM);
1850 if (reg & PF_MDET_TX_PQM_VALID_M) {
1851 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1852 if (netif_msg_tx_err(pf))
1853 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1854 }
1855
1856 reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1857 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1858 wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1859 if (netif_msg_tx_err(pf))
1860 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1861 }
1862
1863 reg = rd32(hw, PF_MDET_RX);
1864 if (reg & PF_MDET_RX_VALID_M) {
1865 wr32(hw, PF_MDET_RX, 0xFFFF);
1866 if (netif_msg_rx_err(pf))
1867 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1868 }
1869
1870 /* Check to see if one of the VFs caused an MDD event, and then
1871 * increment counters and set print pending
1872 */
1873 mutex_lock(&pf->vfs.table_lock);
1874 ice_for_each_vf(pf, bkt, vf) {
1875 bool reset_vf_tx = false, reset_vf_rx = false;
1876
1877 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1878 if (reg & VP_MDET_TX_PQM_VALID_M) {
1879 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1880 vf->mdd_tx_events.count++;
1881 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1882 if (netif_msg_tx_err(pf))
1883 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1884 vf->vf_id);
1885
1886 reset_vf_tx = true;
1887 }
1888
1889 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1890 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1891 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1892 vf->mdd_tx_events.count++;
1893 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1894 if (netif_msg_tx_err(pf))
1895 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1896 vf->vf_id);
1897
1898 reset_vf_tx = true;
1899 }
1900
1901 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1902 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1903 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1904 vf->mdd_tx_events.count++;
1905 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1906 if (netif_msg_tx_err(pf))
1907 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1908 vf->vf_id);
1909
1910 reset_vf_tx = true;
1911 }
1912
1913 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1914 if (reg & VP_MDET_RX_VALID_M) {
1915 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1916 vf->mdd_rx_events.count++;
1917 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1918 if (netif_msg_rx_err(pf))
1919 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1920 vf->vf_id);
1921
1922 reset_vf_rx = true;
1923 }
1924
1925 if (reset_vf_tx || reset_vf_rx)
1926 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
1927 reset_vf_rx);
1928 }
1929 mutex_unlock(&pf->vfs.table_lock);
1930
1931 ice_print_vfs_mdd_events(pf);
1932}
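
/* Minimal sketch of the detect/log/clear pattern that the handler above
 * applies to each MDD cause register. This helper is hypothetical (not part
 * of the driver) and assumes an MDET-style register with a valid bit and
 * write-to-clear semantics.
 */
static __maybe_unused void example_ack_mdet(struct ice_hw *hw, u32 reg_addr,
					    u32 valid_m)
{
	u32 reg = rd32(hw, reg_addr);

	if (!(reg & valid_m))
		return;

	/* decode fields of interest with FIELD_GET() here, then write to
	 * clear so the next event can be latched
	 */
	wr32(hw, reg_addr, U32_MAX);
}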
1933
1934/**
1935 * ice_force_phys_link_state - Force the physical link state
1936 * @vsi: VSI to force the physical link state to up/down
1937 * @link_up: true/false indicates to set the physical link to up/down
1938 *
1939 * Force the physical link state by getting the current PHY capabilities from
1940 * hardware and setting the PHY config based on the determined capabilities. If
1941 * the link state changes, a link event will be triggered because both the
1942 * Enable Automatic Link Update and LESM Enable bits are set in the PHY config.
1943 *
1944 * Returns 0 on success, negative on failure
1945 */
1946static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1947{
1948 struct ice_aqc_get_phy_caps_data *pcaps;
1949 struct ice_aqc_set_phy_cfg_data *cfg;
1950 struct ice_port_info *pi;
1951 struct device *dev;
1952 int retcode;
1953
1954 if (!vsi || !vsi->port_info || !vsi->back)
1955 return -EINVAL;
1956 if (vsi->type != ICE_VSI_PF)
1957 return 0;
1958
1959 dev = ice_pf_to_dev(vsi->back);
1960
1961 pi = vsi->port_info;
1962
1963 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1964 if (!pcaps)
1965 return -ENOMEM;
1966
1967 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1968 NULL);
1969 if (retcode) {
1970 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1971 vsi->vsi_num, retcode);
1972 retcode = -EIO;
1973 goto out;
1974 }
1975
1976 /* No change in link */
1977 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1978 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1979 goto out;
1980
1981 /* Use the current user PHY configuration. The current user PHY
1982 * configuration is initialized during probe from PHY capabilities
1983 * software mode, and updated on set PHY configuration.
1984 */
1985 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1986 if (!cfg) {
1987 retcode = -ENOMEM;
1988 goto out;
1989 }
1990
1991 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1992 if (link_up)
1993 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1994 else
1995 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1996
1997 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1998 if (retcode) {
1999 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2000 vsi->vsi_num, retcode);
2001 retcode = -EIO;
2002 }
2003
2004 kfree(cfg);
2005out:
2006 kfree(pcaps);
2007 return retcode;
2008}
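
/* Usage sketch (illustrative): callers pair this with the
 * link-down-on-close behavior, e.g. forcing the link down when the
 * interface is closed and the corresponding flag is set:
 *
 *	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
 *		ice_force_phys_link_state(vsi, false);
 */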
2009
2010/**
2011 * ice_init_nvm_phy_type - Initialize the NVM PHY type
2012 * @pi: port info structure
2013 *
2014 * Initialize nvm_phy_type_[low|high] for link lenient mode support
2015 */
2016static int ice_init_nvm_phy_type(struct ice_port_info *pi)
2017{
2018 struct ice_aqc_get_phy_caps_data *pcaps;
2019 struct ice_pf *pf = pi->hw->back;
2020 int err;
2021
2022 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2023 if (!pcaps)
2024 return -ENOMEM;
2025
2026 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
2027 pcaps, NULL);
2028
2029 if (err) {
2030 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2031 goto out;
2032 }
2033
2034 pf->nvm_phy_type_hi = pcaps->phy_type_high;
2035 pf->nvm_phy_type_lo = pcaps->phy_type_low;
2036
2037out:
2038 kfree(pcaps);
2039 return err;
2040}
2041
2042/**
2043 * ice_init_link_dflt_override - Initialize link default override
2044 * @pi: port info structure
2045 *
2046 * Initialize link default override and PHY total port shutdown during probe
2047 */
2048static void ice_init_link_dflt_override(struct ice_port_info *pi)
2049{
2050 struct ice_link_default_override_tlv *ldo;
2051 struct ice_pf *pf = pi->hw->back;
2052
2053 ldo = &pf->link_dflt_override;
2054 if (ice_get_link_default_override(ldo, pi))
2055 return;
2056
2057 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2058 return;
2059
2060 /* Enable Total Port Shutdown (override/replace link-down-on-close
2061 * ethtool private flag) for ports with Port Disable bit set.
2062 */
2063 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2064 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2065}
2066
2067/**
2068 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2069 * @pi: port info structure
2070 *
2071 * If default override is enabled, initialize the user PHY cfg speed and FEC
2072 * settings using the default override mask from the NVM.
2073 *
2074 * The PHY should only be configured with the default override settings the
2075 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2076 * is used to indicate that the user PHY cfg default override is initialized
2077 * and the PHY has not been configured with the default override settings. The
2078 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2079 * configured.
2080 *
2081 * This function should be called only if the FW doesn't support default
2082 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2083 */
2084static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2085{
2086 struct ice_link_default_override_tlv *ldo;
2087 struct ice_aqc_set_phy_cfg_data *cfg;
2088 struct ice_phy_info *phy = &pi->phy;
2089 struct ice_pf *pf = pi->hw->back;
2090
2091 ldo = &pf->link_dflt_override;
2092
2093 /* If link default override is enabled, use it to mask the NVM PHY
2094 * capabilities for the speed and FEC default configuration.
2095 */
2096 cfg = &phy->curr_user_phy_cfg;
2097
2098 if (ldo->phy_type_low || ldo->phy_type_high) {
2099 cfg->phy_type_low = pf->nvm_phy_type_lo &
2100 cpu_to_le64(ldo->phy_type_low);
2101 cfg->phy_type_high = pf->nvm_phy_type_hi &
2102 cpu_to_le64(ldo->phy_type_high);
2103 }
2104 cfg->link_fec_opt = ldo->fec_options;
2105 phy->curr_user_fec_req = ICE_FEC_AUTO;
2106
2107 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2108}
2109
2110/**
2111 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2112 * @pi: port info structure
2113 *
2114 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2115 * mode to default. The PHY defaults come from the get PHY capabilities
2116 * (topology with media) response, so call this only once media is first
2117 * available. An error is returned if called while media is not available.
2118 * The PHY initialization completed state is set here.
2119 *
2120 * These values are used whenever the PHY configuration is set, and the user
2121 * PHY configuration is updated on each set PHY configuration call.
2122 * Returns 0 on success, negative on failure
2123 */
2124static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2125{
2126 struct ice_aqc_get_phy_caps_data *pcaps;
2127 struct ice_phy_info *phy = &pi->phy;
2128 struct ice_pf *pf = pi->hw->back;
2129 int err;
2130
2131 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2132 return -EIO;
2133
2134 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2135 if (!pcaps)
2136 return -ENOMEM;
2137
2138 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2139 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2140 pcaps, NULL);
2141 else
2142 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2143 pcaps, NULL);
2144 if (err) {
2145 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2146 goto err_out;
2147 }
2148
2149 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2150
2151 /* check if lenient mode is supported and enabled */
2152 if (ice_fw_supports_link_override(pi->hw) &&
2153 !(pcaps->module_compliance_enforcement &
2154 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2155 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2156
2157 /* if the FW supports default PHY configuration mode, then the driver
2158 * does not have to apply link override settings. If not,
2159 * initialize user PHY configuration with link override values
2160 */
2161 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2162 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2163 ice_init_phy_cfg_dflt_override(pi);
2164 goto out;
2165 }
2166 }
2167
2168 /* if link default override is not enabled, set user flow control and
2169 * FEC settings based on what get_phy_caps returned
2170 */
2171 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2172 pcaps->link_fec_options);
2173 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2174
2175out:
2176 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2177 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2178err_out:
2179 kfree(pcaps);
2180 return err;
2181}
2182
2183/**
2184 * ice_configure_phy - configure PHY
2185 * @vsi: VSI of PHY
2186 *
2187 * Set the PHY configuration. If the current PHY configuration is the same as
2188 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2189 * configure the PHY based on the get PHY capabilities for topology with media.
2190 */
2191static int ice_configure_phy(struct ice_vsi *vsi)
2192{
2193 struct device *dev = ice_pf_to_dev(vsi->back);
2194 struct ice_port_info *pi = vsi->port_info;
2195 struct ice_aqc_get_phy_caps_data *pcaps;
2196 struct ice_aqc_set_phy_cfg_data *cfg;
2197 struct ice_phy_info *phy = &pi->phy;
2198 struct ice_pf *pf = vsi->back;
2199 int err;
2200
2201 /* Ensure we have media as we cannot configure a medialess port */
2202 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2203 return -ENOMEDIUM;
2204
2205 ice_print_topo_conflict(vsi);
2206
2207 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2208 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2209 return -EPERM;
2210
2211 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2212 return ice_force_phys_link_state(vsi, true);
2213
2214 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2215 if (!pcaps)
2216 return -ENOMEM;
2217
2218 /* Get current PHY config */
2219 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2220 NULL);
2221 if (err) {
2222 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2223 vsi->vsi_num, err);
2224 goto done;
2225 }
2226
2227 /* If PHY enable link is configured and configuration has not changed,
2228 * there's nothing to do
2229 */
2230 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2231 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2232 goto done;
2233
2234 /* Use PHY topology as baseline for configuration */
2235 memset(pcaps, 0, sizeof(*pcaps));
2236 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2237 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2238 pcaps, NULL);
2239 else
2240 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2241 pcaps, NULL);
2242 if (err) {
2243 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2244 vsi->vsi_num, err);
2245 goto done;
2246 }
2247
2248 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2249 if (!cfg) {
2250 err = -ENOMEM;
2251 goto done;
2252 }
2253
2254 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2255
2256 /* Speed - If default override pending, use curr_user_phy_cfg set in
2257 * ice_init_phy_cfg_dflt_override().
2258 */
2259 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2260 vsi->back->state)) {
2261 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2262 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2263 } else {
2264 u64 phy_low = 0, phy_high = 0;
2265
2266 ice_update_phy_type(&phy_low, &phy_high,
2267 pi->phy.curr_user_speed_req);
2268 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2269 cfg->phy_type_high = pcaps->phy_type_high &
2270 cpu_to_le64(phy_high);
2271 }
2272
2273 /* Can't provide what was requested; use PHY capabilities */
2274 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2275 cfg->phy_type_low = pcaps->phy_type_low;
2276 cfg->phy_type_high = pcaps->phy_type_high;
2277 }
2278
2279 /* FEC */
2280 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2281
2282 /* Can't provide what was requested; use PHY capabilities */
2283 if (cfg->link_fec_opt !=
2284 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2285 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2286 cfg->link_fec_opt = pcaps->link_fec_options;
2287 }
2288
2289 /* Flow Control - always supported; no need to check against
2290 * capabilities
2291 */
2292 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2293
2294 /* Enable link and link update */
2295 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2296
2297 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2298 if (err)
2299 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2300 vsi->vsi_num, err);
2301
2302 kfree(cfg);
2303done:
2304 kfree(pcaps);
2305 return err;
2306}
2307
2308/**
2309 * ice_check_media_subtask - Check for media
2310 * @pf: pointer to PF struct
2311 *
2312 * If media is available, then initialize the PHY user configuration if it
2313 * has not been initialized yet, and configure the PHY if the interface is up.
2314 */
2315static void ice_check_media_subtask(struct ice_pf *pf)
2316{
2317 struct ice_port_info *pi;
2318 struct ice_vsi *vsi;
2319 int err;
2320
2321 /* No need to check for media if it's already present */
2322 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2323 return;
2324
2325 vsi = ice_get_main_vsi(pf);
2326 if (!vsi)
2327 return;
2328
2329 /* Refresh link info and check if media is present */
2330 pi = vsi->port_info;
2331 err = ice_update_link_info(pi);
2332 if (err)
2333 return;
2334
2335 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2336
2337 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2338 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2339 ice_init_phy_user_cfg(pi);
2340
2341 /* PHY settings are reset on media insertion, reconfigure
2342 * PHY to preserve settings.
2343 */
2344 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2345 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2346 return;
2347
2348 err = ice_configure_phy(vsi);
2349 if (!err)
2350 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2351
2352 /* A Link Status Event will be generated; the event handler
2353 * will complete bringing the interface up
2354 */
2355 }
2356}
2357
2358/**
2359 * ice_service_task - manage and run subtasks
2360 * @work: pointer to work_struct contained by the PF struct
2361 */
2362static void ice_service_task(struct work_struct *work)
2363{
2364 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2365 unsigned long start_time = jiffies;
2366
2367 /* subtasks */
2368
2369 /* process reset requests first */
2370 ice_reset_subtask(pf);
2371
2372 /* bail if a reset/recovery cycle is pending or rebuild failed */
2373 if (ice_is_reset_in_progress(pf->state) ||
2374 test_bit(ICE_SUSPENDED, pf->state) ||
2375 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2376 ice_service_task_complete(pf);
2377 return;
2378 }
2379
2380 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2381 struct iidc_event *event;
2382
2383 event = kzalloc(sizeof(*event), GFP_KERNEL);
2384 if (event) {
2385 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2386 /* report the entire OICR value to AUX driver */
2387 swap(event->reg, pf->oicr_err_reg);
2388 ice_send_event_to_aux(pf, event);
2389 kfree(event);
2390 }
2391 }
2392
2393 /* unplug aux dev per request, if an unplug request came in
2394 * while processing a plug request, this will handle it
2395 */
2396 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2397 ice_unplug_aux_dev(pf);
2398
2399 /* Plug aux device per request */
2400 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2401 ice_plug_aux_dev(pf);
2402
2403 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2404 struct iidc_event *event;
2405
2406 event = kzalloc(sizeof(*event), GFP_KERNEL);
2407 if (event) {
2408 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2409 ice_send_event_to_aux(pf, event);
2410 kfree(event);
2411 }
2412 }
2413
2414 ice_clean_adminq_subtask(pf);
2415 ice_check_media_subtask(pf);
2416 ice_check_for_hang_subtask(pf);
2417 ice_sync_fltr_subtask(pf);
2418 ice_handle_mdd_event(pf);
2419 ice_watchdog_subtask(pf);
2420
2421 if (ice_is_safe_mode(pf)) {
2422 ice_service_task_complete(pf);
2423 return;
2424 }
2425
2426 ice_process_vflr_event(pf);
2427 ice_clean_mailboxq_subtask(pf);
2428 ice_clean_sbq_subtask(pf);
2429 ice_sync_arfs_fltrs(pf);
2430 ice_flush_fdir_ctx(pf);
2431
2432 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2433 ice_service_task_complete(pf);
2434
2435 /* If the tasks have taken longer than one service timer period
2436 * or there is more work to be done, reset the service timer to
2437 * schedule the service task now.
2438 */
2439 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2440 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2441 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2442 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2443 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2444 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2445 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2446 mod_timer(&pf->serv_tmr, jiffies);
2447}
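
/* Illustrative sketch (hypothetical helper, not part of the driver): other
 * contexts typically nudge the service task by setting the relevant
 * pf->state bit and scheduling the task; the tail of ice_service_task()
 * above then re-arms the timer immediately while such a bit is pending.
 */
static __maybe_unused void example_kick_service_task(struct ice_pf *pf)
{
	set_bit(ICE_MDD_EVENT_PENDING, pf->state);
	ice_service_task_schedule(pf);
}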
2448
2449/**
2450 * ice_set_ctrlq_len - helper function to set controlq length
2451 * @hw: pointer to the HW instance
2452 */
2453static void ice_set_ctrlq_len(struct ice_hw *hw)
2454{
2455 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2456 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2457 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2458 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2459 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2460 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2461 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2462 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2463 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2464 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2465 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2466 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2467}
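
/* Note: the mailbox receive queue is sized to PF_MBX_ARQLEN_ARQLEN_M (the
 * largest length the ARQLEN register field can encode), presumably to
 * absorb bursts of VF mailbox messages; the other queues use the fixed
 * ICE_*_LEN defaults.
 */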
2468
2469/**
2470 * ice_schedule_reset - schedule a reset
2471 * @pf: board private structure
2472 * @reset: reset being requested
2473 */
2474int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2475{
2476 struct device *dev = ice_pf_to_dev(pf);
2477
2478 /* bail out if earlier reset has failed */
2479 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2480 dev_dbg(dev, "earlier reset has failed\n");
2481 return -EIO;
2482 }
2483 /* bail if reset/recovery already in progress */
2484 if (ice_is_reset_in_progress(pf->state)) {
2485 dev_dbg(dev, "Reset already in progress\n");
2486 return -EBUSY;
2487 }
2488
2489 switch (reset) {
2490 case ICE_RESET_PFR:
2491 set_bit(ICE_PFR_REQ, pf->state);
2492 break;
2493 case ICE_RESET_CORER:
2494 set_bit(ICE_CORER_REQ, pf->state);
2495 break;
2496 case ICE_RESET_GLOBR:
2497 set_bit(ICE_GLOBR_REQ, pf->state);
2498 break;
2499 default:
2500 return -EINVAL;
2501 }
2502
2503 ice_service_task_schedule(pf);
2504 return 0;
2505}
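
/* Example (illustrative): callers request a reset and let the service task
 * carry it out asynchronously:
 *
 *	if (ice_schedule_reset(pf, ICE_RESET_PFR))
 *		dev_dbg(ice_pf_to_dev(pf), "PFR already pending or blocked\n");
 */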
2506
2507/**
2508 * ice_irq_affinity_notify - Callback for affinity changes
2509 * @notify: context as to what irq was changed
2510 * @mask: the new affinity mask
2511 *
2512 * This is a callback function used by the irq_set_affinity_notifier function
2513 * so that we may register to receive changes to the irq affinity masks.
2514 */
2515static void
2516ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2517 const cpumask_t *mask)
2518{
2519 struct ice_q_vector *q_vector =
2520 container_of(notify, struct ice_q_vector, affinity_notify);
2521
2522 cpumask_copy(&q_vector->affinity_mask, mask);
2523}
2524
2525/**
2526 * ice_irq_affinity_release - Callback for affinity notifier release
2527 * @ref: internal core kernel usage
2528 *
2529 * This is a callback function used by the irq_set_affinity_notifier function
2530 * to inform the current notification subscriber that they will no longer
2531 * receive notifications.
2532 */
2533static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2534
2535/**
2536 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2537 * @vsi: the VSI being configured
2538 */
2539static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2540{
2541 struct ice_hw *hw = &vsi->back->hw;
2542 int i;
2543
2544 ice_for_each_q_vector(vsi, i)
2545 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2546
2547 ice_flush(hw);
2548 return 0;
2549}
2550
2551/**
2552 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2553 * @vsi: the VSI being configured
2554 * @basename: name for the vector
2555 */
2556static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2557{
2558 int q_vectors = vsi->num_q_vectors;
2559 struct ice_pf *pf = vsi->back;
2560 struct device *dev;
2561 int rx_int_idx = 0;
2562 int tx_int_idx = 0;
2563 int vector, err;
2564 int irq_num;
2565
2566 dev = ice_pf_to_dev(pf);
2567 for (vector = 0; vector < q_vectors; vector++) {
2568 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2569
2570 irq_num = q_vector->irq.virq;
2571
2572 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2573 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2574 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2575 tx_int_idx++;
2576 } else if (q_vector->rx.rx_ring) {
2577 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2578 "%s-%s-%d", basename, "rx", rx_int_idx++);
2579 } else if (q_vector->tx.tx_ring) {
2580 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2581 "%s-%s-%d", basename, "tx", tx_int_idx++);
2582 } else {
2583 /* skip this unused q_vector */
2584 continue;
2585 }
2586 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2587 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2588 IRQF_SHARED, q_vector->name,
2589 q_vector);
2590 else
2591 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2592 0, q_vector->name, q_vector);
2593 if (err) {
2594 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2595 err);
2596 goto free_q_irqs;
2597 }
2598
2599 /* register for affinity change notifications */
2600 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2601 struct irq_affinity_notify *affinity_notify;
2602
2603 affinity_notify = &q_vector->affinity_notify;
2604 affinity_notify->notify = ice_irq_affinity_notify;
2605 affinity_notify->release = ice_irq_affinity_release;
2606 irq_set_affinity_notifier(irq_num, affinity_notify);
2607 }
2608
2609 /* assign the mask for this irq */
2610 irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
2611 }
2612
2613 err = ice_set_cpu_rx_rmap(vsi);
2614 if (err) {
2615 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2616 vsi->vsi_num, ERR_PTR(err));
2617 goto free_q_irqs;
2618 }
2619
2620 vsi->irqs_ready = true;
2621 return 0;
2622
2623free_q_irqs:
2624 while (vector--) {
2625 irq_num = vsi->q_vectors[vector]->irq.virq;
2626 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2627 irq_set_affinity_notifier(irq_num, NULL);
2628 irq_update_affinity_hint(irq_num, NULL);
2629 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2630 }
2631 return err;
2632}
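
/* For reference: with a basename such as "ice-eth0" (the exact format is
 * chosen by the caller; the name here is hypothetical), the code above
 * produces per-vector IRQ names like "ice-eth0-TxRx-0", "ice-eth0-TxRx-1",
 * ..., which show up in /proc/interrupts.
 */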
2633
2634/**
2635 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2636 * @vsi: VSI to setup Tx rings used by XDP
2637 *
2638 * Return 0 on success and negative value on error
2639 */
2640static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2641{
2642 struct device *dev = ice_pf_to_dev(vsi->back);
2643 struct ice_tx_desc *tx_desc;
2644 int i, j;
2645
2646 ice_for_each_xdp_txq(vsi, i) {
2647 u16 xdp_q_idx = vsi->alloc_txq + i;
2648 struct ice_ring_stats *ring_stats;
2649 struct ice_tx_ring *xdp_ring;
2650
2651 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2652 if (!xdp_ring)
2653 goto free_xdp_rings;
2654
2655 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2656 if (!ring_stats) {
2657 ice_free_tx_ring(xdp_ring);
2658 goto free_xdp_rings;
2659 }
2660
2661 xdp_ring->ring_stats = ring_stats;
2662 xdp_ring->q_index = xdp_q_idx;
2663 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2664 xdp_ring->vsi = vsi;
2665 xdp_ring->netdev = NULL;
2666 xdp_ring->dev = dev;
2667 xdp_ring->count = vsi->num_tx_desc;
2668 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2669 if (ice_setup_tx_ring(xdp_ring))
2670 goto free_xdp_rings;
2671 ice_set_ring_xdp(xdp_ring);
2672 spin_lock_init(&xdp_ring->tx_lock);
2673 for (j = 0; j < xdp_ring->count; j++) {
2674 tx_desc = ICE_TX_DESC(xdp_ring, j);
2675 tx_desc->cmd_type_offset_bsz = 0;
2676 }
2677 }
2678
2679 return 0;
2680
2681free_xdp_rings:
2682 for (; i >= 0; i--) {
2683 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2684 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2685 vsi->xdp_rings[i]->ring_stats = NULL;
2686 ice_free_tx_ring(vsi->xdp_rings[i]);
2687 }
2688 }
2689 return -ENOMEM;
2690}
2691
2692/**
2693 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2694 * @vsi: VSI to set the bpf prog on
2695 * @prog: the bpf prog pointer
2696 */
2697static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2698{
2699 struct bpf_prog *old_prog;
2700 int i;
2701
2702 old_prog = xchg(&vsi->xdp_prog, prog);
2703 ice_for_each_rxq(vsi, i)
2704 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2705
2706 if (old_prog)
2707 bpf_prog_put(old_prog);
2708}
2709
2710static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2711{
2712 struct ice_q_vector *q_vector;
2713 struct ice_tx_ring *ring;
2714
2715 if (static_key_enabled(&ice_xdp_locking_key))
2716 return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2717
2718 q_vector = vsi->rx_rings[qid]->q_vector;
2719 ice_for_each_tx_ring(ring, q_vector->tx)
2720 if (ice_ring_is_xdp(ring))
2721 return ring;
2722
2723 return NULL;
2724}
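
/* Worked example: with 8 Rx queues but only 4 XDP Tx rings (so
 * ice_xdp_locking_key is enabled), qids 0..7 map to XDP rings
 * 0,1,2,3,0,1,2,3 via qid % vsi->num_xdp_txq, and the shared rings are
 * serialized by their tx_lock.
 */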
2725
2726/**
2727 * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2728 * @vsi: the VSI with XDP rings being configured
2729 *
2730 * Map XDP rings to interrupt vectors and perform the configuration steps
2731 * dependent on the mapping.
2732 */
2733void ice_map_xdp_rings(struct ice_vsi *vsi)
2734{
2735 int xdp_rings_rem = vsi->num_xdp_txq;
2736 int v_idx, q_idx;
2737
2738 /* follow the logic from ice_vsi_map_rings_to_vectors */
2739 ice_for_each_q_vector(vsi, v_idx) {
2740 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2741 int xdp_rings_per_v, q_id, q_base;
2742
2743 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2744 vsi->num_q_vectors - v_idx);
2745 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2746
2747 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2748 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2749
2750 xdp_ring->q_vector = q_vector;
2751 xdp_ring->next = q_vector->tx.tx_ring;
2752 q_vector->tx.tx_ring = xdp_ring;
2753 }
2754 xdp_rings_rem -= xdp_rings_per_v;
2755 }
2756
2757 ice_for_each_rxq(vsi, q_idx) {
2758 vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2759 q_idx);
2760 ice_tx_xsk_pool(vsi, q_idx);
2761 }
2762}
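
/* Worked example of the distribution above: 10 XDP rings across 4 vectors
 * yields DIV_ROUND_UP splits of 3, 3, 2 and 2 rings on vectors 0..3, i.e.
 * the remainder is spread over the earliest vectors.
 */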
2763
2764/**
2765 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2766 * @vsi: VSI to bring up Tx rings used by XDP
2767 * @prog: bpf program that will be assigned to VSI
2768 * @cfg_type: create from scratch or restore the existing configuration
2769 *
2770 * Return 0 on success and negative value on error
2771 */
2772int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2773 enum ice_xdp_cfg cfg_type)
2774{
2775 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2776 struct ice_pf *pf = vsi->back;
2777 struct ice_qs_cfg xdp_qs_cfg = {
2778 .qs_mutex = &pf->avail_q_mutex,
2779 .pf_map = pf->avail_txqs,
2780 .pf_map_size = pf->max_pf_txqs,
2781 .q_count = vsi->num_xdp_txq,
2782 .scatter_count = ICE_MAX_SCATTER_TXQS,
2783 .vsi_map = vsi->txq_map,
2784 .vsi_map_offset = vsi->alloc_txq,
2785 .mapping_mode = ICE_VSI_MAP_CONTIG
2786 };
2787 struct device *dev;
2788 int status, i;
2789
2790 dev = ice_pf_to_dev(pf);
2791 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2792 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2793 if (!vsi->xdp_rings)
2794 return -ENOMEM;
2795
2796 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2797 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2798 goto err_map_xdp;
2799
2800 if (static_key_enabled(&ice_xdp_locking_key))
2801 netdev_warn(vsi->netdev,
2802 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2803
2804 if (ice_xdp_alloc_setup_rings(vsi))
2805 goto clear_xdp_rings;
2806
2807 /* omit the scheduler update if in reset path; XDP queues will be
2808 * taken into account at the end of ice_vsi_rebuild, where
2809 * ice_cfg_vsi_lan is being called
2810 */
2811 if (cfg_type == ICE_XDP_CFG_PART)
2812 return 0;
2813
2814 ice_map_xdp_rings(vsi);
2815
2816 /* tell the Tx scheduler that right now we have
2817 * additional queues
2818 */
2819 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2820 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2821
2822 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2823 max_txqs);
2824 if (status) {
2825 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2826 status);
2827 goto clear_xdp_rings;
2828 }
2829
2830 /* assign the prog only when it's not already present on VSI;
2831 * this path is exercised by both the ethtool -L and ndo_bpf flows;
2832 * a VSI rebuild that happens under ethtool -L can expose us to
2833 * bpf_prog refcount issues, as we would swap in the same
2834 * bpf_prog pointer already held by vsi->xdp_prog and then call
2835 * bpf_prog_put on it as if it were an 'old_prog'; for ndo_bpf
2836 * this is not harmful as dev_xdp_install bumps the refcount
2837 * before calling the op exposed by the driver;
2838 */
2839 if (!ice_is_xdp_ena_vsi(vsi))
2840 ice_vsi_assign_bpf_prog(vsi, prog);
2841
2842 return 0;
2843clear_xdp_rings:
2844 ice_for_each_xdp_txq(vsi, i)
2845 if (vsi->xdp_rings[i]) {
2846 kfree_rcu(vsi->xdp_rings[i], rcu);
2847 vsi->xdp_rings[i] = NULL;
2848 }
2849
2850err_map_xdp:
2851 mutex_lock(&pf->avail_q_mutex);
2852 ice_for_each_xdp_txq(vsi, i) {
2853 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2854 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2855 }
2856 mutex_unlock(&pf->avail_q_mutex);
2857
2858 devm_kfree(dev, vsi->xdp_rings);
2859 return -ENOMEM;
2860}
2861
2862/**
2863 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2864 * @vsi: VSI to remove XDP rings
2865 * @cfg_type: disable XDP permanently or allow it to be restored later
2866 *
2867 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2868 * resources
2869 */
2870int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2871{
2872 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2873 struct ice_pf *pf = vsi->back;
2874 int i, v_idx;
2875
2876 /* q_vectors are freed in reset path so there's no point in detaching
2877 * rings
2878 */
2879 if (cfg_type == ICE_XDP_CFG_PART)
2880 goto free_qmap;
2881
2882 ice_for_each_q_vector(vsi, v_idx) {
2883 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2884 struct ice_tx_ring *ring;
2885
2886 ice_for_each_tx_ring(ring, q_vector->tx)
2887 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2888 break;
2889
2890 /* restore the value of last node prior to XDP setup */
2891 q_vector->tx.tx_ring = ring;
2892 }
2893
2894free_qmap:
2895 mutex_lock(&pf->avail_q_mutex);
2896 ice_for_each_xdp_txq(vsi, i) {
2897 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2898 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2899 }
2900 mutex_unlock(&pf->avail_q_mutex);
2901
2902 ice_for_each_xdp_txq(vsi, i)
2903 if (vsi->xdp_rings[i]) {
2904 if (vsi->xdp_rings[i]->desc) {
2905 synchronize_rcu();
2906 ice_free_tx_ring(vsi->xdp_rings[i]);
2907 }
2908 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2909 vsi->xdp_rings[i]->ring_stats = NULL;
2910 kfree_rcu(vsi->xdp_rings[i], rcu);
2911 vsi->xdp_rings[i] = NULL;
2912 }
2913
2914 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2915 vsi->xdp_rings = NULL;
2916
2917 if (static_key_enabled(&ice_xdp_locking_key))
2918 static_branch_dec(&ice_xdp_locking_key);
2919
2920 if (cfg_type == ICE_XDP_CFG_PART)
2921 return 0;
2922
2923 ice_vsi_assign_bpf_prog(vsi, NULL);
2924
2925 /* notify Tx scheduler that we destroyed XDP queues and bring
2926 * back the old number of child nodes
2927 */
2928 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2929 max_txqs[i] = vsi->num_txq;
2930
2931 /* change number of XDP Tx queues to 0 */
2932 vsi->num_xdp_txq = 0;
2933
2934 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2935 max_txqs);
2936}
2937
2938/**
2939 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2940 * @vsi: VSI to schedule napi on
2941 */
2942static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2943{
2944 int i;
2945
2946 ice_for_each_rxq(vsi, i) {
2947 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2948
2949 if (READ_ONCE(rx_ring->xsk_pool))
2950 napi_schedule(&rx_ring->q_vector->napi);
2951 }
2952}
2953
2954/**
2955 * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2956 * @vsi: VSI to determine the count of XDP Tx queues for
2957 *
2958 * Returns 0 if the available Tx queue count is at least half of the CPU
2959 * count, -ENOMEM otherwise
2960 */
2961int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2962{
2963 u16 avail = ice_get_avail_txq_count(vsi->back);
2964 u16 cpus = num_possible_cpus();
2965
2966 if (avail < cpus / 2)
2967 return -ENOMEM;
2968
2969 if (vsi->type == ICE_VSI_SF)
2970 avail = vsi->alloc_txq;
2971
2972 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2973
2974 if (vsi->num_xdp_txq < cpus)
2975 static_branch_inc(&ice_xdp_locking_key);
2976
2977 return 0;
2978}
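
/* Worked example: with 16 possible CPUs and 12 available Tx queues, the
 * check passes (12 >= 16 / 2), num_xdp_txq becomes min(12, 16) = 12, and
 * since 12 < 16 the ice_xdp_locking_key is enabled so rings are shared.
 */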
2979
2980/**
2981 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2982 * @vsi: Pointer to VSI structure
2983 */
2984static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2985{
2986 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2987 return ICE_RXBUF_1664;
2988 else
2989 return ICE_RXBUF_3072;
2990}
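
/* Worked example: with a 9000 byte MTU, frame_size in ice_xdp_setup_prog()
 * below exceeds even ICE_RXBUF_3072, so attaching a program that does not
 * set xdp_has_frags is rejected with -EOPNOTSUPP.
 */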
2991
2992/**
2993 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2994 * @vsi: VSI to setup XDP for
2995 * @prog: XDP program
2996 * @extack: netlink extended ack
2997 */
2998static int
2999ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
3000 struct netlink_ext_ack *extack)
3001{
3002 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
3003 int ret = 0, xdp_ring_err = 0;
3004 bool if_running;
3005
3006 if (prog && !prog->aux->xdp_has_frags) {
3007 if (frame_size > ice_max_xdp_frame_size(vsi)) {
3008 NL_SET_ERR_MSG_MOD(extack,
3009 "MTU is too large for linear frames and XDP prog does not support frags");
3010 return -EOPNOTSUPP;
3011 }
3012 }
3013
3014 /* hot swap progs and avoid toggling link */
3015 if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
3016 test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
3017 ice_vsi_assign_bpf_prog(vsi, prog);
3018 return 0;
3019 }
3020
3021 if_running = netif_running(vsi->netdev) &&
3022 !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
3023
3024 /* need to stop netdev while setting up the program for Rx rings */
3025 if (if_running) {
3026 ret = ice_down(vsi);
3027 if (ret) {
3028 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3029 return ret;
3030 }
3031 }
3032
3033 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3034 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3035 if (xdp_ring_err) {
3036 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3037 } else {
3038 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3039 ICE_XDP_CFG_FULL);
3040 if (xdp_ring_err)
3041 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3042 }
3043 xdp_features_set_redirect_target(vsi->netdev, true);
3044 /* reallocate Rx queues that are used for zero-copy */
3045 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
3046 if (xdp_ring_err)
3047 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
3048 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3049 xdp_features_clear_redirect_target(vsi->netdev);
3050 xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3051 if (xdp_ring_err)
3052 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3053 /* reallocate Rx queues that were used for zero-copy */
3054 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
3055 if (xdp_ring_err)
3056 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
3057 }
3058
3059 if (if_running)
3060 ret = ice_up(vsi);
3061
3062 if (!ret && prog)
3063 ice_vsi_rx_napi_schedule(vsi);
3064
3065 return (ret || xdp_ring_err) ? -ENOMEM : 0;
3066}
3067
3068/**
3069 * ice_xdp_safe_mode - XDP handler for safe mode
3070 * @dev: netdevice
3071 * @xdp: XDP command
3072 */
3073static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3074 struct netdev_bpf *xdp)
3075{
3076 NL_SET_ERR_MSG_MOD(xdp->extack,
3077 "Please provide working DDP firmware package in order to use XDP\n"
3078 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3079 return -EOPNOTSUPP;
3080}
3081
3082/**
3083 * ice_xdp - implements XDP handler
3084 * @dev: netdevice
3085 * @xdp: XDP command
3086 */
3087int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3088{
3089 struct ice_netdev_priv *np = netdev_priv(dev);
3090 struct ice_vsi *vsi = np->vsi;
3091 int ret;
3092
3093 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
3094 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
3095 return -EINVAL;
3096 }
3097
3098 mutex_lock(&vsi->xdp_state_lock);
3099
3100 switch (xdp->command) {
3101 case XDP_SETUP_PROG:
3102 ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3103 break;
3104 case XDP_SETUP_XSK_POOL:
3105 ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
3106 break;
3107 default:
3108 ret = -EINVAL;
3109 }
3110
3111 mutex_unlock(&vsi->xdp_state_lock);
3112 return ret;
3113}
3114
3115/**
3116 * ice_ena_misc_vector - enable the non-queue interrupts
3117 * @pf: board private structure
3118 */
3119static void ice_ena_misc_vector(struct ice_pf *pf)
3120{
3121 struct ice_hw *hw = &pf->hw;
3122 u32 pf_intr_start_offset;
3123 u32 val;
3124
3125 /* Disable anti-spoof detection interrupt to prevent spurious event
3126 * interrupts during a function reset. Anti-spoof functionality is
3127 * still supported.
3128 */
3129 val = rd32(hw, GL_MDCK_TX_TDPU);
3130 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3131 wr32(hw, GL_MDCK_TX_TDPU, val);
3132
3133 /* clear things first */
3134 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3135 rd32(hw, PFINT_OICR); /* read to clear */
3136
3137 val = (PFINT_OICR_ECC_ERR_M |
3138 PFINT_OICR_MAL_DETECT_M |
3139 PFINT_OICR_GRST_M |
3140 PFINT_OICR_PCI_EXCEPTION_M |
3141 PFINT_OICR_VFLR_M |
3142 PFINT_OICR_HMC_ERR_M |
3143 PFINT_OICR_PE_PUSH_M |
3144 PFINT_OICR_PE_CRITERR_M);
3145
3146 wr32(hw, PFINT_OICR_ENA, val);
3147
3148 /* SW_ITR_IDX = 0, but don't change INTENA */
3149 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3150 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3151
3152 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3153 return;
3154 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3155 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3156 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3157}
3158
3159/**
3160 * ice_ll_ts_intr - ll_ts interrupt handler
3161 * @irq: interrupt number
3162 * @data: pointer to the PF structure
3163 */
3164static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3165{
3166 struct ice_pf *pf = data;
3167 u32 pf_intr_start_offset;
3168 struct ice_ptp_tx *tx;
3169 unsigned long flags;
3170 struct ice_hw *hw;
3171 u32 val;
3172 u8 idx;
3173
3174 hw = &pf->hw;
3175 tx = &pf->ptp.port.tx;
3176 spin_lock_irqsave(&tx->lock, flags);
3177 ice_ptp_complete_tx_single_tstamp(tx);
3178
3179 idx = find_next_bit_wrap(tx->in_use, tx->len,
3180 tx->last_ll_ts_idx_read + 1);
3181 if (idx != tx->len)
3182 ice_ptp_req_tx_single_tstamp(tx, idx);
3183 spin_unlock_irqrestore(&tx->lock, flags);
3184
3185 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3186 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3187 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3188 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3189 val);
3190
3191 return IRQ_HANDLED;
3192}
3193
3194/**
3195 * ice_misc_intr - misc interrupt handler
3196 * @irq: interrupt number
3197 * @data: pointer to the PF structure
3198 */
3199static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3200{
3201 struct ice_pf *pf = (struct ice_pf *)data;
3202 irqreturn_t ret = IRQ_HANDLED;
3203 struct ice_hw *hw = &pf->hw;
3204 struct device *dev;
3205 u32 oicr, ena_mask;
3206
3207 dev = ice_pf_to_dev(pf);
3208 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3209 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3210 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3211
3212 oicr = rd32(hw, PFINT_OICR);
3213 ena_mask = rd32(hw, PFINT_OICR_ENA);
3214
3215 if (oicr & PFINT_OICR_SWINT_M) {
3216 ena_mask &= ~PFINT_OICR_SWINT_M;
3217 pf->sw_int_count++;
3218 }
3219
3220 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3221 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3222 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3223 }
3224 if (oicr & PFINT_OICR_VFLR_M) {
3225 /* disable any further VFLR event notifications */
3226 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3227 u32 reg = rd32(hw, PFINT_OICR_ENA);
3228
3229 reg &= ~PFINT_OICR_VFLR_M;
3230 wr32(hw, PFINT_OICR_ENA, reg);
3231 } else {
3232 ena_mask &= ~PFINT_OICR_VFLR_M;
3233 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3234 }
3235 }
3236
3237 if (oicr & PFINT_OICR_GRST_M) {
3238 u32 reset;
3239
3240 /* we have a reset warning */
3241 ena_mask &= ~PFINT_OICR_GRST_M;
3242 reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3243 rd32(hw, GLGEN_RSTAT));
3244
3245 if (reset == ICE_RESET_CORER)
3246 pf->corer_count++;
3247 else if (reset == ICE_RESET_GLOBR)
3248 pf->globr_count++;
3249 else if (reset == ICE_RESET_EMPR)
3250 pf->empr_count++;
3251 else
3252 dev_dbg(dev, "Invalid reset type %d\n", reset);
3253
3254 /* If a reset cycle isn't already in progress, we set a bit in
3255 * pf->state so that the service task can start a reset/rebuild.
3256 */
3257 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3258 if (reset == ICE_RESET_CORER)
3259 set_bit(ICE_CORER_RECV, pf->state);
3260 else if (reset == ICE_RESET_GLOBR)
3261 set_bit(ICE_GLOBR_RECV, pf->state);
3262 else
3263 set_bit(ICE_EMPR_RECV, pf->state);
3264
3265 * There are a couple of different bits at play here.
3266 * hw->reset_ongoing indicates whether the hardware is
3267 * in reset. This is set to true when a reset interrupt
3268 * is received and set back to false after the driver
3269 * has determined that the hardware is out of reset.
3270 *
3271 * ICE_RESET_OICR_RECV in pf->state indicates
3272 * that a post reset rebuild is required before the
3273 * driver is operational again. This is set above.
3274 *
3275 * As this is the start of the reset/rebuild cycle, set
3276 * both to indicate that.
3277 */
3278 hw->reset_ongoing = true;
3279 }
3280 }
3281
3282 if (oicr & PFINT_OICR_TSYN_TX_M) {
3283 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3284 if (ice_pf_state_is_nominal(pf) &&
3285 pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3286 struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3287 unsigned long flags;
3288 u8 idx;
3289
3290 spin_lock_irqsave(&tx->lock, flags);
3291 idx = find_next_bit_wrap(tx->in_use, tx->len,
3292 tx->last_ll_ts_idx_read + 1);
3293 if (idx != tx->len)
3294 ice_ptp_req_tx_single_tstamp(tx, idx);
3295 spin_unlock_irqrestore(&tx->lock, flags);
3296 } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3297 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3298 ret = IRQ_WAKE_THREAD;
3299 }
3300 }
3301
3302 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3303 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3304 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3305
3306 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3307
3308 if (ice_pf_src_tmr_owned(pf)) {
3309 /* Save EVENTs from GLTSYN register */
3310 pf->ptp.ext_ts_irq |= gltsyn_stat &
3311 (GLTSYN_STAT_EVENT0_M |
3312 GLTSYN_STAT_EVENT1_M |
3313 GLTSYN_STAT_EVENT2_M);
3314
3315 ice_ptp_extts_event(pf);
3316 }
3317 }
3318
3319#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3320 if (oicr & ICE_AUX_CRIT_ERR) {
3321 pf->oicr_err_reg |= oicr;
3322 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3323 ena_mask &= ~ICE_AUX_CRIT_ERR;
3324 }
3325
3326 /* Report any remaining unexpected interrupts */
3327 oicr &= ena_mask;
3328 if (oicr) {
3329 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3330 /* If a critical error is pending there is no choice but to
3331 * reset the device.
3332 */
3333 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3334 PFINT_OICR_ECC_ERR_M)) {
3335 set_bit(ICE_PFR_REQ, pf->state);
3336 }
3337 }
3338 ice_service_task_schedule(pf);
3339 if (ret == IRQ_HANDLED)
3340 ice_irq_dynamic_ena(hw, NULL, NULL);
3341
3342 return ret;
3343}
3344
3345/**
3346 * ice_misc_intr_thread_fn - misc interrupt thread function
3347 * @irq: interrupt number
3348 * @data: pointer to the PF structure
3349 */
3350static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3351{
3352 struct ice_pf *pf = data;
3353 struct ice_hw *hw;
3354
3355 hw = &pf->hw;
3356
3357 if (ice_is_reset_in_progress(pf->state))
3358 goto skip_irq;
3359
3360 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3361 /* Process outstanding Tx timestamps. If there is more work,
3362 * re-arm the interrupt to trigger again.
3363 */
3364 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3365 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3366 ice_flush(hw);
3367 }
3368 }
3369
3370skip_irq:
3371 ice_irq_dynamic_ena(hw, NULL, NULL);
3372
3373 return IRQ_HANDLED;
3374}
3375
3376/**
3377 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3378 * @hw: pointer to HW structure
3379 */
3380static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3381{
3382 /* disable Admin queue Interrupt causes */
3383 wr32(hw, PFINT_FW_CTL,
3384 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3385
3386 /* disable Mailbox queue Interrupt causes */
3387 wr32(hw, PFINT_MBX_CTL,
3388 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3389
3390 wr32(hw, PFINT_SB_CTL,
3391 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3392
3393 /* disable Control queue Interrupt causes */
3394 wr32(hw, PFINT_OICR_CTL,
3395 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3396
3397 ice_flush(hw);
3398}
3399
3400/**
3401 * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3402 * @pf: board private structure
3403 */
3404static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3405{
3406 int irq_num = pf->ll_ts_irq.virq;
3407
3408 synchronize_irq(irq_num);
3409 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3410
3411 ice_free_irq(pf, pf->ll_ts_irq);
3412}
3413
3414/**
3415 * ice_free_irq_msix_misc - Unroll misc vector setup
3416 * @pf: board private structure
3417 */
3418static void ice_free_irq_msix_misc(struct ice_pf *pf)
3419{
3420 int misc_irq_num = pf->oicr_irq.virq;
3421 struct ice_hw *hw = &pf->hw;
3422
3423 ice_dis_ctrlq_interrupts(hw);
3424
3425 /* disable OICR interrupt */
3426 wr32(hw, PFINT_OICR_ENA, 0);
3427 ice_flush(hw);
3428
3429 synchronize_irq(misc_irq_num);
3430 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3431
3432 ice_free_irq(pf, pf->oicr_irq);
3433 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3434 ice_free_irq_msix_ll_ts(pf);
3435}
3436
3437/**
3438 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3439 * @hw: pointer to HW structure
3440 * @reg_idx: HW vector index to associate the control queue interrupts with
3441 */
3442static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3443{
3444 u32 val;
3445
3446 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3447 PFINT_OICR_CTL_CAUSE_ENA_M);
3448 wr32(hw, PFINT_OICR_CTL, val);
3449
3450 /* enable Admin queue Interrupt causes */
3451 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3452 PFINT_FW_CTL_CAUSE_ENA_M);
3453 wr32(hw, PFINT_FW_CTL, val);
3454
3455 /* enable Mailbox queue Interrupt causes */
3456 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3457 PFINT_MBX_CTL_CAUSE_ENA_M);
3458 wr32(hw, PFINT_MBX_CTL, val);
3459
3460 if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3461 /* enable Sideband queue Interrupt causes */
3462 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3463 PFINT_SB_CTL_CAUSE_ENA_M);
3464 wr32(hw, PFINT_SB_CTL, val);
3465 }
3466
3467 ice_flush(hw);
3468}
3469
3470/**
3471 * ice_req_irq_msix_misc - Setup the misc vector to handle non-queue events
3472 * @pf: board private structure
3473 *
3474 * This sets up the handler for MSIX 0, which is used to manage the
3475 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3476 * when in MSI or Legacy interrupt mode.
3477 */
3478static int ice_req_irq_msix_misc(struct ice_pf *pf)
3479{
3480 struct device *dev = ice_pf_to_dev(pf);
3481 struct ice_hw *hw = &pf->hw;
3482 u32 pf_intr_start_offset;
3483 struct msi_map irq;
3484 int err = 0;
3485
3486 if (!pf->int_name[0])
3487 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3488 dev_driver_string(dev), dev_name(dev));
3489
3490 if (!pf->int_name_ll_ts[0])
3491 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3492 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3493 /* Do not request IRQ but do enable OICR interrupt since settings are
3494 * lost during reset. Note that this function is called only during
3495 * rebuild path and not while reset is in progress.
3496 */
3497 if (ice_is_reset_in_progress(pf->state))
3498 goto skip_req_irq;
3499
3500 /* reserve one vector in irq_tracker for misc interrupts */
3501 irq = ice_alloc_irq(pf, false);
3502 if (irq.index < 0)
3503 return irq.index;
3504
3505 pf->oicr_irq = irq;
3506 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3507 ice_misc_intr_thread_fn, 0,
3508 pf->int_name, pf);
3509 if (err) {
3510 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3511 pf->int_name, err);
3512 ice_free_irq(pf, pf->oicr_irq);
3513 return err;
3514 }
3515
3516 /* reserve one vector in irq_tracker for ll_ts interrupt */
3517 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3518 goto skip_req_irq;
3519
3520 irq = ice_alloc_irq(pf, false);
3521 if (irq.index < 0)
3522 return irq.index;
3523
3524 pf->ll_ts_irq = irq;
3525 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3526 pf->int_name_ll_ts, pf);
3527 if (err) {
3528 dev_err(dev, "devm_request_irq for %s failed: %d\n",
3529 pf->int_name_ll_ts, err);
3530 ice_free_irq(pf, pf->ll_ts_irq);
3531 return err;
3532 }
3533
3534skip_req_irq:
3535 ice_ena_misc_vector(pf);
3536
3537 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3538 /* This enables LL TS interrupt */
3539 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3540 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3541 wr32(hw, PFINT_SB_CTL,
3542 ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3543 PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3544 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3545 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3546
3547 ice_flush(hw);
3548 ice_irq_dynamic_ena(hw, NULL, NULL);
3549
3550 return 0;
3551}
3552
3553/**
3554 * ice_set_ops - set netdev and ethtool ops for the given netdev
3555 * @vsi: the VSI associated with the new netdev
3556 */
static void ice_set_ops(struct ice_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_is_safe_mode(pf)) {
		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
		ice_set_ethtool_safe_mode_ops(netdev);
		return;
	}

	netdev->netdev_ops = &ice_netdev_ops;
	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
	ice_set_ethtool_ops(netdev);

	if (vsi->type != ICE_VSI_PF)
		return;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
			       NETDEV_XDP_ACT_RX_SG;
	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
}

/**
 * ice_set_netdev_features - set features for the given netdev
 * @netdev: netdev instance
 */
void ice_set_netdev_features(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;

	if (ice_is_safe_mode(pf)) {
		/* safe mode */
		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
		netdev->hw_features = netdev->features;
		return;
	}

	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA |
			NETIF_F_NTUPLE |
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM |
			 NETIF_F_IP_CSUM |
			 NETIF_F_SCTP_CRC |
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;

	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
	if (is_dvm_ena)
		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;

	tso_features = NETIF_F_TSO |
		       NETIF_F_TSO_ECN |
		       NETIF_F_TSO6 |
		       NETIF_F_GSO_GRE |
		       NETIF_F_GSO_UDP_TUNNEL |
		       NETIF_F_GSO_GRE_CSUM |
		       NETIF_F_GSO_UDP_TUNNEL_CSUM |
		       NETIF_F_GSO_PARTIAL |
		       NETIF_F_GSO_IPXIP4 |
		       NETIF_F_GSO_IPXIP6 |
		       NETIF_F_GSO_UDP_L4;

	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_GRE_CSUM;
	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* add support for HW_CSUM on packets with MPLS header */
	netdev->mpls_features = NETIF_F_HW_CSUM |
				NETIF_F_TSO |
				NETIF_F_TSO6;

	/* enable features */
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_HW_TC;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	/* advertise support but don't enable by default since only one type of
	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
	 * type turns on the other has to be turned off. This is enforced by the
	 * ice_fix_features() ndo callback.
	 */
	if (is_dvm_ena)
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
				       NETIF_F_HW_VLAN_STAG_TX;

	/* Leave CRC / FCS stripping enabled by default, but allow the value to
	 * be changed at runtime
	 */
	netdev->hw_features |= NETIF_F_RXFCS;

	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}

/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

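	/* e.g. rss_table_size = 8, rss_size = 3 gives LUT = 0 1 2 0 1 2 0 1 */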
	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_PF;
	params.port_info = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
		   struct ice_channel *ch)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_CHNL;
	params.port_info = pi;
	params.ch = ch;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_ctrl_vsi_setup - Set up a control VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_CTRL;
	params.port_info = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_lb_vsi_setup - Set up a loopback VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_LB;
	params.port_info = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID
 * @vid: VLAN ID to be added
 *
 * net_device_ops implementation for adding VLAN IDs
 */
int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vsi *vsi = np->vsi;
	struct ice_vlan vlan;
	int ret;

	/* VLAN 0 is added by default during load/reset */
	if (!vid)
		return 0;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	/* Add multicast promisc rule for the VLAN ID to be added if
	 * all-multicast is currently enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					       ICE_MCAST_VLAN_PROMISC_BITS,
					       vid);
		if (ret)
			goto finish;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
	 * packets aren't pruned by the device's internal switch on Rx
	 */
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
	ret = vlan_ops->add_vlan(vsi, &vlan);
	if (ret)
		goto finish;

	/* If all-multicast is currently enabled and this VLAN ID is the only
	 * one besides VLAN 0, we have to update the look-up type of the
	 * multicast promisc rule for VLAN 0 from ICE_SW_LKUP_PROMISC to
	 * ICE_SW_LKUP_PROMISC_VLAN.
	 */
	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_PROMISC_BITS, 0);
		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return ret;
}

/**
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID
 * @vid: VLAN ID to be removed
 *
 * net_device_ops implementation for removing VLAN IDs
 */
int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vsi *vsi = np->vsi;
	struct ice_vlan vlan;
	int ret;

	/* don't allow removal of VLAN 0 */
	if (!vid)
		return 0;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
	if (ret) {
		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
			   vsi->vsi_num);
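		/* keep ALLMULTI flagged so the SW state keeps matching the HW
		 * promisc rule that could not be cleared
		 */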
		vsi->current_netdev_flags |= IFF_ALLMULTI;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Make sure VLAN delete is successful before updating VLAN
	 * information
	 */
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
	ret = vlan_ops->del_vlan(vsi, &vlan);
	if (ret)
		goto finish;

	/* Remove multicast promisc rule for the removed VLAN ID if
	 * all-multicast is enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI)
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_VLAN_PROMISC_BITS, vid);

	if (!ice_vsi_has_non_zero_vlans(vsi)) {
		/* Update look-up type of multicast promisc rule for VLAN 0
		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
		 */
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						   ICE_MCAST_VLAN_PROMISC_BITS,
						   0);
			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						 ICE_MCAST_PROMISC_BITS, 0);
		}
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return ret;
}

/**
 * ice_rep_indr_tc_block_unbind - clean up an indirect TC block binding
 * @cb_priv: indirection block private data
 */
static void ice_rep_indr_tc_block_unbind(void *cb_priv)
{
	struct ice_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

/**
 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
 * @vsi: VSI struct which has the netdev
 */
static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);

	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
				 ice_rep_indr_tc_block_unbind);
}

/**
 * ice_tc_indir_block_register - Register TC indirect block notifications
 * @vsi: VSI struct which has the netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_tc_indir_block_register(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np;

	if (!vsi || !vsi->netdev)
		return -EINVAL;

	np = netdev_priv(vsi->netdev);

	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
}

/**
 * ice_get_avail_q_count - Get count of available queues
 * @pf_qmap: bitmap from which to count unused (clear) queue bits
 * @lock: pointer to a mutex that protects access to pf_qmap
 * @size: size of the bitmap
 */
static u16
ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
{
	unsigned long bit;
	u16 count = 0;

	mutex_lock(lock);
	for_each_clear_bit(bit, pf_qmap, size)
		count++;
	mutex_unlock(lock);

	return count;
}

/**
 * ice_get_avail_txq_count - Get count of available Tx queues
 * @pf: pointer to an ice_pf instance
 */
u16 ice_get_avail_txq_count(struct ice_pf *pf)
{
	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
				     pf->max_pf_txqs);
}

/**
 * ice_get_avail_rxq_count - Get count of available Rx queues
 * @pf: pointer to an ice_pf instance
 */
u16 ice_get_avail_rxq_count(struct ice_pf *pf)
{
	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
				     pf->max_pf_rxqs);
}

/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to de-initialize
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	ice_service_task_stop(pf);
	mutex_destroy(&pf->lag_mutex);
	mutex_destroy(&pf->adev_mutex);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->tc_mutex);
	mutex_destroy(&pf->avail_q_mutex);
	mutex_destroy(&pf->vfs.table_lock);

	if (pf->avail_txqs) {
		bitmap_free(pf->avail_txqs);
		pf->avail_txqs = NULL;
	}

	if (pf->avail_rxqs) {
		bitmap_free(pf->avail_rxqs);
		pf->avail_rxqs = NULL;
	}

	if (pf->ptp.clock)
		ptp_clock_unregister(pf->ptp.clock);

	xa_destroy(&pf->dyn_ports);
	xa_destroy(&pf->sf_nums);
}

/**
 * ice_set_pf_caps - set PFs capability flags
 * @pf: pointer to the PF instance
 */
static void ice_set_pf_caps(struct ice_pf *pf)
{
	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;

	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
	if (func_caps->common_cap.rdma)
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
	if (func_caps->common_cap.dcb)
		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
	if (func_caps->common_cap.sr_iov_1_1) {
		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
					      ICE_MAX_SRIOV_VFS);
	}
	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
	if (func_caps->common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
		u16 unused;

		/* ctrl_vsi_idx will be set to a valid value when flow director
		 * is set up by ice_init_fdir
		 */
		pf->ctrl_vsi_idx = ICE_NO_VSI;
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		/* force guaranteed filter pool for PF */
		ice_alloc_fd_guar_item(&pf->hw, &unused,
				       func_caps->fd_fltr_guar);
		/* force shared filter pool for PF */
		ice_alloc_fd_shrd_item(&pf->hw, &unused,
				       func_caps->fd_fltr_best_effort);
	}

	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
	if (func_caps->common_cap.ieee_1588 &&
	    !(pf->hw.mac_type == ICE_MAC_E830))
		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);

	pf->max_pf_txqs = func_caps->common_cap.num_txq;
	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}

/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static int ice_init_pf(struct ice_pf *pf)
{
	ice_set_pf_caps(pf);

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->tc_mutex);
	mutex_init(&pf->adev_mutex);
	mutex_init(&pf->lag_mutex);

	INIT_HLIST_HEAD(&pf->aq_wait_list);
	spin_lock_init(&pf->aq_wait_lock);
	init_waitqueue_head(&pf->aq_wait_queue);

	init_waitqueue_head(&pf->reset_wait_queue);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(ICE_SERVICE_SCHED, pf->state);

	mutex_init(&pf->avail_q_mutex);
	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
	if (!pf->avail_txqs)
		return -ENOMEM;

	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
	if (!pf->avail_rxqs) {
		bitmap_free(pf->avail_txqs);
		pf->avail_txqs = NULL;
		return -ENOMEM;
	}

	mutex_init(&pf->vfs.table_lock);
	hash_init(pf->vfs.table);
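	/* HW with the mailbox-limit feature throttles in-flight VF messages
	 * itself; otherwise fall back to a SW snapshot for overflow detection
	 */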
	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
		     ICE_MBX_OVERFLOW_WATERMARK);
	else
		ice_mbx_init_snapshot(&pf->hw);

	xa_init(&pf->dyn_ports);
	xa_init(&pf->sf_nums);

	return 0;
}

/**
 * ice_is_wol_supported - check if WoL is supported
 * @hw: pointer to hardware info
 *
 * Check if WoL is supported based on the HW configuration.
 * Returns true if NVM supports and enables WoL for this port, false otherwise
 */
bool ice_is_wol_supported(struct ice_hw *hw)
{
	u16 wol_ctrl;

	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
	 * word) indicates WoL is not supported on the corresponding PF ID.
	 */
	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
		return false;

	return !(BIT(hw->port_info->lport) & wol_ctrl);
}

/**
 * ice_vsi_recfg_qs - Change the number of queues on a VSI
 * @vsi: VSI being changed
 * @new_rx: new number of Rx queues
 * @new_tx: new number of Tx queues
 * @locked: is adev device_lock held
 *
 * Only change the number of queues if new_tx or new_rx is non-zero.
 *
 * Returns 0 on success.
 */
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
	struct ice_pf *pf = vsi->back;
	int i, err = 0, timeout = 50;

	if (!new_rx && !new_tx)
		return -EINVAL;

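	/* poll (1-2 ms per try, 50 tries) for any other configuration in
	 * flight to finish before touching this VSI
	 */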
	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	if (new_tx)
		vsi->req_txq = (u16)new_tx;
	if (new_rx)
		vsi->req_rxq = (u16)new_rx;

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
		if (err)
			goto rebuild_err;
		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
		goto done;
	}

	ice_vsi_close(vsi);
	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
	if (err)
		goto rebuild_err;

	ice_for_each_traffic_class(i) {
		if (vsi->tc_cfg.ena_tc & BIT(i))
			netdev_set_tc_queue(vsi->netdev,
					    vsi->tc_cfg.tc_info[i].netdev_tc,
					    vsi->tc_cfg.tc_info[i].qcount_tx,
					    vsi->tc_cfg.tc_info[i].qoffset);
	}
	ice_pf_dcb_recfg(pf, locked);
	ice_vsi_open(vsi);
	goto done;

rebuild_err:
	dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
		err);
done:
	clear_bit(ICE_CFG_BUSY, pf->state);
	return err;
}

/**
 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
 * @pf: PF to configure
 *
 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
 * VSI can still Tx/Rx VLAN tagged packets.
 */
static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	struct ice_vsi_ctx *ctxt;
	struct ice_hw *hw;
	int status;

	if (!vsi)
		return;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	hw = &pf->hw;
	ctxt->info = vsi->info;

	ctxt->info.valid_sections =
		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
			    ICE_AQ_VSI_PROP_SECURITY_VALID |
			    ICE_AQ_VSI_PROP_SW_VALID);

	/* disable VLAN anti-spoof */
	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);

	/* disable VLAN pruning and keep all other settings */
	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	/* allow all VLANs on Tx and don't strip on Rx */
	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
			status, ice_aq_str(hw->adminq.sq_last_status));
	} else {
		vsi->info.sec_flags = ctxt->info.sec_flags;
		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
	}

	kfree(ctxt);
}

/**
 * ice_log_pkg_init - log result of DDP package load
 * @hw: pointer to hardware info
 * @state: state of package load
 */
static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
{
	struct ice_pf *pf = hw->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
		dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
			hw->active_pkg_name,
			hw->active_pkg_ver.major,
			hw->active_pkg_ver.minor,
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft,
			 hw->pkg_name,
			 hw->pkg_ver.major,
			 hw->pkg_ver.minor,
			 hw->pkg_ver.update,
			 hw->pkg_ver.draft);
		break;
	case ICE_DDP_PKG_FW_MISMATCH:
		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
		break;
	case ICE_DDP_PKG_INVALID_FILE:
		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
		dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
		dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
		dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_LOAD_ERROR:
		dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
		/* poll for reset to complete */
		if (ice_check_reset(hw))
			dev_err(dev, "Error resetting device. Please reload the driver\n");
		break;
	case ICE_DDP_PKG_ERR:
	default:
		dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
		break;
	}
}

/**
 * ice_load_pkg - load/reload the DDP Package file
 * @firmware: firmware structure when firmware requested or NULL for reload
 * @pf: pointer to the PF instance
 *
 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
 * initialize HW tables.
 */
static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	/* Load DDP Package */
	if (firmware && !hw->pkg_copy) {
		state = ice_copy_and_init_pkg(hw, firmware->data,
					      firmware->size);
		ice_log_pkg_init(hw, state);
	} else if (!firmware && hw->pkg_copy) {
		/* Reload package during rebuild after CORER/GLOBR reset */
		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
		ice_log_pkg_init(hw, state);
	} else {
		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
	}

	if (!ice_is_init_pkg_successful(state)) {
		/* Safe Mode */
		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
		return;
	}

	/* A successful package download is the precondition for advanced
	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
	 */
	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
 * @pf: pointer to the PF structure
 *
 * There is no error returned here because the driver should be able to handle
 * 128 Byte cache lines, so we only print a warning in case issues are seen,
 * specifically with Tx.
 */
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
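	/* a set CACHELINE_SIZE bit in GLPCI_CNF2 indicates the device is
	 * configured for cache lines larger than the assumed 64 Bytes
	 */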
	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
			 ICE_CACHE_LINE_BYTES);
}

/**
 * ice_send_version - update firmware with driver version
 * @pf: PF struct
 *
 * Returns 0 on success, else error code
 */
static int ice_send_version(struct ice_pf *pf)
{
	struct ice_driver_ver dv;

	dv.major_ver = 0xff;
	dv.minor_ver = 0xff;
	dv.build_ver = 0xff;
	dv.subbuild_ver = 0;
	strscpy((char *)dv.driver_string, UTS_RELEASE,
		sizeof(dv.driver_string));
	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}

/**
 * ice_init_fdir - Initialize flow director VSI and configuration
 * @pf: pointer to the PF instance
 *
 * returns 0 on success, negative on error
 */
static int ice_init_fdir(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *ctrl_vsi;
	int err;

	/* Side Band Flow Director needs to have a control VSI.
	 * Allocate it and store it in the PF.
	 */
	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
	if (!ctrl_vsi) {
		dev_dbg(dev, "could not create control VSI\n");
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "could not open control VSI\n");
		goto err_vsi_open;
	}

	mutex_init(&pf->hw.fdir_fltr_lock);

	err = ice_fdir_create_dflt_rules(pf);
	if (err)
		goto err_fdir_rule;

	return 0;

err_fdir_rule:
	ice_fdir_release_flows(&pf->hw);
	ice_vsi_close(ctrl_vsi);
err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[pf->ctrl_vsi_idx] = NULL;
		pf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

static void ice_deinit_fdir(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);

	if (!vsi)
		return;

	ice_vsi_manage_fdir(vsi, false);
	ice_vsi_release(vsi);
	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[pf->ctrl_vsi_idx] = NULL;
		pf->ctrl_vsi_idx = ICE_NO_VSI;
	}

	mutex_destroy(&pf->hw.fdir_fltr_lock);
}

/**
 * ice_get_opt_fw_name - return optional firmware file name or NULL
 * @pf: pointer to the PF instance
 */
static char *ice_get_opt_fw_name(struct ice_pf *pf)
{
	/* Optional firmware name same as default with an additional dash
	 * followed by an EUI-64 identifier (PCIe Device Serial Number)
	 */
	struct pci_dev *pdev = pf->pdev;
	char *opt_fw_filename;
	u64 dsn;

	/* Determine the name of the optional file using the DSN (two
	 * dwords following the start of the DSN Capability).
	 */
	dsn = pci_get_dsn(pdev);
	if (!dsn)
		return NULL;

	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
	if (!opt_fw_filename)
		return NULL;

	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
		 ICE_DDP_PKG_PATH, dsn);
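	/* e.g. "intel/ice/ddp/ice-0123456789abcdef.pkg" for DSN
	 * 0x0123456789abcdef
	 */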

	return opt_fw_filename;
}

/**
 * ice_request_fw - request the DDP package firmware file
 * @pf: pointer to the PF instance
 * @firmware: double pointer to firmware struct
 *
 * Return: zero when successful, negative values otherwise.
 */
static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
{
	char *opt_fw_filename = ice_get_opt_fw_name(pf);
	struct device *dev = ice_pf_to_dev(pf);
	int err = 0;

	/* An optional device-specific DDP (if present) overrides the default
	 * DDP package file. The kernel logs a debug message if the file
	 * doesn't exist and warning messages for other errors.
	 */
	if (opt_fw_filename) {
		err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
		kfree(opt_fw_filename);
		if (!err)
			return err;
	}
	err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
	if (err)
		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");

	return err;
}

/**
 * ice_init_tx_topology - performs Tx topology initialization
 * @hw: pointer to the hardware structure
 * @firmware: pointer to firmware structure
 *
 * Return: zero when init was successful, negative values otherwise.
 */
static int
ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
{
	u8 num_tx_sched_layers = hw->num_tx_sched_layers;
	struct ice_pf *pf = hw->back;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	err = ice_cfg_tx_topo(hw, firmware->data, firmware->size);
	if (!err) {
		if (hw->num_tx_sched_layers > num_tx_sched_layers)
			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
		else
			dev_info(dev, "Tx scheduling layers switching feature enabled\n");
		/* if there was a change in topology, ice_cfg_tx_topo triggered
		 * a CORER and we need to re-init HW
		 */
		ice_deinit_hw(hw);
		err = ice_init_hw(hw);

		return err;
	} else if (err == -EIO) {
		dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
	}

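	/* a DDP package without the Tx topology section is not fatal;
	 * continue with the default topology
	 */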
	return 0;
}

/**
 * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
 * @hw: pointer to the hardware structure
 * @pf: pointer to pf structure
 *
 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
 * formats the PF hardware supports. The exact list of supported RXDIDs
 * depends on the loaded DDP package. The IDs can be determined by reading the
 * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
 *
 * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
 * in the DDP package. The 16-byte legacy descriptor is never supported by
 * VFs.
 */
static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
{
	pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);

	for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
		u32 regval;

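		/* an RXDID whose first flex-flags field is non-zero was
		 * programmed by the DDP package and is reported as supported
		 */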
		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
			pf->supported_rxdids |= BIT(i);
	}
}

/**
 * ice_init_ddp_config - DDP related configuration
 * @hw: pointer to the hardware structure
 * @pf: pointer to pf structure
 *
 * This function loads the DDP file from disk and then initializes the Tx
 * topology. At the end the DDP package is loaded on the card.
 *
 * Return: zero when init was successful, negative values otherwise.
 */
static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	const struct firmware *firmware = NULL;
	int err;

	err = ice_request_fw(pf, &firmware);
	if (err) {
		dev_err(dev, "Failed to request FW: %d\n", err);
		return err;
	}

	err = ice_init_tx_topology(hw, firmware);
	if (err) {
		dev_err(dev, "Failed to initialize Tx topology: %d\n", err);
		release_firmware(firmware);
		return err;
	}

	/* Download firmware to device */
	ice_load_pkg(firmware, pf);
	release_firmware(firmware);

	/* Initialize the supported Rx descriptor IDs after loading DDP */
	ice_init_supported_rxdids(hw, pf);

	return 0;
}

/**
 * ice_print_wake_reason - show the wake up cause in the log
 * @pf: pointer to the PF struct
 */
static void ice_print_wake_reason(struct ice_pf *pf)
{
	u32 wus = pf->wakeup_reason;
	const char *wake_str;

	/* if no wake event, nothing to print */
	if (!wus)
		return;

	if (wus & PFPM_WUS_LNKC_M)
		wake_str = "Link\n";
	else if (wus & PFPM_WUS_MAG_M)
		wake_str = "Magic Packet\n";
	else if (wus & PFPM_WUS_MNG_M)
		wake_str = "Management\n";
	else if (wus & PFPM_WUS_FW_RST_WK_M)
		wake_str = "Firmware Reset\n";
	else
		wake_str = "Unknown\n";

	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
}

/**
 * ice_pf_fwlog_update_module - update 1 module
 * @pf: pointer to the PF struct
 * @log_level: log_level to use for the @module
 * @module: module to update
 */
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
{
	struct ice_hw *hw = &pf->hw;

	hw->fwlog_cfg.module_entries[module].log_level = log_level;
}

/**
 * ice_register_netdev - register netdev
 * @vsi: pointer to the VSI struct
 */
static int ice_register_netdev(struct ice_vsi *vsi)
{
	int err;

	if (!vsi || !vsi->netdev)
		return -EIO;

	err = register_netdev(vsi->netdev);
	if (err)
		return err;

	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
	netif_carrier_off(vsi->netdev);
	netif_tx_stop_all_queues(vsi->netdev);

	return 0;
}

static void ice_unregister_netdev(struct ice_vsi *vsi)
{
	if (!vsi || !vsi->netdev)
		return;

	unregister_netdev(vsi->netdev);
	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}

/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];

	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
				    vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	ice_set_netdev_features(netdev);
	ice_set_ops(vsi);

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
		eth_hw_addr_set(netdev, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Setup netdev TC information */
	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);

	netdev->max_mtu = ICE_MAX_MTU;

	return 0;
}

static void ice_decfg_netdev(struct ice_vsi *vsi)
{
	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	free_netdev(vsi->netdev);
	vsi->netdev = NULL;
}

/**
 * ice_wait_for_fw - wait for full FW readiness
 * @hw: pointer to the hardware structure
 * @timeout: milliseconds that can elapse before timing out
 */
static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
{
	int fw_loading;
	u32 elapsed = 0;

	while (elapsed <= timeout) {
		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;

		/* firmware was not yet loaded, we have to wait more */
		if (fw_loading) {
			elapsed += 100;
			msleep(100);
			continue;
		}
		return 0;
	}

	return -ETIMEDOUT;
}

int ice_init_dev(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int err;

	err = ice_init_hw(hw);
	if (err) {
		dev_err(dev, "ice_init_hw failed: %d\n", err);
		return err;
	}

	/* Some cards require longer initialization times due to the necessity
	 * of loading FW from an external source. This can take up to half a
	 * minute.
	 */
	if (ice_is_pf_c827(hw)) {
		err = ice_wait_for_fw(hw, 30000);
		if (err) {
			dev_err(dev, "ice_wait_for_fw timed out\n");
			return err;
		}
	}

	ice_init_feature_support(pf);

	err = ice_init_ddp_config(hw, pf);

	/* if ice_init_ddp_config fails, the ICE_FLAG_ADV_FEATURES bit won't
	 * be set in pf->flags, which will cause ice_is_safe_mode to return
	 * true
	 */
	if (err || ice_is_safe_mode(pf)) {
		/* we already got function/device capabilities but these don't
		 * reflect what the driver needs to do in safe mode. Instead of
		 * adding conditional logic everywhere to ignore these
		 * device/function capabilities, override them.
		 */
		ice_set_safe_mode_caps(hw);
	}

	err = ice_init_pf(pf);
	if (err) {
		dev_err(dev, "ice_init_pf failed: %d\n", err);
		goto err_init_pf;
	}

	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
		pf->hw.udp_tunnel_nic.tables[0].n_entries =
			pf->hw.tnl.valid_count[TNL_VXLAN];
		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
			UDP_TUNNEL_TYPE_VXLAN;
	}
	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
		pf->hw.udp_tunnel_nic.tables[1].n_entries =
			pf->hw.tnl.valid_count[TNL_GENEVE];
		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
			UDP_TUNNEL_TYPE_GENEVE;
	}

	err = ice_init_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
		err = -EIO;
		goto err_init_interrupt_scheme;
	}

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "setup of misc vector failed: %d\n", err);
		goto err_req_irq_msix_misc;
	}

	return 0;

err_req_irq_msix_misc:
	ice_clear_interrupt_scheme(pf);
err_init_interrupt_scheme:
	ice_deinit_pf(pf);
err_init_pf:
	ice_deinit_hw(hw);
	return err;
}

void ice_deinit_dev(struct ice_pf *pf)
{
	ice_free_irq_msix_misc(pf);
	ice_deinit_pf(pf);
	ice_deinit_hw(&pf->hw);

	/* Service task is already stopped, so call reset directly. */
	ice_reset(&pf->hw, ICE_RESET_PFR);
	pci_wait_for_pending_transaction(pf->pdev);
	ice_clear_interrupt_scheme(pf);
}

static void ice_init_features(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (ice_is_safe_mode(pf))
		return;

	/* initialize DDP driven features */
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_init(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_init(pf);

	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
		ice_dpll_init(pf);

	/* Note: Flow director init failure is non-fatal to load */
	if (ice_init_fdir(pf))
		dev_err(dev, "could not initialize flow director\n");

	/* Note: DCB init failure is non-fatal to load */
	if (ice_init_pf_dcb(pf, false)) {
		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	} else {
		ice_cfg_lldp_mib_change(&pf->hw, true);
	}

	if (ice_init_lag(pf))
		dev_warn(dev, "Failed to init link aggregation support\n");

	ice_hwmon_init(pf);
}

static void ice_deinit_features(struct ice_pf *pf)
{
	if (ice_is_safe_mode(pf))
		return;

	ice_deinit_lag(pf);
	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
		ice_cfg_lldp_mib_change(&pf->hw, false);
	ice_deinit_fdir(pf);
	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_release(pf);
	if (test_bit(ICE_FLAG_DPLL, pf->flags))
		ice_dpll_deinit(pf);
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		xa_destroy(&pf->eswitch.reprs);
}

static void ice_init_wakeup(struct ice_pf *pf)
{
	/* Save wakeup reason register for later use */
	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);

	/* check for a power management event */
	ice_print_wake_reason(pf);

	/* clear wake status, all bits */
	wr32(&pf->hw, PFPM_WUS, U32_MAX);

	/* Disable WoL at init, wait for user to enable */
	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
}

static int ice_init_link(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_init_link_events(pf->hw.port_info);
	if (err) {
		dev_err(dev, "ice_init_link_events failed: %d\n", err);
		return err;
	}

	/* not a fatal error if this fails */
	err = ice_init_nvm_phy_type(pf->hw.port_info);
	if (err)
		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);

	/* not a fatal error if this fails */
	err = ice_update_link_info(pf->hw.port_info);
	if (err)
		dev_err(dev, "ice_update_link_info failed: %d\n", err);

	ice_init_link_dflt_override(pf->hw.port_info);

	ice_check_link_cfg_err(pf,
			       pf->hw.port_info->phy.link_info.link_cfg_err);

	/* if media available, initialize PHY settings */
	if (pf->hw.port_info->phy.link_info.link_info &
	    ICE_AQ_MEDIA_AVAILABLE) {
		/* not a fatal error if this fails */
		err = ice_init_phy_user_cfg(pf->hw.port_info);
		if (err)
			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);

		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
			struct ice_vsi *vsi = ice_get_main_vsi(pf);

			if (vsi)
				ice_configure_phy(vsi);
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
	}

	return err;
}

static int ice_init_pf_sw(struct ice_pf *pf)
{
	bool dvm = ice_is_dvm_ena(&pf->hw);
	struct ice_vsi *vsi;
	int err;

	/* create switch struct for the switch element created by FW on boot */
	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
	if (!pf->first_sw)
		return -ENOMEM;

	if (pf->hw.evb_veb)
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
	else
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;

	pf->first_sw->pf = pf;

	/* record the sw_id available for later use */
	pf->first_sw->sw_id = pf->hw.port_info->sw_id;

	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
	if (err)
		goto err_aq_set_port_params;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi) {
		err = -ENOMEM;
		goto err_pf_vsi_setup;
	}

	return 0;

err_pf_vsi_setup:
err_aq_set_port_params:
	kfree(pf->first_sw);
	return err;
}

static void ice_deinit_pf_sw(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);

	if (!vsi)
		return;

	ice_vsi_release(vsi);
	kfree(pf->first_sw);
}

static int ice_alloc_vsis(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
	if (!pf->num_alloc_vsi)
		return -EIO;

	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
		dev_warn(dev,
			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
	}

	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
			       GFP_KERNEL);
	if (!pf->vsi)
		return -ENOMEM;

	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
				     sizeof(*pf->vsi_stats), GFP_KERNEL);
	if (!pf->vsi_stats) {
		devm_kfree(dev, pf->vsi);
		return -ENOMEM;
	}

	return 0;
}

static void ice_dealloc_vsis(struct ice_pf *pf)
{
	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
	pf->vsi_stats = NULL;

	pf->num_alloc_vsi = 0;
	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
	pf->vsi = NULL;
}

static int ice_init_devlink(struct ice_pf *pf)
{
	int err;

	err = ice_devlink_register_params(pf);
	if (err)
		return err;

	ice_devlink_init_regions(pf);
	ice_devlink_register(pf);

	return 0;
}

static void ice_deinit_devlink(struct ice_pf *pf)
{
	ice_devlink_unregister(pf);
	ice_devlink_destroy_regions(pf);
	ice_devlink_unregister_params(pf);
}

static int ice_init(struct ice_pf *pf)
{
	int err;

	err = ice_init_dev(pf);
	if (err)
		return err;

	err = ice_alloc_vsis(pf);
	if (err)
		goto err_alloc_vsis;

	err = ice_init_pf_sw(pf);
	if (err)
		goto err_init_pf_sw;

	ice_init_wakeup(pf);

	err = ice_init_link(pf);
	if (err)
		goto err_init_link;

	err = ice_send_version(pf);
	if (err)
		goto err_init_link;

	ice_verify_cacheline_size(pf);

	if (ice_is_safe_mode(pf))
		ice_set_safe_mode_vlan_cfg(pf);
	else
		/* print PCI link speed and width */
		pcie_print_link_status(pf->pdev);

	/* ready to go, so clear down state bit */
	clear_bit(ICE_DOWN, pf->state);
	clear_bit(ICE_SERVICE_DIS, pf->state);

	/* since everything is good, start the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;

err_init_link:
	ice_deinit_pf_sw(pf);
err_init_pf_sw:
	ice_dealloc_vsis(pf);
err_alloc_vsis:
	ice_deinit_dev(pf);
	return err;
}

static void ice_deinit(struct ice_pf *pf)
{
	set_bit(ICE_SERVICE_DIS, pf->state);
	set_bit(ICE_DOWN, pf->state);

	ice_deinit_pf_sw(pf);
	ice_dealloc_vsis(pf);
	ice_deinit_dev(pf);
}

/**
 * ice_load - load the PF by initializing HW and starting the VSI
 * @pf: pointer to the pf instance
 *
 * This function has to be called under devl_lock.
 */
int ice_load(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	int err;

	devl_assert_locked(priv_to_devlink(pf));

	vsi = ice_get_main_vsi(pf);

	/* init channel list */
	INIT_LIST_HEAD(&vsi->ch_list);

	err = ice_cfg_netdev(vsi);
	if (err)
		return err;

	/* Setup DCB netlink interface */
	ice_dcbnl_setup(vsi);

	err = ice_init_mac_fltr(pf);
	if (err)
		goto err_init_mac_fltr;

	err = ice_devlink_create_pf_port(pf);
	if (err)
		goto err_devlink_create_pf_port;

	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);

	err = ice_register_netdev(vsi);
	if (err)
		goto err_register_netdev;

	err = ice_tc_indir_block_register(vsi);
	if (err)
		goto err_tc_indir_block_register;

	ice_napi_add(vsi);

	err = ice_init_rdma(pf);
	if (err)
		goto err_init_rdma;

	ice_init_features(pf);
	ice_service_task_restart(pf);

	clear_bit(ICE_DOWN, pf->state);

	return 0;

err_init_rdma:
	ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
	ice_unregister_netdev(vsi);
err_register_netdev:
	ice_devlink_destroy_pf_port(pf);
err_devlink_create_pf_port:
err_init_mac_fltr:
	ice_decfg_netdev(vsi);
	return err;
}

/**
 * ice_unload - unload the PF by stopping the VSI and deinitializing HW
 * @pf: pointer to the pf instance
 *
 * This function has to be called under devl_lock.
 */
void ice_unload(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);

	devl_assert_locked(priv_to_devlink(pf));

	ice_deinit_features(pf);
	ice_deinit_rdma(pf);
	ice_tc_indir_block_unregister(vsi);
	ice_unregister_netdev(vsi);
	ice_devlink_destroy_pf_port(pf);
	ice_decfg_netdev(vsi);
}

/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct ice_adapter *adapter;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	if (pdev->is_virtfn) {
		dev_err(dev, "can't probe a virtual function\n");
		return -EINVAL;
	}

	/* When running under a kdump kernel, initiate a reset before enabling
	 * the device in order to clear out any pending DMA transactions. These
	 * transactions can cause some systems to machine check when doing
	 * the pcim_enable_device() below.
	 */
	if (is_kdump_kernel()) {
		pci_save_state(pdev);
		pci_clear_master(pdev);
		err = pcie_flr(pdev);
		if (err)
			return err;
		pci_restore_state(pdev);
	}

	/* this driver uses devres, see
	 * Documentation/driver-api/driver-model/devres.rst
	 */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
	if (err) {
		dev_err(dev, "BAR0 I/O map error %d\n", err);
		return err;
	}

	pf = ice_allocate_pf(dev);
	if (!pf)
		return -ENOMEM;

	/* initialize Auxiliary index to invalid value */
	pf->aux_idx = -1;

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
		return err;
	}

	pci_set_master(pdev);

	adapter = ice_adapter_get(pdev);
	if (IS_ERR(adapter))
		return PTR_ERR(adapter);

	pf->pdev = pdev;
	pf->adapter = adapter;
	pci_set_drvdata(pdev, pf);
	set_bit(ICE_DOWN, pf->state);
	/* Disable service task until DOWN bit is cleared */
	set_bit(ICE_SERVICE_DIS, pf->state);

	hw = &pf->hw;
	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
	pci_save_state(pdev);

	hw->back = pf;
	hw->port_info = NULL;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	ice_set_ctrlq_len(hw);

	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (debug < -1)
		hw->debug_mask = debug;
#endif

	err = ice_init(pf);
	if (err)
		goto err_init;

	devl_lock(priv_to_devlink(pf));
	err = ice_load(pf);
	if (err)
		goto err_load;

	err = ice_init_devlink(pf);
	if (err)
		goto err_init_devlink;
	devl_unlock(priv_to_devlink(pf));

	return 0;

err_init_devlink:
	ice_unload(pf);
err_load:
	devl_unlock(priv_to_devlink(pf));
	ice_deinit(pf);
err_init:
	ice_adapter_put(pdev);
	return err;
}

/**
 * ice_set_wake - enable or disable Wake on LAN
 * @pf: pointer to the PF struct
 *
 * Simple helper for WoL control
 */
static void ice_set_wake(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool wol = pf->wol_ena;

	/* clear wake state, otherwise new wake events won't fire */
	wr32(hw, PFPM_WUS, U32_MAX);

	/* enable / disable APM wake up, no RMW needed */
	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);

	/* set magic packet filter enabled */
	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
}

/**
 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
 * @pf: pointer to the PF struct
 *
 * Issue firmware command to enable multicast magic wake, making
 * sure that any locally administered address (LAA) is used for
 * wake, and that PF reset doesn't undo the LAA.
 */
static void ice_setup_mc_magic_wake(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 mac_addr[ETH_ALEN];
	struct ice_vsi *vsi;
	int status;
	u8 flags;

	if (!pf->wol_ena)
		return;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Get current MAC address in case it's an LAA */
	if (vsi->netdev)
		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
	else
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);

	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;

	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
	if (status)
		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
			status, ice_aq_str(hw->adminq.sq_last_status));
}

/**
 * ice_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void ice_remove(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
		if (!ice_is_reset_in_progress(pf->state))
			break;
		msleep(100);
	}

	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
		ice_free_vfs(pf);
	}

	ice_hwmon_exit(pf);

	ice_service_task_stop(pf);
	ice_aq_cancel_waiting_tasks(pf);
	set_bit(ICE_DOWN, pf->state);

	if (!ice_is_safe_mode(pf))
		ice_remove_arfs(pf);

	devl_lock(priv_to_devlink(pf));
	ice_dealloc_all_dynamic_ports(pf);
	ice_deinit_devlink(pf);

	ice_unload(pf);
	devl_unlock(priv_to_devlink(pf));

	ice_deinit(pf);
	ice_vsi_release_all(pf);

	ice_setup_mc_magic_wake(pf);
	ice_set_wake(pf);

	ice_adapter_put(pdev);
}

/**
 * ice_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 */
static void ice_shutdown(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	ice_remove(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_ena);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ice_prepare_for_shutdown - prep for PCI shutdown
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for PCI device shutdown
 */
static void ice_prepare_for_shutdown(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 v;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			pf->vsi[v]->vsi_num = 0;

	ice_shutdown_all_ctrlq(hw, true);
}

/**
 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
 * @pf: board private structure to reinitialize
 *
 * This routine reinitializes the interrupt scheme that was cleared during
 * the power management suspend callback.
 *
 * This should be called during resume routine to re-allocate the q_vectors
 * and reacquire interrupts.
 */
static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret, v;

	/* Since we clear the MSIX flag during suspend, we need to
	 * set it back during resume...
	 */

	ret = ice_init_interrupt_scheme(pf);
	if (ret) {
		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
		return ret;
	}

	/* Remap vectors and rings, after successful re-init interrupts */
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;

		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
		if (ret)
			goto err_reinit;
		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
		rtnl_lock();
		ice_vsi_set_napi_queues(pf->vsi[v]);
		rtnl_unlock();
	}

	ret = ice_req_irq_msix_misc(pf);
	if (ret) {
		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
			ret);
		goto err_reinit;
	}

	return 0;

err_reinit:
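	/* unwind: v indexes the VSI that failed, so only the VSIs already
	 * processed have q_vectors to free
	 */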
	while (v--)
		if (pf->vsi[v]) {
			rtnl_lock();
			ice_vsi_clear_napi_queues(pf->vsi[v]);
			rtnl_unlock();
			ice_vsi_free_q_vectors(pf->vsi[v]);
		}

	return ret;
}

/**
 * ice_suspend - PM callback to quiesce the device for a D3 transition
 * @dev: generic device information structure
 *
 * Power Management callback to quiesce the device and prepare
 * for D3 transition.
 */
static int ice_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ice_pf *pf;
	int disabled, v;

	pf = pci_get_drvdata(pdev);

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Device is not ready, no need to suspend it\n");
		return -EBUSY;
	}

	/* Stop watchdog tasks until resume completion.
	 * Even though it is most likely that the service task is
	 * disabled if the device is suspended or down, the service task's
	 * state is controlled by a different state bit, and we should
	 * store and honor whatever state that bit is in at this point.
	 */
	disabled = ice_service_task_stop(pf);

	ice_deinit_rdma(pf);

5614	/* Already suspended? Then there is nothing to do */
5615 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5616 if (!disabled)
5617 ice_service_task_restart(pf);
5618 return 0;
5619 }
5620
5621 if (test_bit(ICE_DOWN, pf->state) ||
5622 ice_is_reset_in_progress(pf->state)) {
5623 dev_err(dev, "can't suspend device in reset or already down\n");
5624 if (!disabled)
5625 ice_service_task_restart(pf);
5626 return 0;
5627 }
5628
5629 ice_setup_mc_magic_wake(pf);
5630
5631 ice_prepare_for_shutdown(pf);
5632
5633 ice_set_wake(pf);
5634
5635 /* Free vectors, clear the interrupt scheme and release IRQs
5636 * for proper hibernation, especially with large number of CPUs.
5637 * Otherwise hibernation might fail when mapping all the vectors back
5638 * to CPU0.
5639 */
5640 ice_free_irq_msix_misc(pf);
5641 ice_for_each_vsi(pf, v) {
5642 if (!pf->vsi[v])
5643 continue;
5644 rtnl_lock();
5645 ice_vsi_clear_napi_queues(pf->vsi[v]);
5646 rtnl_unlock();
5647 ice_vsi_free_q_vectors(pf->vsi[v]);
5648 }
5649 ice_clear_interrupt_scheme(pf);
5650
5651 pci_save_state(pdev);
5652 pci_wake_from_d3(pdev, pf->wol_ena);
5653 pci_set_power_state(pdev, PCI_D3hot);
5654 return 0;
5655}
5656
5657/**
5658 * ice_resume - PM callback for waking up from D3
5659 * @dev: generic device information structure
5660 */
5661static int ice_resume(struct device *dev)
5662{
5663 struct pci_dev *pdev = to_pci_dev(dev);
5664 enum ice_reset_req reset_type;
5665 struct ice_pf *pf;
5666 struct ice_hw *hw;
5667 int ret;
5668
5669 pci_set_power_state(pdev, PCI_D0);
5670 pci_restore_state(pdev);
5671 pci_save_state(pdev);
5672
5673 if (!pci_device_is_present(pdev))
5674 return -ENODEV;
5675
5676 ret = pci_enable_device_mem(pdev);
5677 if (ret) {
5678 dev_err(dev, "Cannot enable device after suspend\n");
5679 return ret;
5680 }
5681
5682 pf = pci_get_drvdata(pdev);
5683 hw = &pf->hw;
5684
5685 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5686 ice_print_wake_reason(pf);
5687
5688 /* We cleared the interrupt scheme when we suspended, so we need to
5689 * restore it now to resume device functionality.
5690 */
5691 ret = ice_reinit_interrupt_scheme(pf);
5692 if (ret)
5693 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5694
5695 ret = ice_init_rdma(pf);
5696 if (ret)
5697 dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5698 ret);
5699
5700 clear_bit(ICE_DOWN, pf->state);
5701 /* Now perform PF reset and rebuild */
5702 reset_type = ICE_RESET_PFR;
5703 /* re-enable service task for reset, but allow reset to schedule it */
5704 clear_bit(ICE_SERVICE_DIS, pf->state);
5705
5706 if (ice_schedule_reset(pf, reset_type))
5707 dev_err(dev, "Reset during resume failed.\n");
5708
5709 clear_bit(ICE_SUSPENDED, pf->state);
5710 ice_service_task_restart(pf);
5711
5712 /* Restart the service task */
5713 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5714
5715 return 0;
5716}
5717
5718/**
5719 * ice_pci_err_detected - warning that PCI error has been detected
5720 * @pdev: PCI device information struct
5721 * @err: the type of PCI error
5722 *
5723 * Called to warn that something happened on the PCI bus and the error handling
5724 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5725 */
5726static pci_ers_result_t
5727ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5728{
5729 struct ice_pf *pf = pci_get_drvdata(pdev);
5730
5731 if (!pf) {
5732 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5733 __func__, err);
5734 return PCI_ERS_RESULT_DISCONNECT;
5735 }
5736
5737 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5738 ice_service_task_stop(pf);
5739
5740 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5741 set_bit(ICE_PFR_REQ, pf->state);
5742 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5743 }
5744 }
5745
5746 return PCI_ERS_RESULT_NEED_RESET;
5747}
5748
5749/**
5750 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5751 * @pdev: PCI device information struct
5752 *
5753 * Called to determine if the driver can recover from the PCI slot reset by
5754 * using a register read to determine if the device is recoverable.
5755 */
5756static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5757{
5758 struct ice_pf *pf = pci_get_drvdata(pdev);
5759 pci_ers_result_t result;
5760 int err;
5761 u32 reg;
5762
5763 err = pci_enable_device_mem(pdev);
5764 if (err) {
5765 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5766 err);
5767 result = PCI_ERS_RESULT_DISCONNECT;
5768 } else {
5769 pci_set_master(pdev);
5770 pci_restore_state(pdev);
5771 pci_save_state(pdev);
5772 pci_wake_from_d3(pdev, false);
5773
5774 /* Check for life */
5775 reg = rd32(&pf->hw, GLGEN_RTRIG);
5776 if (!reg)
5777 result = PCI_ERS_RESULT_RECOVERED;
5778 else
5779 result = PCI_ERS_RESULT_DISCONNECT;
5780 }
5781
5782 return result;
5783}
5784
5785/**
5786 * ice_pci_err_resume - restart operations after PCI error recovery
5787 * @pdev: PCI device information struct
5788 *
5789 * Called to allow the driver to bring things back up after PCI error and/or
5790 * reset recovery have finished
5791 */
5792static void ice_pci_err_resume(struct pci_dev *pdev)
5793{
5794 struct ice_pf *pf = pci_get_drvdata(pdev);
5795
5796 if (!pf) {
5797 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5798 __func__);
5799 return;
5800 }
5801
5802 if (test_bit(ICE_SUSPENDED, pf->state)) {
5803 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5804 __func__);
5805 return;
5806 }
5807
5808 ice_restore_all_vfs_msi_state(pf);
5809
5810 ice_do_reset(pf, ICE_RESET_PFR);
5811 ice_service_task_restart(pf);
5812 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5813}
5814
5815/**
5816 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5817 * @pdev: PCI device information struct
5818 */
5819static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5820{
5821 struct ice_pf *pf = pci_get_drvdata(pdev);
5822
5823 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5824 ice_service_task_stop(pf);
5825
5826 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5827 set_bit(ICE_PFR_REQ, pf->state);
5828 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5829 }
5830 }
5831}
5832
5833/**
5834 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5835 * @pdev: PCI device information struct
5836 */
5837static void ice_pci_err_reset_done(struct pci_dev *pdev)
5838{
5839 ice_pci_err_resume(pdev);
5840}
5841
5842/* ice_pci_tbl - PCI Device ID Table
5843 *
5844 * Wildcard entries (PCI_ANY_ID) should come last
5845 * Last entry must be all 0s
5846 *
5847 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5848 * Class, Class Mask, private data (not used) }
5849 */
5850static const struct pci_device_id ice_pci_tbl[] = {
5851 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5852 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5853 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5854 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5855 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5856 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5857 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5858 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5859 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5860 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5861 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5862 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5863 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5864 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5865 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5866 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5867 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5868 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5869 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5870 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5871 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5872 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5873 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5874 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5875 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5876 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5877	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE) },
5878	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP) },
5879	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP) },
5880	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII) },
5881 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
5882 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
5883 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
5884 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
5885	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE) },
5886	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE) },
5887	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP) },
5888	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP) },
5889	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP) },
5890	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP) },
5891 /* required last entry */
5892 {}
5893};
5894MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
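/* For reference, each PCI_VDEVICE(INTEL, id) entry above is shorthand that
 * expands (per include/linux/pci.h) to roughly the initializer below, with
 * the subsystem IDs wildcarded so any subsystem variant of the device
 * matches:
 *
 *	{ .vendor = PCI_VENDOR_ID_INTEL, .device = ICE_DEV_ID_E810C_SFP,
 *	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }
 */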
5895
5896static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
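/* DEFINE_SIMPLE_DEV_PM_OPS() wires the same suspend/resume pair into all of
 * the system sleep entry points (suspend/resume, freeze/thaw,
 * poweroff/restore). A rough sketch of what the macro generates, going by
 * the helpers in include/linux/pm.h:
 *
 *	static const struct dev_pm_ops ice_pm_ops = {
 *		SYSTEM_SLEEP_PM_OPS(ice_suspend, ice_resume)
 *	};
 */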
5897
5898static const struct pci_error_handlers ice_pci_err_handler = {
5899 .error_detected = ice_pci_err_detected,
5900 .slot_reset = ice_pci_err_slot_reset,
5901 .reset_prepare = ice_pci_err_reset_prepare,
5902 .reset_done = ice_pci_err_reset_done,
5903 .resume = ice_pci_err_resume
5904};
5905
5906static struct pci_driver ice_driver = {
5907 .name = KBUILD_MODNAME,
5908 .id_table = ice_pci_tbl,
5909 .probe = ice_probe,
5910 .remove = ice_remove,
5911 .driver.pm = pm_sleep_ptr(&ice_pm_ops),
5912 .shutdown = ice_shutdown,
5913 .sriov_configure = ice_sriov_configure,
5914 .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5915 .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5916 .err_handler = &ice_pci_err_handler
5917};
5918
5919/**
5920 * ice_module_init - Driver registration routine
5921 *
5922 * ice_module_init is the first routine called when the driver is
5923 * loaded. All it does is register with the PCI subsystem.
5924 */
5925static int __init ice_module_init(void)
5926{
5927 int status = -ENOMEM;
5928
5929 pr_info("%s\n", ice_driver_string);
5930 pr_info("%s\n", ice_copyright);
5931
5932 ice_adv_lnk_speed_maps_init();
5933
5934 ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
5935 if (!ice_wq) {
5936 pr_err("Failed to create workqueue\n");
5937 return status;
5938 }
5939
5940 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5941 if (!ice_lag_wq) {
5942 pr_err("Failed to create LAG workqueue\n");
5943 goto err_dest_wq;
5944 }
5945
5946 ice_debugfs_init();
5947
5948 status = pci_register_driver(&ice_driver);
5949 if (status) {
5950 pr_err("failed to register PCI driver, err %d\n", status);
5951 goto err_dest_lag_wq;
5952 }
5953
5954 status = ice_sf_driver_register();
5955 if (status) {
5956 pr_err("Failed to register SF driver, err %d\n", status);
5957 goto err_sf_driver;
5958 }
5959
5960 return 0;
5961
5962err_sf_driver:
5963 pci_unregister_driver(&ice_driver);
5964err_dest_lag_wq:
5965 destroy_workqueue(ice_lag_wq);
5966 ice_debugfs_exit();
5967err_dest_wq:
5968 destroy_workqueue(ice_wq);
5969 return status;
5970}
5971module_init(ice_module_init);
5972
5973/**
5974 * ice_module_exit - Driver exit cleanup routine
5975 *
5976 * ice_module_exit is called just before the driver is removed
5977 * from memory.
5978 */
5979static void __exit ice_module_exit(void)
5980{
5981 ice_sf_driver_unregister();
5982 pci_unregister_driver(&ice_driver);
5983 ice_debugfs_exit();
5984 destroy_workqueue(ice_wq);
5985 destroy_workqueue(ice_lag_wq);
5986 pr_info("module unloaded\n");
5987}
5988module_exit(ice_module_exit);
5989
5990/**
5991 * ice_set_mac_address - NDO callback to set MAC address
5992 * @netdev: network interface device structure
5993 * @pi: pointer to an address structure
5994 *
5995 * Returns 0 on success, negative on failure
5996 */
5997static int ice_set_mac_address(struct net_device *netdev, void *pi)
5998{
5999 struct ice_netdev_priv *np = netdev_priv(netdev);
6000 struct ice_vsi *vsi = np->vsi;
6001 struct ice_pf *pf = vsi->back;
6002 struct ice_hw *hw = &pf->hw;
6003 struct sockaddr *addr = pi;
6004 u8 old_mac[ETH_ALEN];
6005 u8 flags = 0;
6006 u8 *mac;
6007 int err;
6008
6009 mac = (u8 *)addr->sa_data;
6010
6011 if (!is_valid_ether_addr(mac))
6012 return -EADDRNOTAVAIL;
6013
6014 if (test_bit(ICE_DOWN, pf->state) ||
6015 ice_is_reset_in_progress(pf->state)) {
6016 netdev_err(netdev, "can't set mac %pM. device not ready\n",
6017 mac);
6018 return -EBUSY;
6019 }
6020
6021 if (ice_chnl_dmac_fltr_cnt(pf)) {
6022 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
6023 mac);
6024 return -EAGAIN;
6025 }
6026
6027 netif_addr_lock_bh(netdev);
6028 ether_addr_copy(old_mac, netdev->dev_addr);
6029 /* change the netdev's MAC address */
6030 eth_hw_addr_set(netdev, mac);
6031 netif_addr_unlock_bh(netdev);
6032
6033 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
6034 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
6035 if (err && err != -ENOENT) {
6036 err = -EADDRNOTAVAIL;
6037 goto err_update_filters;
6038 }
6039
6040 /* Add filter for new MAC. If filter exists, return success */
6041 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
6042 if (err == -EEXIST) {
6043 /* Although this MAC filter is already present in hardware it's
6044 * possible in some cases (e.g. bonding) that dev_addr was
6045 * modified outside of the driver and needs to be restored back
6046 * to this value.
6047 */
6048 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
6049
6050 return 0;
6051 } else if (err) {
6052 /* error if the new filter addition failed */
6053 err = -EADDRNOTAVAIL;
6054 }
6055
6056err_update_filters:
6057 if (err) {
6058 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
6059 mac);
6060 netif_addr_lock_bh(netdev);
6061 eth_hw_addr_set(netdev, old_mac);
6062 netif_addr_unlock_bh(netdev);
6063 return err;
6064 }
6065
6066 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
6067 netdev->dev_addr);
6068
6069 /* write new MAC address to the firmware */
6070 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
6071 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
6072 if (err) {
6073 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
6074 mac, err);
6075 }
6076 return 0;
6077}
6078
6079/**
6080 * ice_set_rx_mode - NDO callback to set the netdev filters
6081 * @netdev: network interface device structure
6082 */
6083static void ice_set_rx_mode(struct net_device *netdev)
6084{
6085 struct ice_netdev_priv *np = netdev_priv(netdev);
6086 struct ice_vsi *vsi = np->vsi;
6087
6088 if (!vsi || ice_is_switchdev_running(vsi->back))
6089 return;
6090
6091	/* Set the flags to synchronize filters;
6092	 * ndo_set_rx_mode may be triggered even without a change in netdev
6093	 * flags.
6094 */
6095 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
6096 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
6097 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
6098
6099 /* schedule our worker thread which will take care of
6100 * applying the new filter changes
6101 */
6102 ice_service_task_schedule(vsi->back);
6103}
6104
6105/**
6106 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6107 * @netdev: network interface device structure
6108 * @queue_index: Queue ID
6109 * @maxrate: maximum bandwidth in Mbps
6110 */
6111static int
6112ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
6113{
6114 struct ice_netdev_priv *np = netdev_priv(netdev);
6115 struct ice_vsi *vsi = np->vsi;
6116 u16 q_handle;
6117 int status;
6118 u8 tc;
6119
6120 /* Validate maxrate requested is within permitted range */
6121 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
6122 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
6123 maxrate, queue_index);
6124 return -EINVAL;
6125 }
6126
6127 q_handle = vsi->tx_rings[queue_index]->q_handle;
6128 tc = ice_dcb_get_tc(vsi, queue_index);
6129
6130 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
6131 if (!vsi) {
6132 netdev_err(netdev, "Invalid VSI for given queue %d\n",
6133 queue_index);
6134 return -EINVAL;
6135 }
6136
6137 /* Set BW back to default, when user set maxrate to 0 */
6138 if (!maxrate)
6139 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
6140 q_handle, ICE_MAX_BW);
6141 else
6142 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6143 q_handle, ICE_MAX_BW, maxrate * 1000);
6144 if (status)
6145 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
6146 status);
6147
6148 return status;
6149}
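/* ndo_set_tx_maxrate is normally exercised from user space through the
 * per-queue sysfs attribute, which takes the cap in Mbps. An illustrative
 * session (device and queue names are examples only):
 *
 *	# echo 5000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate	(5 Gbps cap)
 *	# echo 0 > /sys/class/net/eth0/queues/tx-0/tx_maxrate	(back to default)
 */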
6150
6151/**
6152 * ice_fdb_add - add an entry to the hardware database
6153 * @ndm: the input from the stack
6154 * @tb: pointer to array of nladdr (unused)
6155 * @dev: the net device pointer
6156 * @addr: the MAC address entry being added
6157 * @vid: VLAN ID
6158 * @flags: instructions from stack about fdb operation
6159 * @notified: whether notification was emitted
6160 * @extack: netlink extended ack
6161 */
6162static int
6163ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
6164 struct net_device *dev, const unsigned char *addr, u16 vid,
6165 u16 flags, bool *notified,
6166 struct netlink_ext_ack __always_unused *extack)
6167{
6168 int err;
6169
6170 if (vid) {
6171 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
6172 return -EINVAL;
6173 }
6174 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6175 netdev_err(dev, "FDB only supports static addresses\n");
6176 return -EINVAL;
6177 }
6178
6179 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6180 err = dev_uc_add_excl(dev, addr);
6181 else if (is_multicast_ether_addr(addr))
6182 err = dev_mc_add_excl(dev, addr);
6183 else
6184 err = -EINVAL;
6185
6186 /* Only return duplicate errors if NLM_F_EXCL is set */
6187 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6188 err = 0;
6189
6190 return err;
6191}
6192
6193/**
6194 * ice_fdb_del - delete an entry from the hardware database
6195 * @ndm: the input from the stack
6196 * @tb: pointer to array of nladdr (unused)
6197 * @dev: the net device pointer
6198 * @addr: the MAC address entry being removed
6199 * @vid: VLAN ID
6200 * @notified: whether notification was emitted
6201 * @extack: netlink extended ack
6202 */
6203static int
6204ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6205 struct net_device *dev, const unsigned char *addr,
6206 __always_unused u16 vid, bool *notified,
6207 struct netlink_ext_ack *extack)
6208{
6209 int err;
6210
6211 if (ndm->ndm_state & NUD_PERMANENT) {
6212 netdev_err(dev, "FDB only supports static addresses\n");
6213 return -EINVAL;
6214 }
6215
6216 if (is_unicast_ether_addr(addr))
6217 err = dev_uc_del(dev, addr);
6218 else if (is_multicast_ether_addr(addr))
6219 err = dev_mc_del(dev, addr);
6220 else
6221 err = -EINVAL;
6222
6223 return err;
6224}
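/* Both FDB ops above back the iproute2 "bridge fdb" interface when the
 * "self" keyword targets the device itself; an illustrative pair of
 * commands (address and device name are examples only):
 *
 *	# bridge fdb add 01:00:5e:00:00:42 dev eth0 self permanent
 *	# bridge fdb del 01:00:5e:00:00:42 dev eth0 self
 */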
6225
6226#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
6227 NETIF_F_HW_VLAN_CTAG_TX | \
6228 NETIF_F_HW_VLAN_STAG_RX | \
6229 NETIF_F_HW_VLAN_STAG_TX)
6230
6231#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
6232 NETIF_F_HW_VLAN_STAG_RX)
6233
6234#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
6235 NETIF_F_HW_VLAN_STAG_FILTER)
6236
6237/**
6238 * ice_fix_features - fix the netdev features flags based on device limitations
6239 * @netdev: ptr to the netdev that flags are being fixed on
6240 * @features: features that need to be checked and possibly fixed
6241 *
6242 * Make sure any fixups are made to features in this callback. This enables the
6243 * driver to not have to check unsupported configurations throughout the driver
6244 * because that's the responsiblity of this callback.
6245 *
6246 * Single VLAN Mode (SVM) Supported Features:
6247 * NETIF_F_HW_VLAN_CTAG_FILTER
6248 * NETIF_F_HW_VLAN_CTAG_RX
6249 * NETIF_F_HW_VLAN_CTAG_TX
6250 *
6251 * Double VLAN Mode (DVM) Supported Features:
6252 * NETIF_F_HW_VLAN_CTAG_FILTER
6253 * NETIF_F_HW_VLAN_CTAG_RX
6254 * NETIF_F_HW_VLAN_CTAG_TX
6255 *
6256 * NETIF_F_HW_VLAN_STAG_FILTER
6257 * NETIF_F_HW_VLAN_STAG_RX
6258 * NETIF_F_HW_VLAN_STAG_TX
6259 *
6260 * Features that need fixing:
6261 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6262 * These are mutually exclusive as the VSI context cannot support multiple
6263 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
6264 * is not done, then default to clearing the requested STAG offload
6265 * settings.
6266 *
6267 * All supported filtering has to be enabled or disabled together. For
6268 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6269 * together. If this is not done, then default to VLAN filtering disabled.
6270 * These are mutually exclusive as there is currently no way to
6271 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6272 * prune rules.
6273 */
6274static netdev_features_t
6275ice_fix_features(struct net_device *netdev, netdev_features_t features)
6276{
6277 struct ice_netdev_priv *np = netdev_priv(netdev);
6278 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6279 bool cur_ctag, cur_stag, req_ctag, req_stag;
6280
6281 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6282 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6283 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6284
6285 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6286 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6287 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6288
6289 if (req_vlan_fltr != cur_vlan_fltr) {
6290 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6291 if (req_ctag && req_stag) {
6292 features |= NETIF_VLAN_FILTERING_FEATURES;
6293 } else if (!req_ctag && !req_stag) {
6294 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6295 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
6296 (!cur_stag && req_stag && !cur_ctag)) {
6297 features |= NETIF_VLAN_FILTERING_FEATURES;
6298 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6299 } else if ((cur_ctag && !req_ctag && cur_stag) ||
6300 (cur_stag && !req_stag && cur_ctag)) {
6301 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6302 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6303 }
6304 } else {
6305 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6306 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6307
6308 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6309 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6310 }
6311 }
6312
6313 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6314 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6315 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6316 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6317 NETIF_F_HW_VLAN_STAG_TX);
6318 }
6319
6320 if (!(netdev->features & NETIF_F_RXFCS) &&
6321 (features & NETIF_F_RXFCS) &&
6322 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6323 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6324 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6325 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6326 }
6327
6328 return features;
6329}
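/* From user space these fixups are visible through ethtool: requesting only
 * one half of a coupled feature pair is adjusted here (with one of the
 * warnings above). An illustrative session in DVM, using the feature
 * strings defined by the netdev core (device name is an example only):
 *
 *	# ethtool -K eth0 rx-vlan-filter off
 *	(rx-vlan-stag-filter is turned off as well, per the rules above)
 */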
6330
6331/**
6332 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6333 * @vsi: PF's VSI
6334 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6335 *
6336 * Store current stripped VLAN proto in ring packet context,
6337 * so it can be accessed more efficiently by packet processing code.
6338 */
6339static void
6340ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6341{
6342 u16 i;
6343
6344 ice_for_each_alloc_rxq(vsi, i)
6345 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6346}
6347
6348/**
6349 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6350 * @vsi: PF's VSI
6351 * @features: features used to determine VLAN offload settings
6352 *
6353 * First, determine the vlan_ethertype based on the VLAN offload bits in
6354 * features. Then determine if stripping and insertion should be enabled or
6355 * disabled. Finally enable or disable VLAN stripping and insertion.
6356 */
6357static int
6358ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6359{
6360 bool enable_stripping = true, enable_insertion = true;
6361 struct ice_vsi_vlan_ops *vlan_ops;
6362 int strip_err = 0, insert_err = 0;
6363 u16 vlan_ethertype = 0;
6364
6365 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6366
6367 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6368 vlan_ethertype = ETH_P_8021AD;
6369 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6370 vlan_ethertype = ETH_P_8021Q;
6371
6372 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6373 enable_stripping = false;
6374 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6375 enable_insertion = false;
6376
6377 if (enable_stripping)
6378 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6379 else
6380 strip_err = vlan_ops->dis_stripping(vsi);
6381
6382 if (enable_insertion)
6383 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6384 else
6385 insert_err = vlan_ops->dis_insertion(vsi);
6386
6387 if (strip_err || insert_err)
6388 return -EIO;
6389
6390 ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6391 htons(vlan_ethertype) : 0);
6392
6393 return 0;
6394}
6395
6396/**
6397 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6398 * @vsi: PF's VSI
6399 * @features: features used to determine VLAN filtering settings
6400 *
6401 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6402 * features.
6403 */
6404static int
6405ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6406{
6407 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6408 int err = 0;
6409
6410 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6411 * if either bit is set. In switchdev mode Rx filtering should never be
6412 * enabled.
6413 */
6414 if ((features &
6415 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
6416 !ice_is_eswitch_mode_switchdev(vsi->back))
6417 err = vlan_ops->ena_rx_filtering(vsi);
6418 else
6419 err = vlan_ops->dis_rx_filtering(vsi);
6420
6421 return err;
6422}
6423
6424/**
6425 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6426 * @netdev: ptr to the netdev being adjusted
6427 * @features: the feature set that the stack is suggesting
6428 *
6429 * Only update VLAN settings if the requested_vlan_features are different
6430 * from the current_vlan_features.
6431 */
6432static int
6433ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6434{
6435 netdev_features_t current_vlan_features, requested_vlan_features;
6436 struct ice_netdev_priv *np = netdev_priv(netdev);
6437 struct ice_vsi *vsi = np->vsi;
6438 int err;
6439
6440 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6441 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6442 if (current_vlan_features ^ requested_vlan_features) {
6443 if ((features & NETIF_F_RXFCS) &&
6444 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6445 dev_err(ice_pf_to_dev(vsi->back),
6446 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6447 return -EIO;
6448 }
6449
6450 err = ice_set_vlan_offload_features(vsi, features);
6451 if (err)
6452 return err;
6453 }
6454
6455 current_vlan_features = netdev->features &
6456 NETIF_VLAN_FILTERING_FEATURES;
6457 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6458 if (current_vlan_features ^ requested_vlan_features) {
6459 err = ice_set_vlan_filtering_features(vsi, features);
6460 if (err)
6461 return err;
6462 }
6463
6464 return 0;
6465}
6466
6467/**
6468 * ice_set_loopback - turn on/off loopback mode on underlying PF
6469 * @vsi: ptr to VSI
6470 * @ena: flag to indicate the on/off setting
6471 */
6472static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6473{
6474 bool if_running = netif_running(vsi->netdev);
6475 int ret;
6476
6477 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6478 ret = ice_down(vsi);
6479 if (ret) {
6480 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6481 return ret;
6482 }
6483 }
6484 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6485 if (ret)
6486 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6487 if (if_running)
6488 ret = ice_up(vsi);
6489
6490 return ret;
6491}
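/* NETIF_F_LOOPBACK is exposed by the netdev core as the "loopback" feature
 * string, so MAC loopback can be toggled via ethtool (illustrative only):
 *
 *	# ethtool -K eth0 loopback on
 */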
6492
6493/**
6494 * ice_set_features - set the netdev feature flags
6495 * @netdev: ptr to the netdev being adjusted
6496 * @features: the feature set that the stack is suggesting
6497 */
6498static int
6499ice_set_features(struct net_device *netdev, netdev_features_t features)
6500{
6501 netdev_features_t changed = netdev->features ^ features;
6502 struct ice_netdev_priv *np = netdev_priv(netdev);
6503 struct ice_vsi *vsi = np->vsi;
6504 struct ice_pf *pf = vsi->back;
6505 int ret = 0;
6506
6507 /* Don't set any netdev advanced features with device in Safe Mode */
6508 if (ice_is_safe_mode(pf)) {
6509 dev_err(ice_pf_to_dev(pf),
6510 "Device is in Safe Mode - not enabling advanced netdev features\n");
6511 return ret;
6512 }
6513
6514 /* Do not change setting during reset */
6515 if (ice_is_reset_in_progress(pf->state)) {
6516 dev_err(ice_pf_to_dev(pf),
6517 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6518 return -EBUSY;
6519 }
6520
6521 /* Multiple features can be changed in one call so keep features in
6522 * separate if/else statements to guarantee each feature is checked
6523 */
6524 if (changed & NETIF_F_RXHASH)
6525 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6526
6527 ret = ice_set_vlan_features(netdev, features);
6528 if (ret)
6529 return ret;
6530
6531 /* Turn on receive of FCS aka CRC, and after setting this
6532 * flag the packet data will have the 4 byte CRC appended
6533 */
6534 if (changed & NETIF_F_RXFCS) {
6535 if ((features & NETIF_F_RXFCS) &&
6536 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6537 dev_err(ice_pf_to_dev(vsi->back),
6538 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6539 return -EIO;
6540 }
6541
6542 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6543 ret = ice_down_up(vsi);
6544 if (ret)
6545 return ret;
6546 }
6547
6548 if (changed & NETIF_F_NTUPLE) {
6549 bool ena = !!(features & NETIF_F_NTUPLE);
6550
6551 ice_vsi_manage_fdir(vsi, ena);
6552 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6553 }
6554
6555 /* don't turn off hw_tc_offload when ADQ is already enabled */
6556 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6557 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6558 return -EACCES;
6559 }
6560
6561 if (changed & NETIF_F_HW_TC) {
6562 bool ena = !!(features & NETIF_F_HW_TC);
6563
6564 assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);
6565 }
6566
6567 if (changed & NETIF_F_LOOPBACK)
6568 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6569
6570 return ret;
6571}
6572
6573/**
6574 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6575 * @vsi: VSI to setup VLAN properties for
6576 */
6577static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6578{
6579 int err;
6580
6581 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6582 if (err)
6583 return err;
6584
6585 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6586 if (err)
6587 return err;
6588
6589 return ice_vsi_add_vlan_zero(vsi);
6590}
6591
6592/**
6593 * ice_vsi_cfg_lan - Setup the VSI lan related config
6594 * @vsi: the VSI being configured
6595 *
6596 * Return 0 on success and negative value on error
6597 */
6598int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6599{
6600 int err;
6601
6602 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6603 ice_set_rx_mode(vsi->netdev);
6604
6605 err = ice_vsi_vlan_setup(vsi);
6606 if (err)
6607 return err;
6608 }
6609 ice_vsi_cfg_dcb_rings(vsi);
6610
6611 err = ice_vsi_cfg_lan_txqs(vsi);
6612 if (!err && ice_is_xdp_ena_vsi(vsi))
6613 err = ice_vsi_cfg_xdp_txqs(vsi);
6614 if (!err)
6615 err = ice_vsi_cfg_rxqs(vsi);
6616
6617 return err;
6618}
6619
6620/* THEORY OF MODERATION:
6621 * The ice driver hardware works differently from the hardware that DIMLIB was
6622 * originally made for. ice hardware doesn't have packet count limits that
6623 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6624 * which is hard-coded to a limit of 250,000 ints/second.
6625 * If not using dynamic moderation, the INTRL value can be modified
6626 * by ethtool rx-usecs-high.
6627 */
6628struct ice_dim {
6629 /* the throttle rate for interrupts, basically worst case delay before
6630 * an initial interrupt fires, value is stored in microseconds.
6631 */
6632 u16 itr;
6633};
6634
6635/* Make a different profile for Rx that doesn't allow quite so aggressive
6636 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6637 * second).
6638 */
6639static const struct ice_dim rx_profile[] = {
6640 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6641 {8}, /* 125,000 ints/s */
6642 {16}, /* 62,500 ints/s */
6643 {62}, /* 16,129 ints/s */
6644 {126} /* 7,936 ints/s */
6645};
6646
6647/* The transmit profile, with the same sorts of values
6648 * as the Rx profile above.
6649 */
6650static const struct ice_dim tx_profile[] = {
6651 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6652 {8}, /* 125,000 ints/s */
6653 {40}, /* 16,125 ints/s */
6654 {128}, /* 7,812 ints/s */
6655 {256} /* 3,906 ints/s */
6656};
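/* The ints/s figures in the two tables follow from the ITR's microsecond
 * time base: an ITR of N microseconds allows at most 1,000,000 / N
 * interrupts per second (before the INTRL cap kicks in). A minimal sketch
 * of that conversion; the helper is illustrative and not part of the
 * driver:
 */
#if 0
static unsigned int ice_example_itr_to_ints(u16 itr_us)
{
	/* e.g. 8 us -> 125,000 ints/s, matching rx_profile[1] above */
	return itr_us ? 1000000U / itr_us : 0;
}
#endif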
6657
6658static void ice_tx_dim_work(struct work_struct *work)
6659{
6660 struct ice_ring_container *rc;
6661 struct dim *dim;
6662 u16 itr;
6663
6664 dim = container_of(work, struct dim, work);
6665 rc = dim->priv;
6666
6667 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6668
6669 /* look up the values in our local table */
6670 itr = tx_profile[dim->profile_ix].itr;
6671
6672 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6673 ice_write_itr(rc, itr);
6674
6675 dim->state = DIM_START_MEASURE;
6676}
6677
6678static void ice_rx_dim_work(struct work_struct *work)
6679{
6680 struct ice_ring_container *rc;
6681 struct dim *dim;
6682 u16 itr;
6683
6684 dim = container_of(work, struct dim, work);
6685 rc = dim->priv;
6686
6687 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6688
6689 /* look up the values in our local table */
6690 itr = rx_profile[dim->profile_ix].itr;
6691
6692 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6693 ice_write_itr(rc, itr);
6694
6695 dim->state = DIM_START_MEASURE;
6696}
6697
6698#define ICE_DIM_DEFAULT_PROFILE_IX 1
6699
6700/**
6701 * ice_init_moderation - set up interrupt moderation
6702 * @q_vector: the vector containing rings to be configured
6703 *
6704 * Set up the interrupt moderation registers, with the intent to do the
6705 * right thing when called from reset or from probe, whether or not
6706 * dynamic moderation is enabled. Take special care to write all the
6707 * registers in both modes (dynamic or not) in order to make sure the
6708 * hardware is in a known state.
6709 */
6710static void ice_init_moderation(struct ice_q_vector *q_vector)
6711{
6712 struct ice_ring_container *rc;
6713 bool tx_dynamic, rx_dynamic;
6714
6715 rc = &q_vector->tx;
6716 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6717 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6718 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6719 rc->dim.priv = rc;
6720 tx_dynamic = ITR_IS_DYNAMIC(rc);
6721
6722 /* set the initial TX ITR to match the above */
6723 ice_write_itr(rc, tx_dynamic ?
6724 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6725
6726 rc = &q_vector->rx;
6727 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6728 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6729 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6730 rc->dim.priv = rc;
6731 rx_dynamic = ITR_IS_DYNAMIC(rc);
6732
6733 /* set the initial RX ITR to match the above */
6734 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6735 rc->itr_setting);
6736
6737 ice_set_q_vector_intrl(q_vector);
6738}
6739
6740/**
6741 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6742 * @vsi: the VSI being configured
6743 */
6744static void ice_napi_enable_all(struct ice_vsi *vsi)
6745{
6746 int q_idx;
6747
6748 if (!vsi->netdev)
6749 return;
6750
6751 ice_for_each_q_vector(vsi, q_idx) {
6752 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6753
6754 ice_init_moderation(q_vector);
6755
6756 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6757 napi_enable(&q_vector->napi);
6758 }
6759}
6760
6761/**
6762 * ice_up_complete - Finish the last steps of bringing up a connection
6763 * @vsi: The VSI being configured
6764 *
6765 * Return 0 on success and negative value on error
6766 */
6767static int ice_up_complete(struct ice_vsi *vsi)
6768{
6769 struct ice_pf *pf = vsi->back;
6770 int err;
6771
6772 ice_vsi_cfg_msix(vsi);
6773
6774 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6775 * Tx queue group list was configured and the context bits were
6776 * programmed using ice_vsi_cfg_txqs
6777 */
6778 err = ice_vsi_start_all_rx_rings(vsi);
6779 if (err)
6780 return err;
6781
6782 clear_bit(ICE_VSI_DOWN, vsi->state);
6783 ice_napi_enable_all(vsi);
6784 ice_vsi_ena_irq(vsi);
6785
6786 if (vsi->port_info &&
6787 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6788 ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
6789 vsi->type == ICE_VSI_SF)))) {
6790 ice_print_link_msg(vsi, true);
6791 netif_tx_start_all_queues(vsi->netdev);
6792 netif_carrier_on(vsi->netdev);
6793 ice_ptp_link_change(pf, true);
6794 }
6795
6796 /* Perform an initial read of the statistics registers now to
6797 * set the baseline so counters are ready when interface is up
6798 */
6799 ice_update_eth_stats(vsi);
6800
6801 if (vsi->type == ICE_VSI_PF)
6802 ice_service_task_schedule(pf);
6803
6804 return 0;
6805}
6806
6807/**
6808 * ice_up - Bring the connection back up after being down
6809 * @vsi: VSI being configured
6810 */
6811int ice_up(struct ice_vsi *vsi)
6812{
6813 int err;
6814
6815 err = ice_vsi_cfg_lan(vsi);
6816 if (!err)
6817 err = ice_up_complete(vsi);
6818
6819 return err;
6820}
6821
6822/**
6823 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6824 * @syncp: pointer to u64_stats_sync
6825 * @stats: stats that pkts and bytes count will be taken from
6826 * @pkts: packets stats counter
6827 * @bytes: bytes stats counter
6828 *
6829 * This function fetches stats from the ring, performing the atomic
6830 * operations needed to read u64 values consistently on 32-bit machines.
6831 */
6832void
6833ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6834 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6835{
6836 unsigned int start;
6837
6838 do {
6839 start = u64_stats_fetch_begin(syncp);
6840 *pkts = stats.pkts;
6841 *bytes = stats.bytes;
6842 } while (u64_stats_fetch_retry(syncp, start));
6843}
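/* For context, the writer side of this seqcount pairing brackets its counter
 * updates with u64_stats_update_begin()/u64_stats_update_end(). A minimal
 * sketch, assuming a Tx ring with the same ring_stats layout (illustrative,
 * not driver code):
 */
#if 0
static void ice_example_update_ring_stats(struct ice_tx_ring *ring,
					  u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&ring->ring_stats->syncp);
	ring->ring_stats->stats.pkts += pkts;
	ring->ring_stats->stats.bytes += bytes;
	u64_stats_update_end(&ring->ring_stats->syncp);
}
#endif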
6844
6845/**
6846 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6847 * @vsi: the VSI to be updated
6848 * @vsi_stats: the stats struct to be updated
6849 * @rings: rings to work on
6850 * @count: number of rings
6851 */
6852static void
6853ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6854 struct rtnl_link_stats64 *vsi_stats,
6855 struct ice_tx_ring **rings, u16 count)
6856{
6857 u16 i;
6858
6859 for (i = 0; i < count; i++) {
6860 struct ice_tx_ring *ring;
6861 u64 pkts = 0, bytes = 0;
6862
6863 ring = READ_ONCE(rings[i]);
6864 if (!ring || !ring->ring_stats)
6865 continue;
6866 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6867 ring->ring_stats->stats, &pkts,
6868 &bytes);
6869 vsi_stats->tx_packets += pkts;
6870 vsi_stats->tx_bytes += bytes;
6871 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6872 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6873 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6874 }
6875}
6876
6877/**
6878 * ice_update_vsi_ring_stats - Update VSI stats counters
6879 * @vsi: the VSI to be updated
6880 */
6881static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6882{
6883 struct rtnl_link_stats64 *net_stats, *stats_prev;
6884 struct rtnl_link_stats64 *vsi_stats;
6885 struct ice_pf *pf = vsi->back;
6886 u64 pkts, bytes;
6887 int i;
6888
6889 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6890 if (!vsi_stats)
6891 return;
6892
6893 /* reset non-netdev (extended) stats */
6894 vsi->tx_restart = 0;
6895 vsi->tx_busy = 0;
6896 vsi->tx_linearize = 0;
6897 vsi->rx_buf_failed = 0;
6898 vsi->rx_page_failed = 0;
6899
6900 rcu_read_lock();
6901
6902 /* update Tx rings counters */
6903 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6904 vsi->num_txq);
6905
6906 /* update Rx rings counters */
6907 ice_for_each_rxq(vsi, i) {
6908 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6909 struct ice_ring_stats *ring_stats;
6910
6911 ring_stats = ring->ring_stats;
6912 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6913 ring_stats->stats, &pkts,
6914 &bytes);
6915 vsi_stats->rx_packets += pkts;
6916 vsi_stats->rx_bytes += bytes;
6917 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6918 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6919 }
6920
6921 /* update XDP Tx rings counters */
6922 if (ice_is_xdp_ena_vsi(vsi))
6923 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6924 vsi->num_xdp_txq);
6925
6926 rcu_read_unlock();
6927
6928 net_stats = &vsi->net_stats;
6929 stats_prev = &vsi->net_stats_prev;
6930
6931	/* Update netdev counters, but keep in mind that values could start at a
6932	 * random value after a PF reset. And as we increase the reported stats
6933	 * by the Cur - Prev delta, we need to be sure that Prev is valid. If
6934	 * it's not, skip this round.
6935 */
6936 if (likely(pf->stat_prev_loaded)) {
6937 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6938 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6939 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6940 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6941 }
6942
6943 stats_prev->tx_packets = vsi_stats->tx_packets;
6944 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6945 stats_prev->rx_packets = vsi_stats->rx_packets;
6946 stats_prev->rx_bytes = vsi_stats->rx_bytes;
6947
6948 kfree(vsi_stats);
6949}
6950
6951/**
6952 * ice_update_vsi_stats - Update VSI stats counters
6953 * @vsi: the VSI to be updated
6954 */
6955void ice_update_vsi_stats(struct ice_vsi *vsi)
6956{
6957 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6958 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6959 struct ice_pf *pf = vsi->back;
6960
6961 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6962 test_bit(ICE_CFG_BUSY, pf->state))
6963 return;
6964
6965 /* get stats as recorded by Tx/Rx rings */
6966 ice_update_vsi_ring_stats(vsi);
6967
6968 /* get VSI stats as recorded by the hardware */
6969 ice_update_eth_stats(vsi);
6970
6971 cur_ns->tx_errors = cur_es->tx_errors;
6972 cur_ns->rx_dropped = cur_es->rx_discards;
6973 cur_ns->tx_dropped = cur_es->tx_discards;
6974 cur_ns->multicast = cur_es->rx_multicast;
6975
6976 /* update some more netdev stats if this is main VSI */
6977 if (vsi->type == ICE_VSI_PF) {
6978 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6979 cur_ns->rx_errors = pf->stats.crc_errors +
6980 pf->stats.illegal_bytes +
6981 pf->stats.rx_undersize +
6982 pf->hw_csum_rx_error +
6983 pf->stats.rx_jabber +
6984 pf->stats.rx_fragments +
6985 pf->stats.rx_oversize;
6986 /* record drops from the port level */
6987 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6988 }
6989}
6990
6991/**
6992 * ice_update_pf_stats - Update PF port stats counters
6993 * @pf: PF whose stats need to be updated
6994 */
6995void ice_update_pf_stats(struct ice_pf *pf)
6996{
6997 struct ice_hw_port_stats *prev_ps, *cur_ps;
6998 struct ice_hw *hw = &pf->hw;
6999 u16 fd_ctr_base;
7000 u8 port;
7001
7002 port = hw->port_info->lport;
7003 prev_ps = &pf->stats_prev;
7004 cur_ps = &pf->stats;
7005
7006 if (ice_is_reset_in_progress(pf->state))
7007 pf->stat_prev_loaded = false;
7008
7009 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
7010 &prev_ps->eth.rx_bytes,
7011 &cur_ps->eth.rx_bytes);
7012
7013 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
7014 &prev_ps->eth.rx_unicast,
7015 &cur_ps->eth.rx_unicast);
7016
7017 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
7018 &prev_ps->eth.rx_multicast,
7019 &cur_ps->eth.rx_multicast);
7020
7021 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
7022 &prev_ps->eth.rx_broadcast,
7023 &cur_ps->eth.rx_broadcast);
7024
7025 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
7026 &prev_ps->eth.rx_discards,
7027 &cur_ps->eth.rx_discards);
7028
7029 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
7030 &prev_ps->eth.tx_bytes,
7031 &cur_ps->eth.tx_bytes);
7032
7033 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
7034 &prev_ps->eth.tx_unicast,
7035 &cur_ps->eth.tx_unicast);
7036
7037 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
7038 &prev_ps->eth.tx_multicast,
7039 &cur_ps->eth.tx_multicast);
7040
7041 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
7042 &prev_ps->eth.tx_broadcast,
7043 &cur_ps->eth.tx_broadcast);
7044
7045 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
7046 &prev_ps->tx_dropped_link_down,
7047 &cur_ps->tx_dropped_link_down);
7048
7049 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
7050 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
7051
7052 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
7053 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
7054
7055 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
7056 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
7057
7058 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
7059 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
7060
7061 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
7062 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
7063
7064 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
7065 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
7066
7067 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
7068 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
7069
7070 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
7071 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
7072
7073 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
7074 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
7075
7076 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
7077 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
7078
7079 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
7080 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
7081
7082 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
7083 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
7084
7085 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
7086 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
7087
7088 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
7089 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
7090
7091 fd_ctr_base = hw->fd_ctr_base;
7092
7093 ice_stat_update40(hw,
7094 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
7095 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
7096 &cur_ps->fd_sb_match);
7097 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
7098 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
7099
7100 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
7101 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
7102
7103 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
7104 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
7105
7106 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
7107 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
7108
7109 ice_update_dcb_stats(pf);
7110
7111 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
7112 &prev_ps->crc_errors, &cur_ps->crc_errors);
7113
7114 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
7115 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
7116
7117 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
7118 &prev_ps->mac_local_faults,
7119 &cur_ps->mac_local_faults);
7120
7121 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
7122 &prev_ps->mac_remote_faults,
7123 &cur_ps->mac_remote_faults);
7124
7125 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
7126 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
7127
7128 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
7129 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
7130
7131 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
7132 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
7133
7134 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
7135 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
7136
7137 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
7138
7139 pf->stat_prev_loaded = true;
7140}
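/* The ice_stat_update32()/ice_stat_update40() helpers used above follow the
 * usual rollover-safe pattern: latch the hardware counter and, once a valid
 * previous snapshot exists, accumulate the modular delta. A minimal 32-bit
 * sketch of the idea (illustrative; the real helpers live elsewhere in the
 * driver):
 */
#if 0
static void ice_example_stat_update32(u32 new_data, bool prev_loaded,
				      u64 *prev_stat, u64 *cur_stat)
{
	if (prev_loaded)
		/* masking keeps the delta correct across 32-bit wraparound */
		*cur_stat += (new_data - *prev_stat) & 0xffffffffULL;
	*prev_stat = new_data;
}
#endif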
7141
7142/**
7143 * ice_get_stats64 - get statistics for network device structure
7144 * @netdev: network interface device structure
7145 * @stats: main device statistics structure
7146 */
7147void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
7148{
7149 struct ice_netdev_priv *np = netdev_priv(netdev);
7150 struct rtnl_link_stats64 *vsi_stats;
7151 struct ice_vsi *vsi = np->vsi;
7152
7153 vsi_stats = &vsi->net_stats;
7154
7155 if (!vsi->num_txq || !vsi->num_rxq)
7156 return;
7157
7158	/* netdev packet/byte stats come from the ring counters. They are
7159	 * obtained by summing up the ring counters (done by
7160	 * ice_update_vsi_ring_stats). But only call the update routine and
7161	 * read the registers if the VSI is not down.
7162 */
7163 if (!test_bit(ICE_VSI_DOWN, vsi->state))
7164 ice_update_vsi_ring_stats(vsi);
7165 stats->tx_packets = vsi_stats->tx_packets;
7166 stats->tx_bytes = vsi_stats->tx_bytes;
7167 stats->rx_packets = vsi_stats->rx_packets;
7168 stats->rx_bytes = vsi_stats->rx_bytes;
7169
7170 /* The rest of the stats can be read from the hardware but instead we
7171 * just return values that the watchdog task has already obtained from
7172 * the hardware.
7173 */
7174 stats->multicast = vsi_stats->multicast;
7175 stats->tx_errors = vsi_stats->tx_errors;
7176 stats->tx_dropped = vsi_stats->tx_dropped;
7177 stats->rx_errors = vsi_stats->rx_errors;
7178 stats->rx_dropped = vsi_stats->rx_dropped;
7179 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
7180 stats->rx_length_errors = vsi_stats->rx_length_errors;
7181}
7182
7183/**
7184 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7185 * @vsi: VSI having NAPI disabled
7186 */
7187static void ice_napi_disable_all(struct ice_vsi *vsi)
7188{
7189 int q_idx;
7190
7191 if (!vsi->netdev)
7192 return;
7193
7194 ice_for_each_q_vector(vsi, q_idx) {
7195 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7196
7197 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7198 napi_disable(&q_vector->napi);
7199
7200 cancel_work_sync(&q_vector->tx.dim.work);
7201 cancel_work_sync(&q_vector->rx.dim.work);
7202 }
7203}
7204
7205/**
7206 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7207 * @vsi: the VSI being un-configured
7208 */
7209static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7210{
7211 struct ice_pf *pf = vsi->back;
7212 struct ice_hw *hw = &pf->hw;
7213 u32 val;
7214 int i;
7215
7216 /* disable interrupt causation from each Rx queue; Tx queues are
7217 * handled in ice_vsi_stop_tx_ring()
7218 */
7219 if (vsi->rx_rings) {
7220 ice_for_each_rxq(vsi, i) {
7221 if (vsi->rx_rings[i]) {
7222 u16 reg;
7223
7224 reg = vsi->rx_rings[i]->reg_idx;
7225 val = rd32(hw, QINT_RQCTL(reg));
7226 val &= ~QINT_RQCTL_CAUSE_ENA_M;
7227 wr32(hw, QINT_RQCTL(reg), val);
7228 }
7229 }
7230 }
7231
7232 /* disable each interrupt */
7233 ice_for_each_q_vector(vsi, i) {
7234 if (!vsi->q_vectors[i])
7235 continue;
7236 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7237 }
7238
7239 ice_flush(hw);
7240
7241 /* don't call synchronize_irq() for VF's from the host */
7242 if (vsi->type == ICE_VSI_VF)
7243 return;
7244
7245 ice_for_each_q_vector(vsi, i)
7246 synchronize_irq(vsi->q_vectors[i]->irq.virq);
7247}
7248
7249/**
7250 * ice_down - Shutdown the connection
7251 * @vsi: The VSI being stopped
7252 *
7253 * The caller of this function is expected to set the ICE_DOWN bit in vsi->state
7254 */
7255int ice_down(struct ice_vsi *vsi)
7256{
7257 int i, tx_err, rx_err, vlan_err = 0;
7258
7259 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7260
7261 if (vsi->netdev) {
7262 vlan_err = ice_vsi_del_vlan_zero(vsi);
7263 ice_ptp_link_change(vsi->back, false);
7264 netif_carrier_off(vsi->netdev);
7265 netif_tx_disable(vsi->netdev);
7266 }
7267
7268 ice_vsi_dis_irq(vsi);
7269
7270 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7271 if (tx_err)
7272 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7273 vsi->vsi_num, tx_err);
7274 if (!tx_err && vsi->xdp_rings) {
7275 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7276 if (tx_err)
7277 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7278 vsi->vsi_num, tx_err);
7279 }
7280
7281 rx_err = ice_vsi_stop_all_rx_rings(vsi);
7282 if (rx_err)
7283 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7284 vsi->vsi_num, rx_err);
7285
7286 ice_napi_disable_all(vsi);
7287
7288 ice_for_each_txq(vsi, i)
7289 ice_clean_tx_ring(vsi->tx_rings[i]);
7290
7291 if (vsi->xdp_rings)
7292 ice_for_each_xdp_txq(vsi, i)
7293 ice_clean_tx_ring(vsi->xdp_rings[i]);
7294
7295 ice_for_each_rxq(vsi, i)
7296 ice_clean_rx_ring(vsi->rx_rings[i]);
7297
7298 if (tx_err || rx_err || vlan_err) {
7299 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7300 vsi->vsi_num, vsi->vsw->sw_id);
7301 return -EIO;
7302 }
7303
7304 return 0;
7305}
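/* Per the note above, ice_down() assumes the caller already marked the VSI
 * down; a minimal sketch of the expected calling pattern, mirroring
 * ice_set_loopback() and ice_down_up() (illustrative):
 *
 *	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
 *		err = ice_down(vsi);
 */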
7306
7307/**
7308 * ice_down_up - shutdown the VSI connection and bring it up
7309 * @vsi: the VSI to be reconnected
7310 */
7311int ice_down_up(struct ice_vsi *vsi)
7312{
7313 int ret;
7314
7315 /* if DOWN already set, nothing to do */
7316 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7317 return 0;
7318
7319 ret = ice_down(vsi);
7320 if (ret)
7321 return ret;
7322
7323 ret = ice_up(vsi);
7324 if (ret) {
7325 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7326 return ret;
7327 }
7328
7329 return 0;
7330}

/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *ring = vsi->tx_rings[i];

		if (!ring)
			return -EINVAL;

		if (vsi->netdev)
			ring->netdev = vsi->netdev;
		err = ice_setup_tx_ring(ring);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *ring = vsi->rx_rings[i];

		if (!ring)
			return -EINVAL;

		if (vsi->netdev)
			ring->netdev = vsi->netdev;
		err = ice_setup_rx_ring(ring);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_open_ctrl - open control VSI for use
 * @vsi: the VSI to open
 *
 * Initialization of the Control VSI
 *
 * Returns 0 on success, negative value on error
 */
int ice_vsi_open_ctrl(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg_lan(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
		 dev_driver_string(dev), dev_name(dev));
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	ice_vsi_cfg_msix(vsi);

	err = ice_vsi_start_all_rx_rings(vsi);
	if (err)
		goto err_up_complete;

	clear_bit(ICE_VSI_DOWN, vsi->state);
	ice_vsi_ena_irq(vsi);

	return 0;

err_up_complete:
	ice_down(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}

/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg_lan(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);

	if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
		if (err)
			goto err_set_qs;

		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
		if (err)
			goto err_set_qs;

		ice_vsi_set_napi_queues(vsi);
	}

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}

/**
 * ice_vsi_release_all - Delete all VSIs
 * @pf: PF from which all VSIs are being removed
 */
static void ice_vsi_release_all(struct ice_pf *pf)
{
	int err, i;

	if (!pf->vsi)
		return;

	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;

		if (pf->vsi[i]->type == ICE_VSI_CHNL)
			continue;

		err = ice_vsi_release(pf->vsi[i]);
		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
				i, err, pf->vsi[i]->vsi_num);
	}
}

/**
 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
 * @pf: pointer to the PF instance
 * @type: VSI type to rebuild
 *
 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
 */
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int i, err;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi || vsi->type != type)
			continue;

		/* rebuild the VSI */
		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
		if (err) {
			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		/* Re-map HW VSI number, using VSI handle that has been
		 * previously validated in ice_replay_vsi() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* enable the VSI */
		err = ice_ena_vsi(vsi, false);
		if (err) {
			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
			 ice_vsi_type_str(type));
	}

	return 0;
}

/**
 * ice_update_pf_netdev_link - Update PF netdev link status
 * @pf: pointer to the PF instance
 */
static void ice_update_pf_netdev_link(struct ice_pf *pf)
{
	bool link_up;
	int i;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi || vsi->type != ICE_VSI_PF)
			return;

		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
		if (link_up) {
			netif_carrier_on(pf->vsi[i]->netdev);
			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
		} else {
			netif_carrier_off(pf->vsi[i]->netdev);
			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
		}
	}
}

/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 * @reset_type: type of reset
 *
 * Do not rebuild VF VSIs in this flow because that is already handled via
 * ice_reset_all_vfs(). This is because the requirements for resetting a VF
 * after a PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we
 * don't want to reset/rebuild all the VF VSIs twice.
 */
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool dvm;
	int err;

	if (test_bit(ICE_DOWN, pf->state))
		goto clear_recovery;

	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);

#define ICE_EMP_RESET_SLEEP_MS 5000
	if (reset_type == ICE_RESET_EMPR) {
		/* If an EMP reset has occurred, any previously pending flash
		 * update will have completed. We no longer know whether or
		 * not the NVM update EMP reset is restricted.
		 */
		pf->fw_emp_reset_disabled = false;

		msleep(ICE_EMP_RESET_SLEEP_MS);
	}

	err = ice_init_all_ctrlq(hw);
	if (err) {
		dev_err(dev, "control queues init failed %d\n", err);
		goto err_init_ctrlq;
	}

	/* if DDP was previously loaded successfully */
	if (!ice_is_safe_mode(pf)) {
		/* reload the SW DB of filter tables */
		if (reset_type == ICE_RESET_PFR)
			ice_fill_blk_tbls(hw);
		else
			/* Reload DDP Package after CORER/GLOBR reset */
			ice_load_pkg(NULL, pf);
	}

	err = ice_clear_pf_cfg(hw);
	if (err) {
		dev_err(dev, "clear PF configuration failed %d\n", err);
		goto err_init_ctrlq;
	}

	ice_clear_pxe_mode(hw);

	err = ice_init_nvm(hw);
	if (err) {
		dev_err(dev, "ice_init_nvm failed %d\n", err);
		goto err_init_ctrlq;
	}

	err = ice_get_caps(hw);
	if (err) {
		dev_err(dev, "ice_get_caps failed %d\n", err);
		goto err_init_ctrlq;
	}

	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (err) {
		dev_err(dev, "set_mac_cfg failed %d\n", err);
		goto err_init_ctrlq;
	}

	dvm = ice_is_dvm_ena(hw);

	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
	if (err)
		goto err_init_ctrlq;

	err = ice_sched_init_port(hw->port_info);
	if (err)
		goto err_sched_init_port;

	/* start misc vector */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "misc vector setup failed: %d\n", err);
		goto err_sched_init_port;
	}

	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
		if (!rd32(hw, PFQF_FD_SIZE)) {
			u16 unused, guar, b_effort;

			guar = hw->func_caps.fd_fltr_guar;
			b_effort = hw->func_caps.fd_fltr_best_effort;

			/* force guaranteed filter pool for PF */
			ice_alloc_fd_guar_item(hw, &unused, guar);
			/* force shared filter pool for PF */
			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
		}
	}

	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		ice_dcb_rebuild(pf);

	/* If the PF previously had enabled PTP, PTP init needs to happen
	 * before the VSI rebuild. Otherwise, the PTP link status events
	 * fail.
	 */
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_rebuild(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_init(pf);

	/* rebuild PF VSI */
	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
	if (err) {
		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
		goto err_vsi_rebuild;
	}

	if (reset_type == ICE_RESET_PFR) {
		err = ice_rebuild_channels(pf);
		if (err) {
			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
				err);
			goto err_vsi_rebuild;
		}
	}

	/* If Flow Director is active */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
		if (err) {
			dev_err(dev, "control VSI rebuild failed: %d\n", err);
			goto err_vsi_rebuild;
		}

		/* replay HW Flow Director recipes */
		if (hw->fdir_prof)
			ice_fdir_replay_flows(hw);

		/* replay Flow Director filters */
		ice_fdir_replay_fltrs(pf);

		ice_rebuild_arfs(pf);
	}

	if (vsi && vsi->netdev)
		netif_device_attach(vsi->netdev);

	ice_update_pf_netdev_link(pf);

	/* tell the firmware we are up */
	err = ice_send_version(pf);
	if (err) {
		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
			err);
		goto err_vsi_rebuild;
	}

	ice_replay_post(hw);

	/* if we get here, reset flow is successful */
	clear_bit(ICE_RESET_FAILED, pf->state);

	ice_plug_aux_dev(pf);
	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
		ice_lag_rebuild(pf);

	/* Restore timestamp mode settings after VSI rebuild */
	ice_ptp_restore_timestamp_mode(pf);
	return;

err_vsi_rebuild:
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw, false);
	set_bit(ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}

/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct bpf_prog *prog;
	u8 count = 0;
	int err = 0;

	if (new_mtu == (int)netdev->mtu) {
		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
		return 0;
	}

	prog = vsi->xdp_prog;
	if (prog && !prog->aux->xdp_has_frags) {
		int frame_size = ice_max_xdp_frame_size(vsi);

		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
			netdev_err(netdev, "max MTU for XDP usage is %d\n",
				   frame_size - ICE_ETH_PKT_HDR_PAD);
			return -EINVAL;
		}
	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
			return -EINVAL;
		}
	}

	/* if a reset is in progress, wait for some time for it to complete */
	do {
		if (ice_is_reset_in_progress(pf->state)) {
			count++;
			usleep_range(1000, 2000);
		} else {
			break;
		}

	} while (count < 100);

	if (count == 100) {
		netdev_err(netdev, "can't change MTU. Device is busy\n");
		return -EBUSY;
	}

	WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
	err = ice_down_up(vsi);
	if (err)
		return err;

	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);

	return err;
}
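
/* Usage sketch (not part of the driver): with a frags-incapable XDP program
 * attached, an MTU above the XDP frame budget is rejected by the check in
 * ice_change_mtu() above, e.g. from the shell:
 *
 *	# ip link set dev <ifname> mtu 9000
 *	<ifname>: max MTU for XDP usage is <N>
 *
 * where <N> is ice_max_xdp_frame_size() minus ICE_ETH_PKT_HDR_PAD.
 */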

/**
 * ice_eth_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 */
static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ice_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return ice_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_aq_str - convert AQ err code to a string
 * @aq_err: the AQ error code to convert
 */
const char *ice_aq_str(enum ice_aq_err aq_err)
{
	switch (aq_err) {
	case ICE_AQ_RC_OK:
		return "OK";
	case ICE_AQ_RC_EPERM:
		return "ICE_AQ_RC_EPERM";
	case ICE_AQ_RC_ENOENT:
		return "ICE_AQ_RC_ENOENT";
	case ICE_AQ_RC_ENOMEM:
		return "ICE_AQ_RC_ENOMEM";
	case ICE_AQ_RC_EBUSY:
		return "ICE_AQ_RC_EBUSY";
	case ICE_AQ_RC_EEXIST:
		return "ICE_AQ_RC_EEXIST";
	case ICE_AQ_RC_EINVAL:
		return "ICE_AQ_RC_EINVAL";
	case ICE_AQ_RC_ENOSPC:
		return "ICE_AQ_RC_ENOSPC";
	case ICE_AQ_RC_ENOSYS:
		return "ICE_AQ_RC_ENOSYS";
	case ICE_AQ_RC_EMODE:
		return "ICE_AQ_RC_EMODE";
	case ICE_AQ_RC_ENOSEC:
		return "ICE_AQ_RC_ENOSEC";
	case ICE_AQ_RC_EBADSIG:
		return "ICE_AQ_RC_EBADSIG";
	case ICE_AQ_RC_ESVN:
		return "ICE_AQ_RC_ESVN";
	case ICE_AQ_RC_EBADMAN:
		return "ICE_AQ_RC_EBADMAN";
	case ICE_AQ_RC_EBADBUF:
		return "ICE_AQ_RC_EBADBUF";
	}

	return "ICE_AQ_RC_UNKNOWN";
}

/**
 * ice_set_rss_lut - Set RSS LUT
 * @vsi: Pointer to VSI structure
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
	struct ice_aq_get_set_rss_lut_params params = {};
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (!lut)
		return -EINVAL;

	params.vsi_handle = vsi->idx;
	params.lut_size = lut_size;
	params.lut_type = vsi->rss_lut_type;
	params.lut = lut;

	status = ice_aq_set_rss_lut(hw, &params);
	if (status)
		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
			status, ice_aq_str(hw->adminq.sq_last_status));

	return status;
}

/**
 * ice_set_rss_key - Set RSS key
 * @vsi: Pointer to the VSI structure
 * @seed: RSS hash seed
 *
 * Returns 0 on success, negative on failure
 */
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (!seed)
		return -EINVAL;

	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
	if (status)
		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
			status, ice_aq_str(hw->adminq.sq_last_status));

	return status;
}

/**
 * ice_get_rss_lut - Get RSS LUT
 * @vsi: Pointer to VSI structure
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
	struct ice_aq_get_set_rss_lut_params params = {};
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (!lut)
		return -EINVAL;

	params.vsi_handle = vsi->idx;
	params.lut_size = lut_size;
	params.lut_type = vsi->rss_lut_type;
	params.lut = lut;

	status = ice_aq_get_rss_lut(hw, &params);
	if (status)
		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
			status, ice_aq_str(hw->adminq.sq_last_status));

	return status;
}

/**
 * ice_get_rss_key - Get RSS key
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the key in
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (!seed)
		return -EINVAL;

	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
	if (status)
		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
			status, ice_aq_str(hw->adminq.sq_last_status));

	return status;
}
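
/* Usage sketch (hypothetical caller, not part of the driver): these four
 * helpers are the natural backing for an ethtool-style rxfh handler. Buffer
 * sizes are assumed to come from the VSI's RSS configuration:
 *
 *	u8 *lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 *	u8 seed[ICE_VSIQF_HKEY_ARRAY_SIZE];
 *
 *	err = ice_get_rss_lut(vsi, lut, vsi->rss_table_size);
 *	if (!err)
 *		err = ice_get_rss_key(vsi, seed);
 */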

/**
 * ice_set_rss_hfunc - Set RSS HASH function
 * @vsi: Pointer to VSI structure
 * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
 *
 * Returns 0 on success, negative on failure
 */
int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctx;
	bool symm;
	int err;

	if (hfunc == vsi->rss_hfunc)
		return 0;

	if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
	    hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
		return -EOPNOTSUPP;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	ctx->info.q_opt_rss = vsi->info.q_opt_rss;
	ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
	ctx->info.q_opt_rss |=
		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
	ctx->info.q_opt_tc = vsi->info.q_opt_tc;
	ctx->info.q_opt_flags = vsi->info.q_opt_rss;

	err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
			vsi->vsi_num, err);
	} else {
		vsi->info.q_opt_rss = ctx->info.q_opt_rss;
		vsi->rss_hfunc = hfunc;
		netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
			    hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
			    "Symmetric " : "");
	}
	kfree(ctx);
	if (err)
		return err;

	/* Fix the symmetry setting for all existing RSS configurations */
	symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
	return ice_set_rss_cfg_symm(hw, vsi, symm);
}

/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */
static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
		   struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 bmode;

	bmode = pf->first_sw->bridge_mode;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
				       filter_mask, NULL);
}

/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	int ret;

	vsi_props = &vsi->info;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (ret) {
		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
		goto out;
	}
	/* Update sw flags for bookkeeping */
	vsi_props->sw_flags = ctxt->info.sw_flags;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up to. Iterates through the PF VSI list and sets the loopback mode
 * (if not already set) for all VSIs connected to this switch. Also updates
 * the unicast switch filter rules for the corresponding switch of the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags,
		   struct netlink_ext_ack __always_unused *extack)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		__u16 mode = nla_get_u16(attr);

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of each VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		err = ice_update_sw_rule_bridge_mode(hw);
		if (err) {
			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
				   mode, err,
				   ice_aq_str(hw->adminq.sq_last_status));
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return err;
		}

		pf_sw->bridge_mode = mode;
	}

	return 0;
}
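
/* Usage sketch (not part of the driver): the bridge mode handled above is
 * normally driven from iproute2, e.g.:
 *
 *	# bridge link set dev <ifname> hwmode veb
 *	# bridge link set dev <ifname> hwmode vepa
 *
 * which reaches this driver through the ndo_bridge_setlink path.
 */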

/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue
 */
void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_tx_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* Check if PFC is enabled for the TC to which the queue belongs.
	 * If yes, then Tx timeout is not caused by a hung queue, no need
	 * to reset and rebuild
	 */
	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
			 txqueue);
		return;
	}

	/* now that we have an index, find the tx_ring struct */
	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
				 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(ICE_DOWN, pf->state);
		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
		set_bit(ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * ice_setup_tc_cls_flower - flower classifier offloads
 * @np: net device to configure
 * @filter_dev: device on which filter is added
 * @cls_flower: offload data
 */
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
			struct net_device *filter_dev,
			struct flow_cls_offload *cls_flower)
{
	struct ice_vsi *vsi = np->vsi;

	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(vsi, cls_flower);
	default:
		return -EINVAL;
	}
}

/**
 * ice_setup_tc_block_cb - callback handler registered for TC block
 * @type: TC SETUP type
 * @type_data: TC flower offload data that contains user input
 * @cb_priv: netdev private data
 */
static int
ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct ice_netdev_priv *np = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_validate_mqprio_qopt - Validate TCF input parameters
 * @vsi: Pointer to VSI
 * @mqprio_qopt: input parameters for mqprio queue configuration
 *
 * This function validates the MQPRIO params, such as qcount (power of 2
 * wherever needed), and makes sure the user doesn't specify qcount and BW
 * rate limit for more TCs than "num_tc"
 */
static int
ice_validate_mqprio_qopt(struct ice_vsi *vsi,
			 struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	int non_power_of_2_qcount = 0;
	struct ice_pf *pf = vsi->back;
	int max_rss_q_cnt = 0;
	u64 sum_min_rate = 0;
	struct device *dev;
	int i, speed;
	u8 num_tc;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	vsi->ch_rss_size = 0;
	num_tc = mqprio_qopt->qopt.num_tc;
	speed = ice_get_link_speed_kbps(vsi);

	for (i = 0; num_tc; i++) {
		int qcount = mqprio_qopt->qopt.count[i];
		u64 max_rate, min_rate, rem;

		if (!qcount)
			return -EINVAL;

		if (is_power_of_2(qcount)) {
			if (non_power_of_2_qcount &&
			    qcount > non_power_of_2_qcount) {
				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount > max_rss_q_cnt)
				max_rss_q_cnt = qcount;
		} else {
			if (non_power_of_2_qcount &&
			    qcount != non_power_of_2_qcount) {
				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount < max_rss_q_cnt) {
				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
					qcount, max_rss_q_cnt);
				return -EINVAL;
			}
			max_rss_q_cnt = qcount;
			non_power_of_2_qcount = qcount;
		}

		/* The tc command takes input in Kbit/Mbit/Gbit etc., but the
		 * stack converts the bandwidth rate limit into Bytes/s before
		 * passing it down to the driver. So convert the input
		 * bandwidth from Bytes/s back to Kbps
		 */
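		/* Worked example (sketch, assuming ICE_BW_KBPS_DIVISOR is
		 * 125): "max_rate 1Gbit" reaches the driver as 125000000
		 * Bytes/s, and 125000000 / 125 = 1000000 Kbps.
		 */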
		max_rate = mqprio_qopt->max_rate[i];
		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);

		/* min_rate is the minimum guaranteed rate; if set, it must be
		 * at least ICE_MIN_BW_LIMIT
		 */
		min_rate = mqprio_qopt->min_rate[i];
		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
		sum_min_rate += min_rate;

		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
				min_rate, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		if (max_rate && max_rate > speed) {
			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
				i, max_rate, speed);
			return -EINVAL;
		}

		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		/* min_rate can't be more than max_rate, except when max_rate
		 * is zero (implies max_rate sought is max line rate). In such
		 * a case min_rate can be more than max.
		 */
		if (max_rate && min_rate > max_rate) {
			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
				min_rate, max_rate);
			return -EINVAL;
		}

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + qcount))
			return -EINVAL;
	}
	if (vsi->num_rxq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;
	if (vsi->num_txq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;

	if (sum_min_rate && sum_min_rate > (u64)speed) {
		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
			sum_min_rate, speed);
		return -EINVAL;
	}

	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
	vsi->ch_rss_size = max_rss_q_cnt;

	return 0;
}
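
/* Usage sketch (not part of the driver): the mqprio channel configuration
 * validated above is typically created with iproute2, e.g.:
 *
 *	# tc qdisc add dev <ifname> root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *		shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * i.e. two TCs, a power-of-2 qcount per TC with contiguous offsets, and
 * per-TC rate limits that reach the driver via mqprio_qopt->max_rate[].
 */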

/**
 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
 * @pf: ptr to PF device
 * @vsi: ptr to VSI
 */
static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(pf);
	bool added = false;
	struct ice_hw *hw;
	int flow;

	if (!(vsi->num_gfltr || vsi->num_bfltr))
		return -EINVAL;

	hw = &pf->hw;
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof;
		int tun, status;
		u64 entry_h;

		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
		      hw->fdir_prof[flow]->cnt))
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			enum ice_flow_priority prio;

			/* add this VSI to FDir profile for this flow */
			prio = ICE_FLOW_PRIO_NORMAL;
			prof = hw->fdir_prof[flow];
			status = ice_flow_add_entry(hw, ICE_BLK_FD,
						    prof->prof_id[tun],
						    prof->vsi_h[0], vsi->idx,
						    prio, prof->fdir_seg[tun],
						    &entry_h);
			if (status) {
				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
					vsi->idx, flow);
				continue;
			}

			prof->entry_h[prof->cnt][tun] = entry_h;
		}

		/* store VSI for filter replay and delete */
		prof->vsi_h[prof->cnt] = vsi->idx;
		prof->cnt++;

		added = true;
		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
			flow);
	}

	if (!added)
		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);

	return 0;
}

/**
 * ice_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @sw_id: underlying HW switching element ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 */
static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi;

	if (ch->type != ICE_VSI_CHNL) {
		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
	if (!vsi || vsi->type != ICE_VSI_CHNL) {
		dev_err(dev, "create chnl VSI failure\n");
		return -EINVAL;
	}

	ice_add_vsi_to_fdir(pf, vsi);

	ch->sw_id = sw_id;
	ch->vsi_num = vsi->vsi_num;
	ch->info.mapping_flags = vsi->info.mapping_flags;
	ch->ch_vsi = vsi;
	/* set the back pointer of channel for newly created VSI */
	vsi->ch = ch;

	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));

	return 0;
}

/**
 * ice_chnl_cfg_res - configure channel-specific resources
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure channel specific resources such as rings and vectors.
 */
static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	int i;

	for (i = 0; i < ch->num_txq; i++) {
		struct ice_q_vector *tx_q_vector, *rx_q_vector;
		struct ice_ring_container *rc;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;

		tx_ring = vsi->tx_rings[ch->base_q + i];
		rx_ring = vsi->rx_rings[ch->base_q + i];
		if (!tx_ring || !rx_ring)
			continue;

		/* setup ring being channel enabled */
		tx_ring->ch = ch;
		rx_ring->ch = ch;

		/* following code block sets up vector specific attributes */
		tx_q_vector = tx_ring->q_vector;
		rx_q_vector = rx_ring->q_vector;
		if (!tx_q_vector && !rx_q_vector)
			continue;

		if (tx_q_vector) {
			tx_q_vector->ch = ch;
			/* setup Tx and Rx ITR setting if DIM is off */
			rc = &tx_q_vector->tx;
			if (!ITR_IS_DYNAMIC(rc))
				ice_write_itr(rc, rc->itr_setting);
		}
		if (rx_q_vector) {
			rx_q_vector->ch = ch;
			/* setup Tx and Rx ITR setting if DIM is off */
			rc = &rx_q_vector->rx;
			if (!ITR_IS_DYNAMIC(rc))
				ice_write_itr(rc, rc->itr_setting);
		}
	}

	/* it is safe to assume that, if the channel has a non-zero num_txq or
	 * num_rxq, then the GLINT_ITR register will have been written to
	 * perform an in-context update, hence perform a flush
	 */
	if (ch->num_txq || ch->num_rxq)
		ice_flush(&vsi->back->hw);
}

/**
 * ice_cfg_chnl_all_res - configure channel resources
 * @vsi: ptr to main VSI
 * @ch: ptr to channel structure
 *
 * This function configures channel specific resources such as flow-director
 * counter index, and other resources such as queues, vectors, ITR settings
 */
static void
ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	/* configure channel (aka ADQ) resources such as queues, vectors,
	 * ITR settings for channel specific vectors and anything else
	 */
	ice_chnl_cfg_res(vsi, ch);
}

/**
 * ice_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @sw_id: underlying HW switching element ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures Tx rings accordingly
 */
static int
ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_channel *ch, u16 sw_id, u8 type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret;

	ch->base_q = vsi->next_base_q;
	ch->type = type;

	ret = ice_add_channel(pf, sw_id, ch);
	if (ret) {
		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
		return ret;
	}

	/* configure/setup ADQ specific resources */
	ice_cfg_chnl_all_res(vsi, ch);

	/* make sure to update the next_base_q so that subsequent channel's
	 * (aka ADQ) VSI queue map is correct
	 */
	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
		ch->num_rxq);

	return 0;
}

/**
 * ice_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element
 */
static bool
ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		  struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	u16 sw_id;
	int ret;

	if (vsi->type != ICE_VSI_PF) {
		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
		return false;
	}

	sw_id = pf->first_sw->sw_id;

	/* create channel (VSI) */
	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
	if (ret) {
		dev_err(dev, "failed to setup hw_channel\n");
		return false;
	}
	dev_dbg(dev, "successfully created channel\n");

	return ch->ch_vsi ? true : false;
}

/**
 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
 */
static int
ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
{
	int err;

	err = ice_set_min_bw_limit(vsi, min_tx_rate);
	if (err)
		return err;

	return ice_set_max_bw_limit(vsi, max_tx_rate);
}

/**
 * ice_create_q_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates a channel (VSI) using the num_queues specified by the
 * user and reconfigures RSS if needed.
 */
static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	if (!ch)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	if (!ch->num_txq || !ch->num_rxq) {
		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
		return -EINVAL;
	}

	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_txq);
		return -EINVAL;
	}

	if (!ice_setup_channel(pf, vsi, ch)) {
		dev_info(dev, "Failed to setup channel\n");
		return -EINVAL;
	}
	/* configure BW rate limit */
	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
		int ret;

		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (ret)
			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
		else
			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
	}

	vsi->cnt_q_avail -= ch->num_txq;

	return 0;
}

/**
 * ice_rem_all_chnl_fltrs - removes all channel filters
 * @pf: ptr to PF; TC-flower based filters are tracked at PF level
 *
 * Remove all advanced switch filters only if they are channel specific
 * tc-flower based filters
 */
static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	/* to remove all channel filters, iterate an ordered list of filters */
	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		struct ice_rule_query_data rule;
		int status;

		/* for now process only channel specific filters */
		if (!ice_is_chnl_fltr(fltr))
			continue;

		rule.rid = fltr->rid;
		rule.rule_id = fltr->rule_id;
		rule.vsi_handle = fltr->dest_vsi_handle;
		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
		if (status) {
			if (status == -ENOENT)
				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
					rule.rule_id);
			else
				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
					status);
		} else if (fltr->dest_vsi) {
			/* update advanced switch filter count */
			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
				u32 flags = fltr->flags;

				fltr->dest_vsi->num_chnl_fltr--;
				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
					pf->num_dmac_chnl_fltrs--;
			}
		}

		hlist_del(&fltr->tc_flower_node);
		kfree(fltr);
	}
}

/**
 * ice_remove_q_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 * @rem_fltr: delete advanced switch filter or not
 *
 * Remove queue channels for the TCs
 */
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
{
	struct ice_channel *ch, *ch_tmp;
	struct ice_pf *pf = vsi->back;
	int i;

	/* remove all tc-flower based filters if they are channel filters only */
	if (rem_fltr)
		ice_rem_all_chnl_fltrs(pf);

	/* remove ntuple filters since queue configuration is being changed */
	if (vsi->netdev->features & NETIF_F_NTUPLE) {
		struct ice_hw *hw = &pf->hw;

		mutex_lock(&hw->fdir_fltr_lock);
		ice_fdir_del_all_fltrs(vsi);
		mutex_unlock(&hw->fdir_fltr_lock);
	}

	/* perform cleanup for channels if they exist */
	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		list_del(&ch->list);
		ch_vsi = ch->ch_vsi;
		if (!ch_vsi) {
			kfree(ch);
			continue;
		}

		/* Reset queue contexts */
		for (i = 0; i < ch->num_rxq; i++) {
			struct ice_tx_ring *tx_ring;
			struct ice_rx_ring *rx_ring;

			tx_ring = vsi->tx_rings[ch->base_q + i];
			rx_ring = vsi->rx_rings[ch->base_q + i];
			if (tx_ring) {
				tx_ring->ch = NULL;
				if (tx_ring->q_vector)
					tx_ring->q_vector->ch = NULL;
			}
			if (rx_ring) {
				rx_ring->ch = NULL;
				if (rx_ring->q_vector)
					rx_ring->q_vector->ch = NULL;
			}
		}

		/* Release FD resources for the channel VSI */
		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);

		/* clear the VSI from scheduler tree */
		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);

		/* Delete VSI from FW, PF and HW VSI arrays */
		ice_vsi_delete(ch->ch_vsi);

		/* free the channel */
		kfree(ch);
	}

	/* clear the channel VSI map which is stored in main VSI */
	ice_for_each_chnl_tc(i)
		vsi->tc_map_vsi[i] = NULL;

	/* reset main VSI's all TC information */
	vsi->all_enatc = 0;
	vsi->all_numtc = 0;
}

/**
 * ice_rebuild_channels - rebuild channels
 * @pf: ptr to PF
 *
 * Recreate channel VSIs and replay filters
 */
static int ice_rebuild_channels(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi;
	bool rem_adv_fltr = true;
	struct ice_channel *ch;
	struct ice_vsi *vsi;
	int tc_idx = 1;
	int i, err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return 0;

	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
	    main_vsi->old_numtc == 1)
		return 0; /* nothing to be done */

	/* reconfigure main VSI based on old value of TC and cached values
	 * for MQPRIO opts
	 */
	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
	if (err) {
		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
			main_vsi->old_ena_tc, main_vsi->vsi_num);
		return err;
	}

	/* rebuild ADQ VSIs */
	ice_for_each_vsi(pf, i) {
		enum ice_vsi_type type;

		vsi = pf->vsi[i];
		if (!vsi || vsi->type != ICE_VSI_CHNL)
			continue;

		type = vsi->type;

		/* rebuild ADQ VSI */
		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
		if (err) {
			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
				ice_vsi_type_str(type), vsi->idx, err);
			goto cleanup;
		}

		/* Re-map HW VSI number, using the VSI handle that will be
		 * validated in the ice_replay_vsi() call below
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
				ice_vsi_type_str(type), err, vsi->idx);
			rem_adv_fltr = false;
			goto cleanup;
		}
		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
			 ice_vsi_type_str(type), vsi->idx);

		/* store ADQ VSI at correct TC index in main VSI's
		 * map of TC to VSI
		 */
		main_vsi->tc_map_vsi[tc_idx++] = vsi;
	}

	/* ADQ VSI(s) have been rebuilt successfully, so setup
	 * channel for main VSI's Tx and Rx rings
	 */
	list_for_each_entry(ch, &main_vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		ch_vsi = ch->ch_vsi;
		if (!ch_vsi)
			continue;

		/* reconfig channel resources */
		ice_cfg_chnl_all_res(main_vsi, ch);

		/* replay BW rate limit if it is non-zero */
		if (!ch->max_tx_rate && !ch->min_tx_rate)
			continue;

		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (err)
			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				err, ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
		else
			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
	}

	/* reconfig RSS for main VSI */
	if (main_vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(main_vsi);

	return 0;

cleanup:
	ice_remove_q_channels(main_vsi, rem_adv_fltr);
	return err;
}

/**
 * ice_create_q_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 */
static int ice_create_q_channels(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_channel *ch;
	int ret = 0, i;

	ice_for_each_chnl_tc(i) {
		if (!(vsi->all_enatc & BIT(i)))
			continue;

		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			ret = -ENOMEM;
			goto err_free;
		}
		INIT_LIST_HEAD(&ch->list);
		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];

		/* convert to Kbits/s */
		if (ch->max_tx_rate)
			ch->max_tx_rate = div_u64(ch->max_tx_rate,
						  ICE_BW_KBPS_DIVISOR);
		if (ch->min_tx_rate)
			ch->min_tx_rate = div_u64(ch->min_tx_rate,
						  ICE_BW_KBPS_DIVISOR);

		ret = ice_create_q_channel(vsi, ch);
		if (ret) {
			dev_err(ice_pf_to_dev(pf),
				"failed creating channel TC:%d\n", i);
			kfree(ch);
			goto err_free;
		}
		list_add_tail(&ch->list, &vsi->ch_list);
		vsi->tc_map_vsi[i] = ch->ch_vsi;
		dev_dbg(ice_pf_to_dev(pf),
			"successfully created channel: VSI %pK\n", ch->ch_vsi);
	}
	return 0;

err_free:
	ice_remove_q_channels(vsi, false);

	return ret;
}

/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 */
static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 mode, ena_tc_qdisc = 0;
	int cur_txq, cur_rxq;
	u8 hw = 0, num_tcf;
	struct device *dev;
	int ret, i;

	dev = ice_pf_to_dev(pf);
	num_tcf = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		vsi->ch_rss_size = 0;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tcf;
	}

	/* Generate queue region map for number of TCF requested */
	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= BIT(i);

	switch (mode) {
	case TC_MQPRIO_MODE_CHANNEL:

		if (pf->hw.port_info->is_custom_tx_enabled) {
			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
			return -EBUSY;
		}
		ice_tear_down_devlink_rate_tree(pf);

		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret) {
			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
				   ret);
			return ret;
		}
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload
		 * already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
		break;
	default:
		return -EINVAL;
	}

config_tcf:

	/* Requesting same TCF configuration as already enabled */
	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Pause VSI queues */
	ice_dis_vsi(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ice_remove_q_channels(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
				     num_online_cpus());
		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
				     num_online_cpus());
	} else {
		/* logic to rebuild VSI, same like ethtool -L */
		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;

		for (i = 0; i < num_tcf; i++) {
			if (!(ena_tc_qdisc & BIT(i)))
				continue;

			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
		vsi->req_txq = offset + qcount_tx;
		vsi->req_rxq = offset + qcount_rx;

		/* store away original rss_size info, so that it gets reused
		 * from ice_vsi_rebuild during the tc-qdisc delete stage to
		 * determine what the rss_size for the main VSI should be
		 */
		vsi->orig_rss_size = vsi->rss_size;
	}

	/* save current values of Tx and Rx queues before calling VSI rebuild
	 * for fallback option
	 */
	cur_txq = vsi->num_txq;
	cur_rxq = vsi->num_rxq;

	/* proceed with rebuilding the main VSI using the correct number of
	 * queues
	 */
	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
	if (ret) {
		/* fallback to current number of queues */
		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
		vsi->req_txq = cur_txq;
		vsi->req_rxq = cur_rxq;
		clear_bit(ICE_RESET_FAILED, pf->state);
		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
			dev_err(dev, "Rebuild of main VSI failed again\n");
			return ret;
		}
	}

	vsi->all_numtc = num_tcf;
	vsi->all_enatc = ena_tc_qdisc;
	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
	if (ret) {
		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
			   vsi->vsi_num);
		goto exit;
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

		/* set TC0 rate limit if specified */
		if (max_tx_rate || min_tx_rate) {
			/* convert to Kbits/s */
			if (max_tx_rate)
				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
			if (min_tx_rate)
				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);

			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
			if (!ret) {
				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
			} else {
				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
				goto exit;
			}
		}
		ret = ice_create_q_channels(vsi);
		if (ret) {
			netdev_err(netdev, "failed configuring queue channels\n");
			goto exit;
		} else {
			netdev_dbg(netdev, "successfully configured channels\n");
		}
	}

	if (vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(vsi);

exit:
	/* if error, reset the all_numtc and all_enatc */
	if (ret) {
		vsi->all_numtc = 0;
		vsi->all_enatc = 0;
	}
	/* resume VSI */
	ice_ena_vsi(vsi, true);

	return ret;
}
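
/* Usage sketch (not part of the driver): tearing the mqprio qdisc back down,
 * e.g. "tc qdisc del dev <ifname> root", lands here with qopt.hw == 0, which
 * takes the config_tcf path above and removes the queue channels.
 */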

static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	bool locked = false;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
			return -EOPNOTSUPP;
		}

		if (pf->adev) {
			mutex_lock(&pf->adev_mutex);
			device_lock(&pf->adev->dev);
			locked = true;
			if (pf->adev->dev.driver) {
				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
				err = -EBUSY;
				goto adev_unlock;
			}
		}

		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);

adev_unlock:
		if (locked) {
			device_unlock(&pf->adev->dev);
			mutex_unlock(&pf->adev_mutex);
		}
		return err;
	default:
		return -EOPNOTSUPP;
	}
	return -EOPNOTSUPP;
}
9352
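/* look up the indirect TC block private data previously bound for a given
 * tunnel or VLAN netdev; returns NULL if no entry exists
 */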
static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);

	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be called directly except
 * by ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set the PHY if there is media; otherwise turn the PHY off */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

9561 * ice_stop - Disables a network interface
9562 * @netdev: network interface device structure
9563 *
9564 * The stop entry point is called when an interface is de-activated by the OS,
9565 * and the netdevice enters the DOWN state. The hardware is still under the
9566 * driver's control, but the netdev interface is disabled.
9567 *
9568 * Returns success only - not allowed to fail
9569 */
9570int ice_stop(struct net_device *netdev)
9571{
9572 struct ice_netdev_priv *np = netdev_priv(netdev);
9573 struct ice_vsi *vsi = np->vsi;
9574 struct ice_pf *pf = vsi->back;
9575
9576 if (ice_is_reset_in_progress(pf->state)) {
9577 netdev_err(netdev, "can't stop net device while reset is in progress");
9578 return -EBUSY;
9579 }
9580
9581 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9582 int link_err = ice_force_phys_link_state(vsi, false);
9583
9584 if (link_err) {
9585 if (link_err == -ENOMEDIUM)
9586 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9587 vsi->vsi_num);
9588 else
9589 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9590 vsi->vsi_num, link_err);
9591
9592 ice_vsi_close(vsi);
9593 return -EIO;
9594 }
9595 }
9596
9597 ice_vsi_close(vsi);
9598
9599 return 0;
9600}
9601
9602/**
9603 * ice_features_check - Validate encapsulated packet conforms to limits
9604 * @skb: skb buffer
9605 * @netdev: This port's netdev
9606 * @features: Offload features that the stack believes apply
9607 */
9608static netdev_features_t
9609ice_features_check(struct sk_buff *skb,
9610 struct net_device __always_unused *netdev,
9611 netdev_features_t features)
9612{
9613 bool gso = skb_is_gso(skb);
9614 size_t len;
9615
9616 /* No point in doing any of this if neither checksum nor GSO are
9617 * being requested for this frame. We can rule out both by just
9618 * checking for CHECKSUM_PARTIAL
9619 */
9620 if (skb->ip_summed != CHECKSUM_PARTIAL)
9621 return features;
9622
9623 /* We cannot support GSO if the MSS is going to be less than
9624 * 64 bytes. If it is then we need to drop support for GSO.
9625 */
9626 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9627 features &= ~NETIF_F_GSO_MASK;
9628
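	/* The even-length checks below are understood to exist because the Tx
	 * context descriptor encodes header lengths in 2-byte words, so odd
	 * byte counts cannot be represented (an assumption based on the
	 * surrounding checks, not verified here).
	 */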
	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

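/* minimal netdev ops used while the driver runs in safe mode (i.e. the DDP
 * package could not be loaded), where advanced offloads are unavailable
 */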
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4/* Intel(R) Ethernet Connection E800 Series Linux Driver */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <generated/utsrelease.h>
9#include "ice.h"
10#include "ice_base.h"
11#include "ice_lib.h"
12#include "ice_fltr.h"
13#include "ice_dcb_lib.h"
14#include "ice_dcb_nl.h"
15#include "ice_devlink.h"
16
17#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
18static const char ice_driver_string[] = DRV_SUMMARY;
19static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
20
21/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
22#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
23#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
24
25MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
26MODULE_DESCRIPTION(DRV_SUMMARY);
27MODULE_LICENSE("GPL v2");
28MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
29
30static int debug = -1;
31module_param(debug, int, 0644);
32#ifndef CONFIG_DYNAMIC_DEBUG
33MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
34#else
35MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
36#endif /* !CONFIG_DYNAMIC_DEBUG */
37
38static struct workqueue_struct *ice_wq;
39static const struct net_device_ops ice_netdev_safe_mode_ops;
40static const struct net_device_ops ice_netdev_ops;
41static int ice_vsi_open(struct ice_vsi *vsi);
42
43static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
44
45static void ice_vsi_release_all(struct ice_pf *pf);
46
47/**
48 * ice_get_tx_pending - returns number of Tx descriptors not processed
49 * @ring: the ring of descriptors
50 */
51static u16 ice_get_tx_pending(struct ice_ring *ring)
52{
53 u16 head, tail;
54
55 head = ring->next_to_clean;
56 tail = ring->next_to_use;
57
58 if (head != tail)
59 return (head < tail) ?
60 tail - head : (tail + ring->count - head);
61 return 0;
62}
63
64/**
65 * ice_check_for_hang_subtask - check for and recover hung queues
66 * @pf: pointer to PF struct
67 */
68static void ice_check_for_hang_subtask(struct ice_pf *pf)
69{
70 struct ice_vsi *vsi = NULL;
71 struct ice_hw *hw;
72 unsigned int i;
73 int packets;
74 u32 v;
75
76 ice_for_each_vsi(pf, v)
77 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
78 vsi = pf->vsi[v];
79 break;
80 }
81
82 if (!vsi || test_bit(__ICE_DOWN, vsi->state))
83 return;
84
85 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
86 return;
87
88 hw = &vsi->back->hw;
89
90 for (i = 0; i < vsi->num_txq; i++) {
91 struct ice_ring *tx_ring = vsi->tx_rings[i];
92
93 if (tx_ring && tx_ring->desc) {
94 /* If packet counter has not changed the queue is
95 * likely stalled, so force an interrupt for this
96 * queue.
97 *
98 * prev_pkt would be negative if there was no
99 * pending work.
100 */
101 packets = tx_ring->stats.pkts & INT_MAX;
102 if (tx_ring->tx_stats.prev_pkt == packets) {
103 /* Trigger sw interrupt to revive the queue */
104 ice_trigger_sw_intr(hw, tx_ring->q_vector);
105 continue;
106 }
107
108 /* Memory barrier between read of packet count and call
109 * to ice_get_tx_pending()
110 */
111 smp_rmb();
112 tx_ring->tx_stats.prev_pkt =
113 ice_get_tx_pending(tx_ring) ? packets : -1;
114 }
115 }
116}
117
118/**
119 * ice_init_mac_fltr - Set initial MAC filters
120 * @pf: board private structure
121 *
122 * Set initial set of MAC filters for PF VSI; configure filters for permanent
123 * address and broadcast address. If an error is encountered, netdevice will be
124 * unregistered.
125 */
126static int ice_init_mac_fltr(struct ice_pf *pf)
127{
128 enum ice_status status;
129 struct ice_vsi *vsi;
130 u8 *perm_addr;
131
132 vsi = ice_get_main_vsi(pf);
133 if (!vsi)
134 return -EINVAL;
135
136 perm_addr = vsi->port_info->mac.perm_addr;
137 status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
138 if (!status)
139 return 0;
140
141 /* We aren't useful with no MAC filters, so unregister if we
142 * had an error
143 */
144 if (vsi->netdev->reg_state == NETREG_REGISTERED) {
145 dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
146 ice_stat_str(status));
147 unregister_netdev(vsi->netdev);
148 free_netdev(vsi->netdev);
149 vsi->netdev = NULL;
150 }
151
152 return -EIO;
153}
154
155/**
156 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
157 * @netdev: the net device on which the sync is happening
158 * @addr: MAC address to sync
159 *
160 * This is a callback function which is called by the in kernel device sync
161 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
162 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
163 * MAC filters from the hardware.
164 */
165static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
166{
167 struct ice_netdev_priv *np = netdev_priv(netdev);
168 struct ice_vsi *vsi = np->vsi;
169
170 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
171 ICE_FWD_TO_VSI))
172 return -EINVAL;
173
174 return 0;
175}
176
177/**
178 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
179 * @netdev: the net device on which the unsync is happening
180 * @addr: MAC address to unsync
181 *
182 * This is a callback function which is called by the in kernel device unsync
183 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
184 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
185 * delete the MAC filters from the hardware.
186 */
187static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
188{
189 struct ice_netdev_priv *np = netdev_priv(netdev);
190 struct ice_vsi *vsi = np->vsi;
191
192 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
193 ICE_FWD_TO_VSI))
194 return -EINVAL;
195
196 return 0;
197}
198
199/**
200 * ice_vsi_fltr_changed - check if filter state changed
201 * @vsi: VSI to be checked
202 *
203 * returns true if filter state has changed, false otherwise.
204 */
205static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
206{
207 return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
208 test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
209 test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
210}
211
212/**
213 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
214 * @vsi: the VSI being configured
215 * @promisc_m: mask of promiscuous config bits
216 * @set_promisc: enable or disable promisc flag request
217 *
218 */
219static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
220{
221 struct ice_hw *hw = &vsi->back->hw;
222 enum ice_status status = 0;
223
224 if (vsi->type != ICE_VSI_PF)
225 return 0;
226
227 if (vsi->vlan_ena) {
228 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
229 set_promisc);
230 } else {
231 if (set_promisc)
232 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
233 0);
234 else
235 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
236 0);
237 }
238
239 if (status)
240 return -EIO;
241
242 return 0;
243}
244
245/**
246 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
247 * @vsi: ptr to the VSI
248 *
249 * Push any outstanding VSI filter changes through the AdminQ.
250 */
251static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
252{
253 struct device *dev = ice_pf_to_dev(vsi->back);
254 struct net_device *netdev = vsi->netdev;
255 bool promisc_forced_on = false;
256 struct ice_pf *pf = vsi->back;
257 struct ice_hw *hw = &pf->hw;
258 enum ice_status status = 0;
259 u32 changed_flags = 0;
260 u8 promisc_m;
261 int err = 0;
262
263 if (!vsi->netdev)
264 return -EINVAL;
265
266 while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
267 usleep_range(1000, 2000);
268
269 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
270 vsi->current_netdev_flags = vsi->netdev->flags;
271
272 INIT_LIST_HEAD(&vsi->tmp_sync_list);
273 INIT_LIST_HEAD(&vsi->tmp_unsync_list);
274
275 if (ice_vsi_fltr_changed(vsi)) {
276 clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
277 clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
278 clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
279
280 /* grab the netdev's addr_list_lock */
281 netif_addr_lock_bh(netdev);
282 __dev_uc_sync(netdev, ice_add_mac_to_sync_list,
283 ice_add_mac_to_unsync_list);
284 __dev_mc_sync(netdev, ice_add_mac_to_sync_list,
285 ice_add_mac_to_unsync_list);
286 /* our temp lists are populated. release lock */
287 netif_addr_unlock_bh(netdev);
288 }
289
290 /* Remove MAC addresses in the unsync list */
291 status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
292 ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
293 if (status) {
294 netdev_err(netdev, "Failed to delete MAC filters\n");
295 /* if we failed because of alloc failures, just bail */
296 if (status == ICE_ERR_NO_MEMORY) {
297 err = -ENOMEM;
298 goto out;
299 }
300 }
301
302 /* Add MAC addresses in the sync list */
303 status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
304 ice_fltr_free_list(dev, &vsi->tmp_sync_list);
305 /* If filter is added successfully or already exists, do not go into
306 * 'if' condition and report it as error. Instead continue processing
307 * rest of the function.
308 */
309 if (status && status != ICE_ERR_ALREADY_EXISTS) {
310 netdev_err(netdev, "Failed to add MAC filters\n");
311 /* If there is no more space for new umac filters, VSI
312 * should go into promiscuous mode. There should be some
313 * space reserved for promiscuous filters.
314 */
315 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
316 !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
317 vsi->state)) {
318 promisc_forced_on = true;
319 netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
320 vsi->vsi_num);
321 } else {
322 err = -EIO;
323 goto out;
324 }
325 }
326 /* check for changes in promiscuous modes */
327 if (changed_flags & IFF_ALLMULTI) {
328 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
329 if (vsi->vlan_ena)
330 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
331 else
332 promisc_m = ICE_MCAST_PROMISC_BITS;
333
334 err = ice_cfg_promisc(vsi, promisc_m, true);
335 if (err) {
336 netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
337 vsi->vsi_num);
338 vsi->current_netdev_flags &= ~IFF_ALLMULTI;
339 goto out_promisc;
340 }
341 } else {
342 /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
343 if (vsi->vlan_ena)
344 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
345 else
346 promisc_m = ICE_MCAST_PROMISC_BITS;
347
348 err = ice_cfg_promisc(vsi, promisc_m, false);
349 if (err) {
350 netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
351 vsi->vsi_num);
352 vsi->current_netdev_flags |= IFF_ALLMULTI;
353 goto out_promisc;
354 }
355 }
356 }
357
358 if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
359 test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
360 clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
361 if (vsi->current_netdev_flags & IFF_PROMISC) {
362 /* Apply Rx filter rule to get traffic from wire */
363 if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
364 err = ice_set_dflt_vsi(pf->first_sw, vsi);
365 if (err && err != -EEXIST) {
366 netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
367 err, vsi->vsi_num);
368 vsi->current_netdev_flags &=
369 ~IFF_PROMISC;
370 goto out_promisc;
371 }
372 ice_cfg_vlan_pruning(vsi, false, false);
373 }
374 } else {
375 /* Clear Rx filter to remove traffic from wire */
376 if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
377 err = ice_clear_dflt_vsi(pf->first_sw);
378 if (err) {
379 netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
380 err, vsi->vsi_num);
381 vsi->current_netdev_flags |=
382 IFF_PROMISC;
383 goto out_promisc;
384 }
385 if (vsi->num_vlan > 1)
386 ice_cfg_vlan_pruning(vsi, true, false);
387 }
388 }
389 }
390 goto exit;
391
392out_promisc:
393 set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
394 goto exit;
395out:
396 /* if something went wrong then set the changed flag so we try again */
397 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
398 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
399exit:
400 clear_bit(__ICE_CFG_BUSY, vsi->state);
401 return err;
402}
403
404/**
405 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
406 * @pf: board private structure
407 */
408static void ice_sync_fltr_subtask(struct ice_pf *pf)
409{
410 int v;
411
412 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
413 return;
414
415 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
416
417 ice_for_each_vsi(pf, v)
418 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
419 ice_vsi_sync_fltr(pf->vsi[v])) {
420 /* come back and try again later */
421 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
422 break;
423 }
424}
425
426/**
427 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
428 * @pf: the PF
429 * @locked: is the rtnl_lock already held
430 */
431static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
432{
433 int v;
434
435 ice_for_each_vsi(pf, v)
436 if (pf->vsi[v])
437 ice_dis_vsi(pf->vsi[v], locked);
438}
439
440/**
441 * ice_prepare_for_reset - prep for the core to reset
442 * @pf: board private structure
443 *
444 * Inform or close all dependent features in prep for reset.
445 */
446static void
447ice_prepare_for_reset(struct ice_pf *pf)
448{
449 struct ice_hw *hw = &pf->hw;
450 unsigned int i;
451
452 /* already prepared for reset */
453 if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
454 return;
455
456 /* Notify VFs of impending reset */
457 if (ice_check_sq_alive(hw, &hw->mailboxq))
458 ice_vc_notify_reset(pf);
459
460 /* Disable VFs until reset is completed */
461 ice_for_each_vf(pf, i)
462 ice_set_vf_state_qs_dis(&pf->vf[i]);
463
464 /* clear SW filtering DB */
465 ice_clear_hw_tbls(hw);
466 /* disable the VSIs and their queues that are not already DOWN */
467 ice_pf_dis_all_vsi(pf, false);
468
469 if (hw->port_info)
470 ice_sched_clear_port(hw->port_info);
471
472 ice_shutdown_all_ctrlq(hw);
473
474 set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
475}
476
477/**
478 * ice_do_reset - Initiate one of many types of resets
479 * @pf: board private structure
480 * @reset_type: reset type requested
481 * before this function was called.
482 */
483static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
484{
485 struct device *dev = ice_pf_to_dev(pf);
486 struct ice_hw *hw = &pf->hw;
487
488 dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
489 WARN_ON(in_interrupt());
490
491 ice_prepare_for_reset(pf);
492
493 /* trigger the reset */
494 if (ice_reset(hw, reset_type)) {
495 dev_err(dev, "reset %d failed\n", reset_type);
496 set_bit(__ICE_RESET_FAILED, pf->state);
497 clear_bit(__ICE_RESET_OICR_RECV, pf->state);
498 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
499 clear_bit(__ICE_PFR_REQ, pf->state);
500 clear_bit(__ICE_CORER_REQ, pf->state);
501 clear_bit(__ICE_GLOBR_REQ, pf->state);
502 return;
503 }
504
505 /* PFR is a bit of a special case because it doesn't result in an OICR
506 * interrupt. So for PFR, rebuild after the reset and clear the reset-
507 * associated state bits.
508 */
509 if (reset_type == ICE_RESET_PFR) {
510 pf->pfr_count++;
511 ice_rebuild(pf, reset_type);
512 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
513 clear_bit(__ICE_PFR_REQ, pf->state);
514 ice_reset_all_vfs(pf, true);
515 }
516}
517
518/**
519 * ice_reset_subtask - Set up for resetting the device and driver
520 * @pf: board private structure
521 */
522static void ice_reset_subtask(struct ice_pf *pf)
523{
524 enum ice_reset_req reset_type = ICE_RESET_INVAL;
525
526 /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
527 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
528 * of reset is pending and sets bits in pf->state indicating the reset
529 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
530 * prepare for pending reset if not already (for PF software-initiated
531 * global resets the software should already be prepared for it as
532 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
533 * by firmware or software on other PFs, that bit is not set so prepare
534 * for the reset now), poll for reset done, rebuild and return.
535 */
536 if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
537 /* Perform the largest reset requested */
538 if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
539 reset_type = ICE_RESET_CORER;
540 if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
541 reset_type = ICE_RESET_GLOBR;
542 if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
543 reset_type = ICE_RESET_EMPR;
544 /* return if no valid reset type requested */
545 if (reset_type == ICE_RESET_INVAL)
546 return;
547 ice_prepare_for_reset(pf);
548
549 /* make sure we are ready to rebuild */
550 if (ice_check_reset(&pf->hw)) {
551 set_bit(__ICE_RESET_FAILED, pf->state);
552 } else {
553 /* done with reset. start rebuild */
554 pf->hw.reset_ongoing = false;
555 ice_rebuild(pf, reset_type);
556 /* clear bit to resume normal operations, but
557 * ICE_NEEDS_RESTART bit is set in case rebuild failed
558 */
559 clear_bit(__ICE_RESET_OICR_RECV, pf->state);
560 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
561 clear_bit(__ICE_PFR_REQ, pf->state);
562 clear_bit(__ICE_CORER_REQ, pf->state);
563 clear_bit(__ICE_GLOBR_REQ, pf->state);
564 ice_reset_all_vfs(pf, true);
565 }
566
567 return;
568 }
569
570 /* No pending resets to finish processing. Check for new resets */
571 if (test_bit(__ICE_PFR_REQ, pf->state))
572 reset_type = ICE_RESET_PFR;
573 if (test_bit(__ICE_CORER_REQ, pf->state))
574 reset_type = ICE_RESET_CORER;
575 if (test_bit(__ICE_GLOBR_REQ, pf->state))
576 reset_type = ICE_RESET_GLOBR;
577 /* If no valid reset type requested just return */
578 if (reset_type == ICE_RESET_INVAL)
579 return;
580
581 /* reset if not already down or busy */
582 if (!test_bit(__ICE_DOWN, pf->state) &&
583 !test_bit(__ICE_CFG_BUSY, pf->state)) {
584 ice_do_reset(pf, reset_type);
585 }
586}
587
588/**
589 * ice_print_topo_conflict - print topology conflict message
590 * @vsi: the VSI whose topology status is being checked
591 */
592static void ice_print_topo_conflict(struct ice_vsi *vsi)
593{
594 switch (vsi->port_info->phy.link_info.topo_media_conflict) {
595 case ICE_AQ_LINK_TOPO_CONFLICT:
596 case ICE_AQ_LINK_MEDIA_CONFLICT:
597 case ICE_AQ_LINK_TOPO_UNREACH_PRT:
598 case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
599 case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
600 netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
601 break;
602 case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
603 netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
604 break;
605 default:
606 break;
607 }
608}
609
610/**
611 * ice_print_link_msg - print link up or down message
612 * @vsi: the VSI whose link status is being queried
613 * @isup: boolean for if the link is now up or down
614 */
615void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
616{
617 struct ice_aqc_get_phy_caps_data *caps;
618 const char *an_advertised;
619 enum ice_status status;
620 const char *fec_req;
621 const char *speed;
622 const char *fec;
623 const char *fc;
624 const char *an;
625
626 if (!vsi)
627 return;
628
629 if (vsi->current_isup == isup)
630 return;
631
632 vsi->current_isup = isup;
633
634 if (!isup) {
635 netdev_info(vsi->netdev, "NIC Link is Down\n");
636 return;
637 }
638
639 switch (vsi->port_info->phy.link_info.link_speed) {
640 case ICE_AQ_LINK_SPEED_100GB:
641 speed = "100 G";
642 break;
643 case ICE_AQ_LINK_SPEED_50GB:
644 speed = "50 G";
645 break;
646 case ICE_AQ_LINK_SPEED_40GB:
647 speed = "40 G";
648 break;
649 case ICE_AQ_LINK_SPEED_25GB:
650 speed = "25 G";
651 break;
652 case ICE_AQ_LINK_SPEED_20GB:
653 speed = "20 G";
654 break;
655 case ICE_AQ_LINK_SPEED_10GB:
656 speed = "10 G";
657 break;
658 case ICE_AQ_LINK_SPEED_5GB:
659 speed = "5 G";
660 break;
661 case ICE_AQ_LINK_SPEED_2500MB:
662 speed = "2.5 G";
663 break;
664 case ICE_AQ_LINK_SPEED_1000MB:
665 speed = "1 G";
666 break;
667 case ICE_AQ_LINK_SPEED_100MB:
668 speed = "100 M";
669 break;
670 default:
671 speed = "Unknown";
672 break;
673 }
674
675 switch (vsi->port_info->fc.current_mode) {
676 case ICE_FC_FULL:
677 fc = "Rx/Tx";
678 break;
679 case ICE_FC_TX_PAUSE:
680 fc = "Tx";
681 break;
682 case ICE_FC_RX_PAUSE:
683 fc = "Rx";
684 break;
685 case ICE_FC_NONE:
686 fc = "None";
687 break;
688 default:
689 fc = "Unknown";
690 break;
691 }
692
693 /* Get FEC mode based on negotiated link info */
694 switch (vsi->port_info->phy.link_info.fec_info) {
695 case ICE_AQ_LINK_25G_RS_528_FEC_EN:
696 case ICE_AQ_LINK_25G_RS_544_FEC_EN:
697 fec = "RS-FEC";
698 break;
699 case ICE_AQ_LINK_25G_KR_FEC_EN:
700 fec = "FC-FEC/BASE-R";
701 break;
702 default:
703 fec = "NONE";
704 break;
705 }
706
707 /* check if autoneg completed, might be false due to not supported */
708 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
709 an = "True";
710 else
711 an = "False";
712
713 /* Get FEC mode requested based on PHY caps last SW configuration */
714 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
715 if (!caps) {
716 fec_req = "Unknown";
717 an_advertised = "Unknown";
718 goto done;
719 }
720
721 status = ice_aq_get_phy_caps(vsi->port_info, false,
722 ICE_AQC_REPORT_SW_CFG, caps, NULL);
723 if (status)
724 netdev_info(vsi->netdev, "Get phy capability failed.\n");
725
726 an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
727
728 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
729 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
730 fec_req = "RS-FEC";
731 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
732 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
733 fec_req = "FC-FEC/BASE-R";
734 else
735 fec_req = "NONE";
736
737 kfree(caps);
738
739done:
740 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
741 speed, fec_req, fec, an_advertised, an, fc);
742 ice_print_topo_conflict(vsi);
743}
744
745/**
746 * ice_vsi_link_event - update the VSI's netdev
747 * @vsi: the VSI on which the link event occurred
748 * @link_up: whether or not the VSI needs to be set up or down
749 */
750static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
751{
752 if (!vsi)
753 return;
754
755 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
756 return;
757
758 if (vsi->type == ICE_VSI_PF) {
759 if (link_up == netif_carrier_ok(vsi->netdev))
760 return;
761
762 if (link_up) {
763 netif_carrier_on(vsi->netdev);
764 netif_tx_wake_all_queues(vsi->netdev);
765 } else {
766 netif_carrier_off(vsi->netdev);
767 netif_tx_stop_all_queues(vsi->netdev);
768 }
769 }
770}
771
772/**
773 * ice_set_dflt_mib - send a default config MIB to the FW
774 * @pf: private PF struct
775 *
776 * This function sends a default configuration MIB to the FW.
777 *
778 * If this function errors out at any point, the driver is still able to
779 * function. The main impact is that LFC may not operate as expected.
780 * Therefore an error state in this function should be treated with a DBG
781 * message and continue on with driver rebuild/reenable.
782 */
783static void ice_set_dflt_mib(struct ice_pf *pf)
784{
785 struct device *dev = ice_pf_to_dev(pf);
786 u8 mib_type, *buf, *lldpmib = NULL;
787 u16 len, typelen, offset = 0;
788 struct ice_lldp_org_tlv *tlv;
789 struct ice_hw *hw;
790 u32 ouisubtype;
791
792 if (!pf) {
793 dev_dbg(dev, "%s NULL pf pointer\n", __func__);
794 return;
795 }
796
797 hw = &pf->hw;
798 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
799 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
800 if (!lldpmib) {
801 dev_dbg(dev, "%s Failed to allocate MIB memory\n",
802 __func__);
803 return;
804 }
805
806 /* Add ETS CFG TLV */
807 tlv = (struct ice_lldp_org_tlv *)lldpmib;
808 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
809 ICE_IEEE_ETS_TLV_LEN);
810 tlv->typelen = htons(typelen);
811 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
812 ICE_IEEE_SUBTYPE_ETS_CFG);
813 tlv->ouisubtype = htonl(ouisubtype);
814
815 buf = tlv->tlvinfo;
816 buf[0] = 0;
817
818 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
819 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
820 * Octets 13 - 20 are TSA values - leave as zeros
821 */
822 buf[5] = 0x64;
823 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
824 offset += len + 2;
825 tlv = (struct ice_lldp_org_tlv *)
826 ((char *)tlv + sizeof(tlv->typelen) + len);
827
828 /* Add ETS REC TLV */
829 buf = tlv->tlvinfo;
830 tlv->typelen = htons(typelen);
831
832 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
833 ICE_IEEE_SUBTYPE_ETS_REC);
834 tlv->ouisubtype = htonl(ouisubtype);
835
836 /* First octet of buf is reserved
837 * Octets 1 - 4 map UP to TC - all UPs map to zero
838 * Octets 5 - 12 are BW values - set TC 0 to 100%.
839 * Octets 13 - 20 are TSA value - leave as zeros
840 */
841 buf[5] = 0x64;
842 offset += len + 2;
843 tlv = (struct ice_lldp_org_tlv *)
844 ((char *)tlv + sizeof(tlv->typelen) + len);
845
846 /* Add PFC CFG TLV */
847 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
848 ICE_IEEE_PFC_TLV_LEN);
849 tlv->typelen = htons(typelen);
850
851 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
852 ICE_IEEE_SUBTYPE_PFC_CFG);
853 tlv->ouisubtype = htonl(ouisubtype);
854
855 /* Octet 1 left as all zeros - PFC disabled */
856 buf[0] = 0x08;
857 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
858 offset += len + 2;
859
860 if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
861 dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
862
863 kfree(lldpmib);
864}
865
866/**
867 * ice_link_event - process the link event
868 * @pf: PF that the link event is associated with
869 * @pi: port_info for the port that the link event is associated with
870 * @link_up: true if the physical link is up and false if it is down
871 * @link_speed: current link speed received from the link event
872 *
873 * Returns 0 on success and negative on failure
874 */
875static int
876ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
877 u16 link_speed)
878{
879 struct device *dev = ice_pf_to_dev(pf);
880 struct ice_phy_info *phy_info;
881 struct ice_vsi *vsi;
882 u16 old_link_speed;
883 bool old_link;
884 int result;
885
886 phy_info = &pi->phy;
887 phy_info->link_info_old = phy_info->link_info;
888
889 old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
890 old_link_speed = phy_info->link_info_old.link_speed;
891
892 /* update the link info structures and re-enable link events,
893 * don't bail on failure due to other book keeping needed
894 */
895 result = ice_update_link_info(pi);
896 if (result)
897 dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
898 pi->lport);
899
900 /* Check if the link state is up after updating link info, and treat
901 * this event as an UP event since the link is actually UP now.
902 */
903 if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
904 link_up = true;
905
906 vsi = ice_get_main_vsi(pf);
907 if (!vsi || !vsi->port_info)
908 return -EINVAL;
909
910 /* turn off PHY if media was removed */
911 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
912 !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
913 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
914
915 result = ice_aq_set_link_restart_an(pi, false, NULL);
916 if (result) {
917 dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
918 vsi->vsi_num, result);
919 return result;
920 }
921 }
922
923 /* if the old link up/down and speed is the same as the new */
924 if (link_up == old_link && link_speed == old_link_speed)
925 return result;
926
927 if (ice_is_dcb_active(pf)) {
928 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
929 ice_dcb_rebuild(pf);
930 } else {
931 if (link_up)
932 ice_set_dflt_mib(pf);
933 }
934 ice_vsi_link_event(vsi, link_up);
935 ice_print_link_msg(vsi, link_up);
936
937 ice_vc_notify_link_state(pf);
938
939 return result;
940}
941
942/**
943 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
944 * @pf: board private structure
945 */
946static void ice_watchdog_subtask(struct ice_pf *pf)
947{
948 int i;
949
950 /* if interface is down do nothing */
951 if (test_bit(__ICE_DOWN, pf->state) ||
952 test_bit(__ICE_CFG_BUSY, pf->state))
953 return;
954
955 /* make sure we don't do these things too often */
956 if (time_before(jiffies,
957 pf->serv_tmr_prev + pf->serv_tmr_period))
958 return;
959
960 pf->serv_tmr_prev = jiffies;
961
962 /* Update the stats for active netdevs so the network stack
963 * can look at updated numbers whenever it cares to
964 */
965 ice_update_pf_stats(pf);
966 ice_for_each_vsi(pf, i)
967 if (pf->vsi[i] && pf->vsi[i]->netdev)
968 ice_update_vsi_stats(pf->vsi[i]);
969}
970
971/**
972 * ice_init_link_events - enable/initialize link events
973 * @pi: pointer to the port_info instance
974 *
975 * Returns -EIO on failure, 0 on success
976 */
977static int ice_init_link_events(struct ice_port_info *pi)
978{
979 u16 mask;
980
981 mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
982 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
983
984 if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
985 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
986 pi->lport);
987 return -EIO;
988 }
989
990 if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
991 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
992 pi->lport);
993 return -EIO;
994 }
995
996 return 0;
997}
998
999/**
1000 * ice_handle_link_event - handle link event via ARQ
1001 * @pf: PF that the link event is associated with
1002 * @event: event structure containing link status info
1003 */
1004static int
1005ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1006{
1007 struct ice_aqc_get_link_status_data *link_data;
1008 struct ice_port_info *port_info;
1009 int status;
1010
1011 link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1012 port_info = pf->hw.port_info;
1013 if (!port_info)
1014 return -EINVAL;
1015
1016 status = ice_link_event(pf, port_info,
1017 !!(link_data->link_info & ICE_AQ_LINK_UP),
1018 le16_to_cpu(link_data->link_speed));
1019 if (status)
1020 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1021 status);
1022
1023 return status;
1024}
1025
1026enum ice_aq_task_state {
1027 ICE_AQ_TASK_WAITING = 0,
1028 ICE_AQ_TASK_COMPLETE,
1029 ICE_AQ_TASK_CANCELED,
1030};
1031
1032struct ice_aq_task {
1033 struct hlist_node entry;
1034
1035 u16 opcode;
1036 struct ice_rq_event_info *event;
1037 enum ice_aq_task_state state;
1038};
1039
1040/**
1041 * ice_wait_for_aq_event - Wait for an AdminQ event from firmware
1042 * @pf: pointer to the PF private structure
1043 * @opcode: the opcode to wait for
1044 * @timeout: how long to wait, in jiffies
1045 * @event: storage for the event info
1046 *
1047 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1048 * current thread will be put to sleep until the specified event occurs or
1049 * until the given timeout is reached.
1050 *
1051 * To obtain only the descriptor contents, pass an event without an allocated
1052 * msg_buf. If the complete data buffer is desired, allocate the
1053 * event->msg_buf with enough space ahead of time.
1054 *
1055 * Returns: zero on success, or a negative error code on failure.
1056 */
1057int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1058 struct ice_rq_event_info *event)
1059{
1060 struct ice_aq_task *task;
1061 long ret;
1062 int err;
1063
1064 task = kzalloc(sizeof(*task), GFP_KERNEL);
1065 if (!task)
1066 return -ENOMEM;
1067
1068 INIT_HLIST_NODE(&task->entry);
1069 task->opcode = opcode;
1070 task->event = event;
1071 task->state = ICE_AQ_TASK_WAITING;
1072
1073 spin_lock_bh(&pf->aq_wait_lock);
1074 hlist_add_head(&task->entry, &pf->aq_wait_list);
1075 spin_unlock_bh(&pf->aq_wait_lock);
1076
1077 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1078 timeout);
1079 switch (task->state) {
1080 case ICE_AQ_TASK_WAITING:
1081 err = ret < 0 ? ret : -ETIMEDOUT;
1082 break;
1083 case ICE_AQ_TASK_CANCELED:
1084 err = ret < 0 ? ret : -ECANCELED;
1085 break;
1086 case ICE_AQ_TASK_COMPLETE:
1087 err = ret < 0 ? ret : 0;
1088 break;
1089 default:
1090 WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1091 err = -EINVAL;
1092 break;
1093 }
1094
1095 spin_lock_bh(&pf->aq_wait_lock);
1096 hlist_del(&task->entry);
1097 spin_unlock_bh(&pf->aq_wait_lock);
1098 kfree(task);
1099
1100 return err;
1101}
1102
1103/**
1104 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1105 * @pf: pointer to the PF private structure
1106 * @opcode: the opcode of the event
1107 * @event: the event to check
1108 *
1109 * Loops over the current list of pending threads waiting for an AdminQ event.
1110 * For each matching task, copy the contents of the event into the task
1111 * structure and wake up the thread.
1112 *
1113 * If multiple threads wait for the same opcode, they will all be woken up.
1114 *
1115 * Note that event->msg_buf will only be duplicated if the event has a buffer
1116 * with enough space already allocated. Otherwise, only the descriptor and
1117 * message length will be copied.
1118 *
1119 * Returns: true if an event was found, false otherwise
1120 */
1121static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1122 struct ice_rq_event_info *event)
1123{
1124 struct ice_aq_task *task;
1125 bool found = false;
1126
1127 spin_lock_bh(&pf->aq_wait_lock);
1128 hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1129 if (task->state || task->opcode != opcode)
1130 continue;
1131
1132 memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1133 task->event->msg_len = event->msg_len;
1134
1135 /* Only copy the data buffer if a destination was set */
1136 if (task->event->msg_buf &&
1137 task->event->buf_len > event->buf_len) {
1138 memcpy(task->event->msg_buf, event->msg_buf,
1139 event->buf_len);
1140 task->event->buf_len = event->buf_len;
1141 }
1142
1143 task->state = ICE_AQ_TASK_COMPLETE;
1144 found = true;
1145 }
1146 spin_unlock_bh(&pf->aq_wait_lock);
1147
1148 if (found)
1149 wake_up(&pf->aq_wait_queue);
1150}
1151
1152/**
1153 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1154 * @pf: the PF private structure
1155 *
1156 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1157 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1158 */
1159static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1160{
1161 struct ice_aq_task *task;
1162
1163 spin_lock_bh(&pf->aq_wait_lock);
1164 hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1165 task->state = ICE_AQ_TASK_CANCELED;
1166 spin_unlock_bh(&pf->aq_wait_lock);
1167
1168 wake_up(&pf->aq_wait_queue);
1169}
1170
1171/**
1172 * __ice_clean_ctrlq - helper function to clean controlq rings
1173 * @pf: ptr to struct ice_pf
1174 * @q_type: specific Control queue type
1175 */
1176static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1177{
1178 struct device *dev = ice_pf_to_dev(pf);
1179 struct ice_rq_event_info event;
1180 struct ice_hw *hw = &pf->hw;
1181 struct ice_ctl_q_info *cq;
1182 u16 pending, i = 0;
1183 const char *qtype;
1184 u32 oldval, val;
1185
1186 /* Do not clean control queue if/when PF reset fails */
1187 if (test_bit(__ICE_RESET_FAILED, pf->state))
1188 return 0;
1189
1190 switch (q_type) {
1191 case ICE_CTL_Q_ADMIN:
1192 cq = &hw->adminq;
1193 qtype = "Admin";
1194 break;
1195 case ICE_CTL_Q_MAILBOX:
1196 cq = &hw->mailboxq;
1197 qtype = "Mailbox";
1198 break;
1199 default:
1200 dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1201 return 0;
1202 }
1203
1204 /* check for error indications - PF_xx_AxQLEN register layout for
1205 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1206 */
1207 val = rd32(hw, cq->rq.len);
1208 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1209 PF_FW_ARQLEN_ARQCRIT_M)) {
1210 oldval = val;
1211 if (val & PF_FW_ARQLEN_ARQVFE_M)
1212 dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1213 qtype);
1214 if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1215 dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1216 qtype);
1217 }
1218 if (val & PF_FW_ARQLEN_ARQCRIT_M)
1219 dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1220 qtype);
1221 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1222 PF_FW_ARQLEN_ARQCRIT_M);
1223 if (oldval != val)
1224 wr32(hw, cq->rq.len, val);
1225 }
1226
1227 val = rd32(hw, cq->sq.len);
1228 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1229 PF_FW_ATQLEN_ATQCRIT_M)) {
1230 oldval = val;
1231 if (val & PF_FW_ATQLEN_ATQVFE_M)
1232 dev_dbg(dev, "%s Send Queue VF Error detected\n",
1233 qtype);
1234 if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1235 dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1236 qtype);
1237 }
1238 if (val & PF_FW_ATQLEN_ATQCRIT_M)
1239 dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1240 qtype);
1241 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1242 PF_FW_ATQLEN_ATQCRIT_M);
1243 if (oldval != val)
1244 wr32(hw, cq->sq.len, val);
1245 }
1246
1247 event.buf_len = cq->rq_buf_size;
1248 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1249 if (!event.msg_buf)
1250 return 0;
1251
1252 do {
1253 enum ice_status ret;
1254 u16 opcode;
1255
1256 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1257 if (ret == ICE_ERR_AQ_NO_WORK)
1258 break;
1259 if (ret) {
1260 dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1261 ice_stat_str(ret));
1262 break;
1263 }
1264
1265 opcode = le16_to_cpu(event.desc.opcode);
1266
1267 /* Notify any thread that might be waiting for this event */
1268 ice_aq_check_events(pf, opcode, &event);
1269
1270 switch (opcode) {
1271 case ice_aqc_opc_get_link_status:
1272 if (ice_handle_link_event(pf, &event))
1273 dev_err(dev, "Could not handle link event\n");
1274 break;
1275 case ice_aqc_opc_event_lan_overflow:
1276 ice_vf_lan_overflow_event(pf, &event);
1277 break;
1278 case ice_mbx_opc_send_msg_to_pf:
1279 ice_vc_process_vf_msg(pf, &event);
1280 break;
1281 case ice_aqc_opc_fw_logging:
1282 ice_output_fw_log(hw, &event.desc, event.msg_buf);
1283 break;
1284 case ice_aqc_opc_lldp_set_mib_change:
1285 ice_dcb_process_lldp_set_mib_change(pf, &event);
1286 break;
1287 default:
1288 dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1289 qtype, opcode);
1290 break;
1291 }
1292 } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1293
1294 kfree(event.msg_buf);
1295
1296 return pending && (i == ICE_DFLT_IRQ_WORK);
1297}
1298
1299/**
1300 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1301 * @hw: pointer to hardware info
1302 * @cq: control queue information
1303 *
1304 * returns true if there are pending messages in a queue, false if there aren't
1305 */
1306static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1307{
1308 u16 ntu;
1309
1310 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1311 return cq->rq.next_to_clean != ntu;
1312}
1313
1314/**
1315 * ice_clean_adminq_subtask - clean the AdminQ rings
1316 * @pf: board private structure
1317 */
1318static void ice_clean_adminq_subtask(struct ice_pf *pf)
1319{
1320 struct ice_hw *hw = &pf->hw;
1321
1322 if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
1323 return;
1324
1325 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1326 return;
1327
1328 clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
1329
1330 /* There might be a situation where new messages arrive to a control
1331 * queue between processing the last message and clearing the
1332 * EVENT_PENDING bit. So before exiting, check queue head again (using
1333 * ice_ctrlq_pending) and process new messages if any.
1334 */
1335 if (ice_ctrlq_pending(hw, &hw->adminq))
1336 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1337
1338 ice_flush(hw);
1339}
1340
1341/**
1342 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1343 * @pf: board private structure
1344 */
1345static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1346{
1347 struct ice_hw *hw = &pf->hw;
1348
1349 if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1350 return;
1351
1352 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1353 return;
1354
1355 clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1356
1357 if (ice_ctrlq_pending(hw, &hw->mailboxq))
1358 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1359
1360 ice_flush(hw);
1361}
1362
1363/**
1364 * ice_service_task_schedule - schedule the service task to wake up
1365 * @pf: board private structure
1366 *
1367 * If not already scheduled, this puts the task into the work queue.
1368 */
1369void ice_service_task_schedule(struct ice_pf *pf)
1370{
1371 if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
1372 !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
1373 !test_bit(__ICE_NEEDS_RESTART, pf->state))
1374 queue_work(ice_wq, &pf->serv_task);
1375}
1376
1377/**
1378 * ice_service_task_complete - finish up the service task
1379 * @pf: board private structure
1380 */
1381static void ice_service_task_complete(struct ice_pf *pf)
1382{
1383 WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
1384
1385 /* force memory (pf->state) to sync before next service task */
1386 smp_mb__before_atomic();
1387 clear_bit(__ICE_SERVICE_SCHED, pf->state);
1388}
1389
1390/**
1391 * ice_service_task_stop - stop service task and cancel works
1392 * @pf: board private structure
1393 *
1394 * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
1395 * 1 otherwise.
1396 */
1397static int ice_service_task_stop(struct ice_pf *pf)
1398{
1399 int ret;
1400
1401 ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);
1402
1403 if (pf->serv_tmr.function)
1404 del_timer_sync(&pf->serv_tmr);
1405 if (pf->serv_task.func)
1406 cancel_work_sync(&pf->serv_task);
1407
1408 clear_bit(__ICE_SERVICE_SCHED, pf->state);
1409 return ret;
1410}
1411
1412/**
1413 * ice_service_task_restart - restart service task and schedule works
1414 * @pf: board private structure
1415 *
1416 * This function is needed for suspend and resume works (e.g WoL scenario)
1417 */
1418static void ice_service_task_restart(struct ice_pf *pf)
1419{
1420 clear_bit(__ICE_SERVICE_DIS, pf->state);
1421 ice_service_task_schedule(pf);
1422}
1423
1424/**
1425 * ice_service_timer - timer callback to schedule service task
1426 * @t: pointer to timer_list
1427 */
1428static void ice_service_timer(struct timer_list *t)
1429{
1430 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1431
1432 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1433 ice_service_task_schedule(pf);
1434}
1435
1436/**
1437 * ice_handle_mdd_event - handle malicious driver detect event
1438 * @pf: pointer to the PF structure
1439 *
1440 * Called from service task. OICR interrupt handler indicates MDD event.
1441 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1442 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1443 * disable the queue, the PF can be configured to reset the VF using ethtool
1444 * private flag mdd-auto-reset-vf.
1445 */
1446static void ice_handle_mdd_event(struct ice_pf *pf)
1447{
1448 struct device *dev = ice_pf_to_dev(pf);
1449 struct ice_hw *hw = &pf->hw;
1450 unsigned int i;
1451 u32 reg;
1452
1453 if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
1454 /* Since the VF MDD event logging is rate limited, check if
1455 * there are pending MDD events.
1456 */
1457 ice_print_vfs_mdd_events(pf);
1458 return;
1459 }
1460
1461 /* find what triggered an MDD event */
1462 reg = rd32(hw, GL_MDET_TX_PQM);
1463 if (reg & GL_MDET_TX_PQM_VALID_M) {
1464 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1465 GL_MDET_TX_PQM_PF_NUM_S;
1466 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1467 GL_MDET_TX_PQM_VF_NUM_S;
1468 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1469 GL_MDET_TX_PQM_MAL_TYPE_S;
1470 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1471 GL_MDET_TX_PQM_QNUM_S);
1472
1473 if (netif_msg_tx_err(pf))
1474 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1475 event, queue, pf_num, vf_num);
1476 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1477 }
1478
1479 reg = rd32(hw, GL_MDET_TX_TCLAN);
1480 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1481 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1482 GL_MDET_TX_TCLAN_PF_NUM_S;
1483 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1484 GL_MDET_TX_TCLAN_VF_NUM_S;
1485 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1486 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1487 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1488 GL_MDET_TX_TCLAN_QNUM_S);
1489
1490 if (netif_msg_tx_err(pf))
1491 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1492 event, queue, pf_num, vf_num);
1493 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1494 }
1495
1496 reg = rd32(hw, GL_MDET_RX);
1497 if (reg & GL_MDET_RX_VALID_M) {
1498 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1499 GL_MDET_RX_PF_NUM_S;
1500 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1501 GL_MDET_RX_VF_NUM_S;
1502 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1503 GL_MDET_RX_MAL_TYPE_S;
1504 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1505 GL_MDET_RX_QNUM_S);
1506
1507 if (netif_msg_rx_err(pf))
1508 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1509 event, queue, pf_num, vf_num);
1510 wr32(hw, GL_MDET_RX, 0xffffffff);
1511 }
1512
1513 /* check to see if this PF caused an MDD event */
1514 reg = rd32(hw, PF_MDET_TX_PQM);
1515 if (reg & PF_MDET_TX_PQM_VALID_M) {
1516 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1517 if (netif_msg_tx_err(pf))
1518 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1519 }
1520
1521 reg = rd32(hw, PF_MDET_TX_TCLAN);
1522 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1523 wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1524 if (netif_msg_tx_err(pf))
1525 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1526 }
1527
1528 reg = rd32(hw, PF_MDET_RX);
1529 if (reg & PF_MDET_RX_VALID_M) {
1530 wr32(hw, PF_MDET_RX, 0xFFFF);
1531 if (netif_msg_rx_err(pf))
1532 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1533 }
1534
1535 /* Check to see if one of the VFs caused an MDD event, and then
1536 * increment counters and set print pending
1537 */
1538 ice_for_each_vf(pf, i) {
1539 struct ice_vf *vf = &pf->vf[i];
1540
1541 reg = rd32(hw, VP_MDET_TX_PQM(i));
1542 if (reg & VP_MDET_TX_PQM_VALID_M) {
1543 wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1544 vf->mdd_tx_events.count++;
1545 set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1546 if (netif_msg_tx_err(pf))
1547 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1548 i);
1549 }
1550
1551 reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1552 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1553 wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1554 vf->mdd_tx_events.count++;
1555 set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1556 if (netif_msg_tx_err(pf))
1557 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1558 i);
1559 }
1560
1561 reg = rd32(hw, VP_MDET_TX_TDPU(i));
1562 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1563 wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1564 vf->mdd_tx_events.count++;
1565 set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1566 if (netif_msg_tx_err(pf))
1567 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1568 i);
1569 }
1570
1571 reg = rd32(hw, VP_MDET_RX(i));
1572 if (reg & VP_MDET_RX_VALID_M) {
1573 wr32(hw, VP_MDET_RX(i), 0xFFFF);
1574 vf->mdd_rx_events.count++;
1575 set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1576 if (netif_msg_rx_err(pf))
1577 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1578 i);
1579
1580 /* Since the queue is disabled on VF Rx MDD events, the
1581 * PF can be configured to reset the VF through ethtool
1582 * private flag mdd-auto-reset-vf.
1583 */
1584 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1585 /* VF MDD event counters will be cleared by
1586 * reset, so print the event prior to reset.
1587 */
1588 ice_print_vf_rx_mdd_event(vf);
1589				ice_reset_vf(vf, false);
1590 }
1591 }
1592 }
1593
1594 ice_print_vfs_mdd_events(pf);
1595}
1596
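/* Sketch: each open-coded "(reg & FOO_M) >> FOO_S" extraction above is
 * equivalent to FIELD_GET() from <linux/bitfield.h>; one field is shown
 * below as a possible condensation (an assumption, not the driver's code).
 */
#if 0	/* example only, compiled out */
	u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
	u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
#endif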
1597/**
1598 * ice_force_phys_link_state - Force the physical link state
1599 * @vsi: VSI to force the physical link state to up/down
1600 * @link_up: true/false indicates to set the physical link to up/down
1601 *
1602 * Force the physical link state by getting the current PHY capabilities from
1603 * hardware and setting the PHY config based on the determined capabilities. If the
1604 * link changes, a link event will be triggered because both the Enable Automatic
1605 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1606 *
1607 * Returns 0 on success, negative on failure
1608 */
1609static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1610{
1611 struct ice_aqc_get_phy_caps_data *pcaps;
1612 struct ice_aqc_set_phy_cfg_data *cfg;
1613 struct ice_port_info *pi;
1614 struct device *dev;
1615 int retcode;
1616
1617 if (!vsi || !vsi->port_info || !vsi->back)
1618 return -EINVAL;
1619 if (vsi->type != ICE_VSI_PF)
1620 return 0;
1621
1622 dev = ice_pf_to_dev(vsi->back);
1623
1624 pi = vsi->port_info;
1625
1626 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1627 if (!pcaps)
1628 return -ENOMEM;
1629
1630 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1631 NULL);
1632 if (retcode) {
1633 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1634 vsi->vsi_num, retcode);
1635 retcode = -EIO;
1636 goto out;
1637 }
1638
1639 /* No change in link */
1640 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1641 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1642 goto out;
1643
1644 /* Use the current user PHY configuration. The current user PHY
1645 * configuration is initialized during probe from PHY capabilities
1646 * software mode, and updated on set PHY configuration.
1647 */
1648 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1649 if (!cfg) {
1650 retcode = -ENOMEM;
1651 goto out;
1652 }
1653
1654 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1655 if (link_up)
1656 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1657 else
1658 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1659
1660 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1661 if (retcode) {
1662 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1663 vsi->vsi_num, retcode);
1664 retcode = -EIO;
1665 }
1666
1667 kfree(cfg);
1668out:
1669 kfree(pcaps);
1670 return retcode;
1671}
1672
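/* Illustrative sketch (assumption, not driver code): a stop path could use
 * the helper above to force the link down when the link-down-on-close
 * private flag is set.
 */
#if 0	/* example only, compiled out */
static void example_stop(struct ice_vsi *vsi)
{
	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
		ice_force_phys_link_state(vsi, false);
}
#endif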
1673/**
1674 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1675 * @pi: port info structure
1676 *
1677 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1678 */
1679static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1680{
1681 struct ice_aqc_get_phy_caps_data *pcaps;
1682 struct ice_pf *pf = pi->hw->back;
1683 enum ice_status status;
1684 int err = 0;
1685
1686 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1687 if (!pcaps)
1688 return -ENOMEM;
1689
1690 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
1691 NULL);
1692
1693 if (status) {
1694 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1695 err = -EIO;
1696 goto out;
1697 }
1698
1699 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1700 pf->nvm_phy_type_lo = pcaps->phy_type_low;
1701
1702out:
1703 kfree(pcaps);
1704 return err;
1705}
1706
1707/**
1708 * ice_init_link_dflt_override - Initialize link default override
1709 * @pi: port info structure
1710 *
1711 * Initialize link default override and PHY total port shutdown during probe
1712 */
1713static void ice_init_link_dflt_override(struct ice_port_info *pi)
1714{
1715 struct ice_link_default_override_tlv *ldo;
1716 struct ice_pf *pf = pi->hw->back;
1717
1718 ldo = &pf->link_dflt_override;
1719 if (ice_get_link_default_override(ldo, pi))
1720 return;
1721
1722 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1723 return;
1724
1725 /* Enable Total Port Shutdown (override/replace link-down-on-close
1726 * ethtool private flag) for ports with Port Disable bit set.
1727 */
1728 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1729 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1730}
1731
1732/**
1733 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1734 * @pi: port info structure
1735 *
1736 * If default override is enabled, initialize the user PHY cfg speed and FEC
1737 * settings using the default override mask from the NVM.
1738 *
1739 * The PHY should only be configured with the default override settings the
1740 * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1741 * is used to indicate that the user PHY cfg default override is initialized
1742 * and the PHY has not been configured with the default override settings. The
1743 * state is set here, and cleared in ice_configure_phy the first time the PHY is
1744 * configured.
1745 */
1746static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1747{
1748 struct ice_link_default_override_tlv *ldo;
1749 struct ice_aqc_set_phy_cfg_data *cfg;
1750 struct ice_phy_info *phy = &pi->phy;
1751 struct ice_pf *pf = pi->hw->back;
1752
1753 ldo = &pf->link_dflt_override;
1754
1755	/* If link default override is enabled, use it to mask NVM PHY capabilities
1756 * for speed and FEC default configuration.
1757 */
1758 cfg = &phy->curr_user_phy_cfg;
1759
1760 if (ldo->phy_type_low || ldo->phy_type_high) {
1761 cfg->phy_type_low = pf->nvm_phy_type_lo &
1762 cpu_to_le64(ldo->phy_type_low);
1763 cfg->phy_type_high = pf->nvm_phy_type_hi &
1764 cpu_to_le64(ldo->phy_type_high);
1765 }
1766 cfg->link_fec_opt = ldo->fec_options;
1767 phy->curr_user_fec_req = ICE_FEC_AUTO;
1768
1769 set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1770}
1771
1772/**
1773 * ice_init_phy_user_cfg - Initialize the PHY user configuration
1774 * @pi: port info structure
1775 *
1776 * Initialize the current user PHY configuration, speed, FEC, and FC requested
1777 * mode to default. The PHY defaults are from the get PHY capabilities topology
1778 * with media, so call this when media is first available. An error is returned if
1779 * called when media is not available. The PHY initialization completed state is
1780 * set here.
1781 *
1782 * These values are used when setting the PHY configuration. The stored user
1783 * PHY configuration is updated each time set PHY config is issued.
1784 * Returns 0 on success, negative on failure
1785 */
1786static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1787{
1788 struct ice_aqc_get_phy_caps_data *pcaps;
1789 struct ice_phy_info *phy = &pi->phy;
1790 struct ice_pf *pf = pi->hw->back;
1791 enum ice_status status;
1792 struct ice_vsi *vsi;
1793 int err = 0;
1794
1795 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1796 return -EIO;
1797
1798 vsi = ice_get_main_vsi(pf);
1799 if (!vsi)
1800 return -EINVAL;
1801
1802 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1803 if (!pcaps)
1804 return -ENOMEM;
1805
1806 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
1807 NULL);
1808 if (status) {
1809 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1810 err = -EIO;
1811 goto err_out;
1812 }
1813
1814 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1815
1816 /* check if lenient mode is supported and enabled */
1817 if (ice_fw_supports_link_override(&vsi->back->hw) &&
1818 !(pcaps->module_compliance_enforcement &
1819 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1820 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1821
1822 /* if link default override is enabled, initialize user PHY
1823 * configuration with link default override values
1824 */
1825 if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
1826 ice_init_phy_cfg_dflt_override(pi);
1827 goto out;
1828 }
1829 }
1830
1831 /* if link default override is not enabled, initialize PHY using
1832 * topology with media
1833 */
1834 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1835 pcaps->link_fec_options);
1836 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1837
1838out:
1839 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1840 set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
1841err_out:
1842 kfree(pcaps);
1843 return err;
1844}
1845
1846/**
1847 * ice_configure_phy - configure PHY
1848 * @vsi: VSI of PHY
1849 *
1850 * Set the PHY configuration. If the current PHY configuration is the same as
1851 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1852 * configure the PHY based on the get PHY capabilities for topology with media.
1853 */
1854static int ice_configure_phy(struct ice_vsi *vsi)
1855{
1856 struct device *dev = ice_pf_to_dev(vsi->back);
1857 struct ice_aqc_get_phy_caps_data *pcaps;
1858 struct ice_aqc_set_phy_cfg_data *cfg;
1859 struct ice_port_info *pi;
1860 enum ice_status status;
1861 int err = 0;
1862
1863 pi = vsi->port_info;
1864 if (!pi)
1865 return -EINVAL;
1866
1867 /* Ensure we have media as we cannot configure a medialess port */
1868 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1869 return -EPERM;
1870
1871 ice_print_topo_conflict(vsi);
1872
1873 if (vsi->port_info->phy.link_info.topo_media_conflict ==
1874 ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1875 return -EPERM;
1876
1877 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
1878 return ice_force_phys_link_state(vsi, true);
1879
1880 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1881 if (!pcaps)
1882 return -ENOMEM;
1883
1884 /* Get current PHY config */
1885 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1886 NULL);
1887 if (status) {
1888 dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1889 vsi->vsi_num, ice_stat_str(status));
1890 err = -EIO;
1891 goto done;
1892 }
1893
1894 /* If PHY enable link is configured and configuration has not changed,
1895 * there's nothing to do
1896 */
1897 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1898 ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
1899 goto done;
1900
1901 /* Use PHY topology as baseline for configuration */
1902 memset(pcaps, 0, sizeof(*pcaps));
1903 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
1904 NULL);
1905 if (status) {
1906 dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
1907 vsi->vsi_num, ice_stat_str(status));
1908 err = -EIO;
1909 goto done;
1910 }
1911
1912 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1913 if (!cfg) {
1914 err = -ENOMEM;
1915 goto done;
1916 }
1917
1918 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
1919
1920 /* Speed - If default override pending, use curr_user_phy_cfg set in
1921	 * ice_init_phy_cfg_dflt_override().
1922 */
1923 if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
1924 vsi->back->state)) {
1925 cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
1926 cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
1927 } else {
1928 u64 phy_low = 0, phy_high = 0;
1929
1930 ice_update_phy_type(&phy_low, &phy_high,
1931 pi->phy.curr_user_speed_req);
1932 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
1933 cfg->phy_type_high = pcaps->phy_type_high &
1934 cpu_to_le64(phy_high);
1935 }
1936
1937 /* Can't provide what was requested; use PHY capabilities */
1938 if (!cfg->phy_type_low && !cfg->phy_type_high) {
1939 cfg->phy_type_low = pcaps->phy_type_low;
1940 cfg->phy_type_high = pcaps->phy_type_high;
1941 }
1942
1943 /* FEC */
1944 ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
1945
1946 /* Can't provide what was requested; use PHY capabilities */
1947 if (cfg->link_fec_opt !=
1948 (cfg->link_fec_opt & pcaps->link_fec_options)) {
1949 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
1950 cfg->link_fec_opt = pcaps->link_fec_options;
1951 }
1952
1953 /* Flow Control - always supported; no need to check against
1954 * capabilities
1955 */
1956 ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
1957
1958 /* Enable link and link update */
1959 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
1960
1961 status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1962 if (status) {
1963 dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
1964 vsi->vsi_num, ice_stat_str(status));
1965 err = -EIO;
1966 }
1967
1968 kfree(cfg);
1969done:
1970 kfree(pcaps);
1971 return err;
1972}
1973
1974/**
1975 * ice_check_media_subtask - Check for media
1976 * @pf: pointer to PF struct
1977 *
1978 * If media is available, initialize the PHY user configuration if that has
1979 * not been done yet, and configure the PHY if the interface is up.
1980 */
1981static void ice_check_media_subtask(struct ice_pf *pf)
1982{
1983 struct ice_port_info *pi;
1984 struct ice_vsi *vsi;
1985 int err;
1986
1987 /* No need to check for media if it's already present */
1988 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
1989 return;
1990
1991 vsi = ice_get_main_vsi(pf);
1992 if (!vsi)
1993 return;
1994
1995 /* Refresh link info and check if media is present */
1996 pi = vsi->port_info;
1997 err = ice_update_link_info(pi);
1998 if (err)
1999 return;
2000
2001 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2002 if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
2003 ice_init_phy_user_cfg(pi);
2004
2005 /* PHY settings are reset on media insertion, reconfigure
2006 * PHY to preserve settings.
2007 */
2008 if (test_bit(__ICE_DOWN, vsi->state) &&
2009 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2010 return;
2011
2012 err = ice_configure_phy(vsi);
2013 if (!err)
2014 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2015
2016 /* A Link Status Event will be generated; the event handler
2017 * will complete bringing the interface up
2018 */
2019 }
2020}
2021
2022/**
2023 * ice_service_task - manage and run subtasks
2024 * @work: pointer to work_struct contained by the PF struct
2025 */
2026static void ice_service_task(struct work_struct *work)
2027{
2028 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2029 unsigned long start_time = jiffies;
2030
2031 /* subtasks */
2032
2033 /* process reset requests first */
2034 ice_reset_subtask(pf);
2035
2036 /* bail if a reset/recovery cycle is pending or rebuild failed */
2037 if (ice_is_reset_in_progress(pf->state) ||
2038 test_bit(__ICE_SUSPENDED, pf->state) ||
2039 test_bit(__ICE_NEEDS_RESTART, pf->state)) {
2040 ice_service_task_complete(pf);
2041 return;
2042 }
2043
2044 ice_clean_adminq_subtask(pf);
2045 ice_check_media_subtask(pf);
2046 ice_check_for_hang_subtask(pf);
2047 ice_sync_fltr_subtask(pf);
2048 ice_handle_mdd_event(pf);
2049 ice_watchdog_subtask(pf);
2050
2051 if (ice_is_safe_mode(pf)) {
2052 ice_service_task_complete(pf);
2053 return;
2054 }
2055
2056 ice_process_vflr_event(pf);
2057 ice_clean_mailboxq_subtask(pf);
2058 ice_sync_arfs_fltrs(pf);
2059 /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
2060 ice_service_task_complete(pf);
2061
2062 /* If the tasks have taken longer than one service timer period
2063 * or there is more work to be done, reset the service timer to
2064 * schedule the service task now.
2065 */
2066 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2067 test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
2068 test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
2069 test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2070 test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
2071 mod_timer(&pf->serv_tmr, jiffies);
2072}
2073
2074/**
2075 * ice_set_ctrlq_len - helper function to set controlq length
2076 * @hw: pointer to the HW instance
2077 */
2078static void ice_set_ctrlq_len(struct ice_hw *hw)
2079{
2080 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2081 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2082 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2083 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2084 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2085 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2086 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2087 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2088}
2089
2090/**
2091 * ice_schedule_reset - schedule a reset
2092 * @pf: board private structure
2093 * @reset: reset being requested
2094 */
2095int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2096{
2097 struct device *dev = ice_pf_to_dev(pf);
2098
2099 /* bail out if earlier reset has failed */
2100 if (test_bit(__ICE_RESET_FAILED, pf->state)) {
2101 dev_dbg(dev, "earlier reset has failed\n");
2102 return -EIO;
2103 }
2104 /* bail if reset/recovery already in progress */
2105 if (ice_is_reset_in_progress(pf->state)) {
2106 dev_dbg(dev, "Reset already in progress\n");
2107 return -EBUSY;
2108 }
2109
2110 switch (reset) {
2111 case ICE_RESET_PFR:
2112 set_bit(__ICE_PFR_REQ, pf->state);
2113 break;
2114 case ICE_RESET_CORER:
2115 set_bit(__ICE_CORER_REQ, pf->state);
2116 break;
2117 case ICE_RESET_GLOBR:
2118 set_bit(__ICE_GLOBR_REQ, pf->state);
2119 break;
2120 default:
2121 return -EINVAL;
2122 }
2123
2124 ice_service_task_schedule(pf);
2125 return 0;
2126}
2127
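/* Illustrative sketch (assumption, not driver code): requesting a PF reset
 * from a fault handler; the reset itself runs later from the service task.
 */
#if 0	/* example only, compiled out */
static void example_handle_fatal_error(struct ice_pf *pf)
{
	if (ice_schedule_reset(pf, ICE_RESET_PFR))
		dev_dbg(ice_pf_to_dev(pf), "PF reset request rejected\n");
}
#endif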
2128/**
2129 * ice_irq_affinity_notify - Callback for affinity changes
2130 * @notify: context as to what irq was changed
2131 * @mask: the new affinity mask
2132 *
2133 * This is a callback function used by the irq_set_affinity_notifier function
2134 * so that we may register to receive changes to the irq affinity masks.
2135 */
2136static void
2137ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2138 const cpumask_t *mask)
2139{
2140 struct ice_q_vector *q_vector =
2141 container_of(notify, struct ice_q_vector, affinity_notify);
2142
2143 cpumask_copy(&q_vector->affinity_mask, mask);
2144}
2145
2146/**
2147 * ice_irq_affinity_release - Callback for affinity notifier release
2148 * @ref: internal core kernel usage
2149 *
2150 * This is a callback function used by the irq_set_affinity_notifier function
2151 * to inform the current notification subscriber that they will no longer
2152 * receive notifications.
2153 */
2154static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2155
2156/**
2157 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2158 * @vsi: the VSI being configured
2159 */
2160static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2161{
2162 struct ice_hw *hw = &vsi->back->hw;
2163 int i;
2164
2165 ice_for_each_q_vector(vsi, i)
2166 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2167
2168 ice_flush(hw);
2169 return 0;
2170}
2171
2172/**
2173 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2174 * @vsi: the VSI being configured
2175 * @basename: name for the vector
2176 */
2177static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2178{
2179 int q_vectors = vsi->num_q_vectors;
2180 struct ice_pf *pf = vsi->back;
2181 int base = vsi->base_vector;
2182 struct device *dev;
2183 int rx_int_idx = 0;
2184 int tx_int_idx = 0;
2185 int vector, err;
2186 int irq_num;
2187
2188 dev = ice_pf_to_dev(pf);
2189 for (vector = 0; vector < q_vectors; vector++) {
2190 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2191
2192 irq_num = pf->msix_entries[base + vector].vector;
2193
2194 if (q_vector->tx.ring && q_vector->rx.ring) {
2195 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2196 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2197 tx_int_idx++;
2198 } else if (q_vector->rx.ring) {
2199 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2200 "%s-%s-%d", basename, "rx", rx_int_idx++);
2201 } else if (q_vector->tx.ring) {
2202 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2203 "%s-%s-%d", basename, "tx", tx_int_idx++);
2204 } else {
2205 /* skip this unused q_vector */
2206 continue;
2207 }
2208 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
2209 q_vector->name, q_vector);
2210 if (err) {
2211 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2212 err);
2213 goto free_q_irqs;
2214 }
2215
2216 /* register for affinity change notifications */
2217 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2218 struct irq_affinity_notify *affinity_notify;
2219
2220 affinity_notify = &q_vector->affinity_notify;
2221 affinity_notify->notify = ice_irq_affinity_notify;
2222 affinity_notify->release = ice_irq_affinity_release;
2223 irq_set_affinity_notifier(irq_num, affinity_notify);
2224 }
2225
2226 /* assign the mask for this irq */
2227 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2228 }
2229
2230 vsi->irqs_ready = true;
2231 return 0;
2232
2233free_q_irqs:
2234 while (vector) {
2235 vector--;
2236 irq_num = pf->msix_entries[base + vector].vector;
2237 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2238 irq_set_affinity_notifier(irq_num, NULL);
2239 irq_set_affinity_hint(irq_num, NULL);
2240		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2241 }
2242 return err;
2243}
2244
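/* Sketch of how @basename is consumed (an assumption mirroring typical
 * callers): it becomes the "%s" prefix of each vector name, yielding IRQ
 * names such as "ice-eth0-TxRx-0", "ice-eth0-TxRx-1", ...
 */
#if 0	/* example only, compiled out */
	char int_name[ICE_INT_NAME_STR_LEN];

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
	err = ice_vsi_req_irq_msix(vsi, int_name);
#endif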
2245/**
2246 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2247 * @vsi: VSI to setup Tx rings used by XDP
2248 *
2249 * Return 0 on success and negative value on error
2250 */
2251static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2252{
2253 struct device *dev = ice_pf_to_dev(vsi->back);
2254 int i;
2255
2256 for (i = 0; i < vsi->num_xdp_txq; i++) {
2257 u16 xdp_q_idx = vsi->alloc_txq + i;
2258 struct ice_ring *xdp_ring;
2259
2260 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2261
2262 if (!xdp_ring)
2263 goto free_xdp_rings;
2264
2265 xdp_ring->q_index = xdp_q_idx;
2266 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2267 xdp_ring->ring_active = false;
2268 xdp_ring->vsi = vsi;
2269 xdp_ring->netdev = NULL;
2270 xdp_ring->dev = dev;
2271 xdp_ring->count = vsi->num_tx_desc;
2272 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2273 if (ice_setup_tx_ring(xdp_ring))
2274 goto free_xdp_rings;
2275 ice_set_ring_xdp(xdp_ring);
2276 xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
2277 }
2278
2279 return 0;
2280
2281free_xdp_rings:
2282 for (; i >= 0; i--)
2283 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2284 ice_free_tx_ring(vsi->xdp_rings[i]);
2285 return -ENOMEM;
2286}
2287
2288/**
2289 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2290 * @vsi: VSI to set the bpf prog on
2291 * @prog: the bpf prog pointer
2292 */
2293static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2294{
2295 struct bpf_prog *old_prog;
2296 int i;
2297
2298 old_prog = xchg(&vsi->xdp_prog, prog);
2299 if (old_prog)
2300 bpf_prog_put(old_prog);
2301
2302 ice_for_each_rxq(vsi, i)
2303 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2304}
2305
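/* Note on the xchg()/WRITE_ONCE() pairing above: the Rx hot path is expected
 * to load the per-ring pointer with READ_ONCE() before running the program,
 * roughly as sketched below (an assumption of the reader side, not a copy
 * of it).
 */
#if 0	/* example only, compiled out */
	struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (xdp_prog)
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
#endif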
2306/**
2307 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2308 * @vsi: VSI to bring up Tx rings used by XDP
2309 * @prog: bpf program that will be assigned to VSI
2310 *
2311 * Return 0 on success and negative value on error
2312 */
2313int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2314{
2315 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2316 int xdp_rings_rem = vsi->num_xdp_txq;
2317 struct ice_pf *pf = vsi->back;
2318 struct ice_qs_cfg xdp_qs_cfg = {
2319 .qs_mutex = &pf->avail_q_mutex,
2320 .pf_map = pf->avail_txqs,
2321 .pf_map_size = pf->max_pf_txqs,
2322 .q_count = vsi->num_xdp_txq,
2323 .scatter_count = ICE_MAX_SCATTER_TXQS,
2324 .vsi_map = vsi->txq_map,
2325 .vsi_map_offset = vsi->alloc_txq,
2326 .mapping_mode = ICE_VSI_MAP_CONTIG
2327 };
2328 enum ice_status status;
2329 struct device *dev;
2330 int i, v_idx;
2331
2332 dev = ice_pf_to_dev(pf);
2333 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2334 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2335 if (!vsi->xdp_rings)
2336 return -ENOMEM;
2337
2338 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2339 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2340 goto err_map_xdp;
2341
2342 if (ice_xdp_alloc_setup_rings(vsi))
2343 goto clear_xdp_rings;
2344
2345 /* follow the logic from ice_vsi_map_rings_to_vectors */
2346 ice_for_each_q_vector(vsi, v_idx) {
2347 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2348 int xdp_rings_per_v, q_id, q_base;
2349
2350 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2351 vsi->num_q_vectors - v_idx);
2352 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2353
2354 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2355 struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2356
2357 xdp_ring->q_vector = q_vector;
2358 xdp_ring->next = q_vector->tx.ring;
2359 q_vector->tx.ring = xdp_ring;
2360 }
2361 xdp_rings_rem -= xdp_rings_per_v;
2362 }
2363
2364 /* omit the scheduler update if in reset path; XDP queues will be
2365 * taken into account at the end of ice_vsi_rebuild, where
2366 * ice_cfg_vsi_lan is being called
2367 */
2368 if (ice_is_reset_in_progress(pf->state))
2369 return 0;
2370
2371 /* tell the Tx scheduler that right now we have
2372 * additional queues
2373 */
2374 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2375 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2376
2377 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2378 max_txqs);
2379 if (status) {
2380 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2381 ice_stat_str(status));
2382 goto clear_xdp_rings;
2383 }
2384 ice_vsi_assign_bpf_prog(vsi, prog);
2385
2386 return 0;
2387clear_xdp_rings:
2388 for (i = 0; i < vsi->num_xdp_txq; i++)
2389 if (vsi->xdp_rings[i]) {
2390 kfree_rcu(vsi->xdp_rings[i], rcu);
2391 vsi->xdp_rings[i] = NULL;
2392 }
2393
2394err_map_xdp:
2395 mutex_lock(&pf->avail_q_mutex);
2396 for (i = 0; i < vsi->num_xdp_txq; i++) {
2397 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2398 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2399 }
2400 mutex_unlock(&pf->avail_q_mutex);
2401
2402 devm_kfree(dev, vsi->xdp_rings);
2403 return -ENOMEM;
2404}
2405
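/* Worked example of the distribution loop above: with num_xdp_txq = 4 and
 * num_q_vectors = 3, the per-vector ring counts are DIV_ROUND_UP(4, 3) = 2,
 * then DIV_ROUND_UP(2, 2) = 1 and DIV_ROUND_UP(1, 1) = 1, i.e. XDP rings
 * {0, 1}, {2} and {3} land on vectors 0, 1 and 2 respectively.
 */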
2406/**
2407 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2408 * @vsi: VSI to remove XDP rings
2409 *
2410 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2411 * resources
2412 */
2413int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2414{
2415 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2416 struct ice_pf *pf = vsi->back;
2417 int i, v_idx;
2418
2419 /* q_vectors are freed in reset path so there's no point in detaching
2420	 * rings; if a rebuild is triggered without a reset, the reset bits in
2421	 * pf->state won't be set, so additionally check the first q_vector
2422 * against NULL
2423 */
2424 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2425 goto free_qmap;
2426
2427 ice_for_each_q_vector(vsi, v_idx) {
2428 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2429 struct ice_ring *ring;
2430
2431 ice_for_each_ring(ring, q_vector->tx)
2432 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2433 break;
2434
2435 /* restore the value of last node prior to XDP setup */
2436 q_vector->tx.ring = ring;
2437 }
2438
2439free_qmap:
2440 mutex_lock(&pf->avail_q_mutex);
2441 for (i = 0; i < vsi->num_xdp_txq; i++) {
2442 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2443 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2444 }
2445 mutex_unlock(&pf->avail_q_mutex);
2446
2447 for (i = 0; i < vsi->num_xdp_txq; i++)
2448 if (vsi->xdp_rings[i]) {
2449 if (vsi->xdp_rings[i]->desc)
2450 ice_free_tx_ring(vsi->xdp_rings[i]);
2451 kfree_rcu(vsi->xdp_rings[i], rcu);
2452 vsi->xdp_rings[i] = NULL;
2453 }
2454
2455 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2456 vsi->xdp_rings = NULL;
2457
2458 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2459 return 0;
2460
2461 ice_vsi_assign_bpf_prog(vsi, NULL);
2462
2463 /* notify Tx scheduler that we destroyed XDP queues and bring
2464 * back the old number of child nodes
2465 */
2466 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2467 max_txqs[i] = vsi->num_txq;
2468
2469 /* change number of XDP Tx queues to 0 */
2470 vsi->num_xdp_txq = 0;
2471
2472 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2473 max_txqs);
2474}
2475
2476/**
2477 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2478 * @vsi: VSI to setup XDP for
2479 * @prog: XDP program
2480 * @extack: netlink extended ack
2481 */
2482static int
2483ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2484 struct netlink_ext_ack *extack)
2485{
2486 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2487 bool if_running = netif_running(vsi->netdev);
2488 int ret = 0, xdp_ring_err = 0;
2489
2490 if (frame_size > vsi->rx_buf_len) {
2491 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2492 return -EOPNOTSUPP;
2493 }
2494
2495 /* need to stop netdev while setting up the program for Rx rings */
2496 if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
2497 ret = ice_down(vsi);
2498 if (ret) {
2499 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2500 return ret;
2501 }
2502 }
2503
2504 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2505 vsi->num_xdp_txq = vsi->alloc_rxq;
2506 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2507 if (xdp_ring_err)
2508 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2509 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2510 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2511 if (xdp_ring_err)
2512 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2513 } else {
2514 ice_vsi_assign_bpf_prog(vsi, prog);
2515 }
2516
2517 if (if_running)
2518 ret = ice_up(vsi);
2519
2520 if (!ret && prog && vsi->xsk_umems) {
2521 int i;
2522
2523 ice_for_each_rxq(vsi, i) {
2524 struct ice_ring *rx_ring = vsi->rx_rings[i];
2525
2526 if (rx_ring->xsk_umem)
2527 napi_schedule(&rx_ring->q_vector->napi);
2528 }
2529 }
2530
2531 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2532}
2533
2534/**
2535 * ice_xdp - implements XDP handler
2536 * @dev: netdevice
2537 * @xdp: XDP command
2538 */
2539static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2540{
2541 struct ice_netdev_priv *np = netdev_priv(dev);
2542 struct ice_vsi *vsi = np->vsi;
2543
2544 if (vsi->type != ICE_VSI_PF) {
2545 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2546 return -EINVAL;
2547 }
2548
2549 switch (xdp->command) {
2550 case XDP_SETUP_PROG:
2551 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2552 case XDP_SETUP_XSK_UMEM:
2553 return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
2554 xdp->xsk.queue_id);
2555 default:
2556 return -EINVAL;
2557 }
2558}
2559
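/* The .ndo_bpf callback above is reached via dev_change_xdp_fd(), e.g. when
 * userspace attaches a program with iproute2:
 *
 *	ip link set dev eth0 xdp obj xdp_prog.o
 *
 * (the device and object file names are placeholders).
 */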
2560/**
2561 * ice_ena_misc_vector - enable the non-queue interrupts
2562 * @pf: board private structure
2563 */
2564static void ice_ena_misc_vector(struct ice_pf *pf)
2565{
2566 struct ice_hw *hw = &pf->hw;
2567 u32 val;
2568
2569 /* Disable anti-spoof detection interrupt to prevent spurious event
2570	 * interrupts during a function reset. Anti-spoof functionality is
2571 * still supported.
2572 */
2573 val = rd32(hw, GL_MDCK_TX_TDPU);
2574 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2575 wr32(hw, GL_MDCK_TX_TDPU, val);
2576
2577 /* clear things first */
2578 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
2579 rd32(hw, PFINT_OICR); /* read to clear */
2580
2581 val = (PFINT_OICR_ECC_ERR_M |
2582 PFINT_OICR_MAL_DETECT_M |
2583 PFINT_OICR_GRST_M |
2584 PFINT_OICR_PCI_EXCEPTION_M |
2585 PFINT_OICR_VFLR_M |
2586 PFINT_OICR_HMC_ERR_M |
2587 PFINT_OICR_PE_CRITERR_M);
2588
2589 wr32(hw, PFINT_OICR_ENA, val);
2590
2591 /* SW_ITR_IDX = 0, but don't change INTENA */
2592 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2593 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2594}
2595
2596/**
2597 * ice_misc_intr - misc interrupt handler
2598 * @irq: interrupt number
2599 * @data: pointer to the PF structure
2600 */
2601static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2602{
2603 struct ice_pf *pf = (struct ice_pf *)data;
2604 struct ice_hw *hw = &pf->hw;
2605 irqreturn_t ret = IRQ_NONE;
2606 struct device *dev;
2607 u32 oicr, ena_mask;
2608
2609 dev = ice_pf_to_dev(pf);
2610 set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
2611 set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2612
2613 oicr = rd32(hw, PFINT_OICR);
2614 ena_mask = rd32(hw, PFINT_OICR_ENA);
2615
2616 if (oicr & PFINT_OICR_SWINT_M) {
2617 ena_mask &= ~PFINT_OICR_SWINT_M;
2618 pf->sw_int_count++;
2619 }
2620
2621 if (oicr & PFINT_OICR_MAL_DETECT_M) {
2622 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2623 set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
2624 }
2625 if (oicr & PFINT_OICR_VFLR_M) {
2626 /* disable any further VFLR event notifications */
2627 if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
2628 u32 reg = rd32(hw, PFINT_OICR_ENA);
2629
2630 reg &= ~PFINT_OICR_VFLR_M;
2631 wr32(hw, PFINT_OICR_ENA, reg);
2632 } else {
2633 ena_mask &= ~PFINT_OICR_VFLR_M;
2634 set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
2635 }
2636 }
2637
2638 if (oicr & PFINT_OICR_GRST_M) {
2639 u32 reset;
2640
2641 /* we have a reset warning */
2642 ena_mask &= ~PFINT_OICR_GRST_M;
2643 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2644 GLGEN_RSTAT_RESET_TYPE_S;
2645
2646 if (reset == ICE_RESET_CORER)
2647 pf->corer_count++;
2648 else if (reset == ICE_RESET_GLOBR)
2649 pf->globr_count++;
2650 else if (reset == ICE_RESET_EMPR)
2651 pf->empr_count++;
2652 else
2653 dev_dbg(dev, "Invalid reset type %d\n", reset);
2654
2655 /* If a reset cycle isn't already in progress, we set a bit in
2656 * pf->state so that the service task can start a reset/rebuild.
2657 * We also make note of which reset happened so that peer
2658 * devices/drivers can be informed.
2659 */
2660 if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
2661 if (reset == ICE_RESET_CORER)
2662 set_bit(__ICE_CORER_RECV, pf->state);
2663 else if (reset == ICE_RESET_GLOBR)
2664 set_bit(__ICE_GLOBR_RECV, pf->state);
2665 else
2666 set_bit(__ICE_EMPR_RECV, pf->state);
2667
2668			/* There are a couple of different bits at play here.
2669 * hw->reset_ongoing indicates whether the hardware is
2670 * in reset. This is set to true when a reset interrupt
2671 * is received and set back to false after the driver
2672 * has determined that the hardware is out of reset.
2673 *
2674 * __ICE_RESET_OICR_RECV in pf->state indicates
2675 * that a post reset rebuild is required before the
2676 * driver is operational again. This is set above.
2677 *
2678 * As this is the start of the reset/rebuild cycle, set
2679 * both to indicate that.
2680 */
2681 hw->reset_ongoing = true;
2682 }
2683 }
2684
2685 if (oicr & PFINT_OICR_HMC_ERR_M) {
2686 ena_mask &= ~PFINT_OICR_HMC_ERR_M;
2687 dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
2688 rd32(hw, PFHMC_ERRORINFO),
2689 rd32(hw, PFHMC_ERRORDATA));
2690 }
2691
2692 /* Report any remaining unexpected interrupts */
2693 oicr &= ena_mask;
2694 if (oicr) {
2695 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2696 /* If a critical error is pending there is no choice but to
2697 * reset the device.
2698 */
2699 if (oicr & (PFINT_OICR_PE_CRITERR_M |
2700 PFINT_OICR_PCI_EXCEPTION_M |
2701 PFINT_OICR_ECC_ERR_M)) {
2702 set_bit(__ICE_PFR_REQ, pf->state);
2703 ice_service_task_schedule(pf);
2704 }
2705 }
2706 ret = IRQ_HANDLED;
2707
2708 ice_service_task_schedule(pf);
2709 ice_irq_dynamic_ena(hw, NULL, NULL);
2710
2711 return ret;
2712}
2713
2714/**
2715 * ice_dis_ctrlq_interrupts - disable control queue interrupts
2716 * @hw: pointer to HW structure
2717 */
2718static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2719{
2720 /* disable Admin queue Interrupt causes */
2721 wr32(hw, PFINT_FW_CTL,
2722 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2723
2724 /* disable Mailbox queue Interrupt causes */
2725 wr32(hw, PFINT_MBX_CTL,
2726 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2727
2728 /* disable Control queue Interrupt causes */
2729 wr32(hw, PFINT_OICR_CTL,
2730 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2731
2732 ice_flush(hw);
2733}
2734
2735/**
2736 * ice_free_irq_msix_misc - Unroll misc vector setup
2737 * @pf: board private structure
2738 */
2739static void ice_free_irq_msix_misc(struct ice_pf *pf)
2740{
2741 struct ice_hw *hw = &pf->hw;
2742
2743 ice_dis_ctrlq_interrupts(hw);
2744
2745 /* disable OICR interrupt */
2746 wr32(hw, PFINT_OICR_ENA, 0);
2747 ice_flush(hw);
2748
2749 if (pf->msix_entries) {
2750 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2751 devm_free_irq(ice_pf_to_dev(pf),
2752 pf->msix_entries[pf->oicr_idx].vector, pf);
2753 }
2754
2755 pf->num_avail_sw_msix += 1;
2756 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2757}
2758
2759/**
2760 * ice_ena_ctrlq_interrupts - enable control queue interrupts
2761 * @hw: pointer to HW structure
2762 * @reg_idx: HW vector index to associate the control queue interrupts with
2763 */
2764static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2765{
2766 u32 val;
2767
2768 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2769 PFINT_OICR_CTL_CAUSE_ENA_M);
2770 wr32(hw, PFINT_OICR_CTL, val);
2771
2772 /* enable Admin queue Interrupt causes */
2773 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2774 PFINT_FW_CTL_CAUSE_ENA_M);
2775 wr32(hw, PFINT_FW_CTL, val);
2776
2777 /* enable Mailbox queue Interrupt causes */
2778 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2779 PFINT_MBX_CTL_CAUSE_ENA_M);
2780 wr32(hw, PFINT_MBX_CTL, val);
2781
2782 ice_flush(hw);
2783}
2784
2785/**
2786 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2787 * @pf: board private structure
2788 *
2789 * This sets up the handler for MSI-X vector 0, which is used to manage
2790 * non-queue interrupts, e.g. AdminQ events and errors. It is not used
2791 * when the device is in MSI or legacy interrupt mode.
2792 */
2793static int ice_req_irq_msix_misc(struct ice_pf *pf)
2794{
2795 struct device *dev = ice_pf_to_dev(pf);
2796 struct ice_hw *hw = &pf->hw;
2797 int oicr_idx, err = 0;
2798
2799 if (!pf->int_name[0])
2800 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2801 dev_driver_string(dev), dev_name(dev));
2802
2803 /* Do not request IRQ but do enable OICR interrupt since settings are
2804 * lost during reset. Note that this function is called only during
2805 * rebuild path and not while reset is in progress.
2806 */
2807 if (ice_is_reset_in_progress(pf->state))
2808 goto skip_req_irq;
2809
2810 /* reserve one vector in irq_tracker for misc interrupts */
2811 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2812 if (oicr_idx < 0)
2813 return oicr_idx;
2814
2815 pf->num_avail_sw_msix -= 1;
2816 pf->oicr_idx = (u16)oicr_idx;
2817
2818 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2819 ice_misc_intr, 0, pf->int_name, pf);
2820 if (err) {
2821 dev_err(dev, "devm_request_irq for %s failed: %d\n",
2822 pf->int_name, err);
2823 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2824 pf->num_avail_sw_msix += 1;
2825 return err;
2826 }
2827
2828skip_req_irq:
2829 ice_ena_misc_vector(pf);
2830
2831 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
2832 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2833 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2834
2835 ice_flush(hw);
2836 ice_irq_dynamic_ena(hw, NULL, NULL);
2837
2838 return 0;
2839}
2840
2841/**
2842 * ice_napi_add - register NAPI handler for the VSI
2843 * @vsi: VSI for which NAPI handler is to be registered
2844 *
2845 * This function is only called in the driver's load path. Registering the NAPI
2846 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
2847 * reset/rebuild, etc.)
2848 */
2849static void ice_napi_add(struct ice_vsi *vsi)
2850{
2851 int v_idx;
2852
2853 if (!vsi->netdev)
2854 return;
2855
2856 ice_for_each_q_vector(vsi, v_idx)
2857 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
2858 ice_napi_poll, NAPI_POLL_WEIGHT);
2859}
2860
2861/**
2862 * ice_set_ops - set netdev and ethtool ops for the given netdev
2863 * @netdev: netdev instance
2864 */
2865static void ice_set_ops(struct net_device *netdev)
2866{
2867 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2868
2869 if (ice_is_safe_mode(pf)) {
2870 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
2871 ice_set_ethtool_safe_mode_ops(netdev);
2872 return;
2873 }
2874
2875 netdev->netdev_ops = &ice_netdev_ops;
2876 ice_set_ethtool_ops(netdev);
2877}
2878
2879/**
2880 * ice_set_netdev_features - set features for the given netdev
2881 * @netdev: netdev instance
2882 */
2883static void ice_set_netdev_features(struct net_device *netdev)
2884{
2885 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2886 netdev_features_t csumo_features;
2887 netdev_features_t vlano_features;
2888 netdev_features_t dflt_features;
2889 netdev_features_t tso_features;
2890
2891 if (ice_is_safe_mode(pf)) {
2892 /* safe mode */
2893 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
2894 netdev->hw_features = netdev->features;
2895 return;
2896 }
2897
2898 dflt_features = NETIF_F_SG |
2899 NETIF_F_HIGHDMA |
2900 NETIF_F_NTUPLE |
2901 NETIF_F_RXHASH;
2902
2903 csumo_features = NETIF_F_RXCSUM |
2904 NETIF_F_IP_CSUM |
2905 NETIF_F_SCTP_CRC |
2906 NETIF_F_IPV6_CSUM;
2907
2908 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2909 NETIF_F_HW_VLAN_CTAG_TX |
2910 NETIF_F_HW_VLAN_CTAG_RX;
2911
2912 tso_features = NETIF_F_TSO |
2913 NETIF_F_TSO_ECN |
2914 NETIF_F_TSO6 |
2915 NETIF_F_GSO_GRE |
2916 NETIF_F_GSO_UDP_TUNNEL |
2917 NETIF_F_GSO_GRE_CSUM |
2918 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2919 NETIF_F_GSO_PARTIAL |
2920 NETIF_F_GSO_IPXIP4 |
2921 NETIF_F_GSO_IPXIP6 |
2922 NETIF_F_GSO_UDP_L4;
2923
2924 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
2925 NETIF_F_GSO_GRE_CSUM;
2926 /* set features that user can change */
2927 netdev->hw_features = dflt_features | csumo_features |
2928 vlano_features | tso_features;
2929
2930 /* add support for HW_CSUM on packets with MPLS header */
2931 netdev->mpls_features = NETIF_F_HW_CSUM;
2932
2933 /* enable features */
2934 netdev->features |= netdev->hw_features;
2935 /* encap and VLAN devices inherit default, csumo and tso features */
2936 netdev->hw_enc_features |= dflt_features | csumo_features |
2937 tso_features;
2938 netdev->vlan_features |= dflt_features | csumo_features |
2939 tso_features;
2940}
2941
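/* Everything placed in netdev->hw_features above remains user-toggleable at
 * runtime, e.g.:
 *
 *	ethtool -K eth0 rx-vlan-filter off
 *
 * while bits present only in netdev->features cannot be changed by the user.
 */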
2942/**
2943 * ice_cfg_netdev - Allocate, configure and register a netdev
2944 * @vsi: the VSI associated with the new netdev
2945 *
2946 * Returns 0 on success, negative value on failure
2947 */
2948static int ice_cfg_netdev(struct ice_vsi *vsi)
2949{
2950 struct ice_pf *pf = vsi->back;
2951 struct ice_netdev_priv *np;
2952 struct net_device *netdev;
2953 u8 mac_addr[ETH_ALEN];
2954 int err;
2955
2956 err = ice_devlink_create_port(pf);
2957 if (err)
2958 return err;
2959
2960 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
2961 vsi->alloc_rxq);
2962 if (!netdev) {
2963 err = -ENOMEM;
2964 goto err_destroy_devlink_port;
2965 }
2966
2967 vsi->netdev = netdev;
2968 np = netdev_priv(netdev);
2969 np->vsi = vsi;
2970
2971 ice_set_netdev_features(netdev);
2972
2973 ice_set_ops(netdev);
2974
2975 if (vsi->type == ICE_VSI_PF) {
2976 SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
2977 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
2978 ether_addr_copy(netdev->dev_addr, mac_addr);
2979 ether_addr_copy(netdev->perm_addr, mac_addr);
2980 }
2981
2982 netdev->priv_flags |= IFF_UNICAST_FLT;
2983
2984 /* Setup netdev TC information */
2985 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
2986
2987 /* setup watchdog timeout value to be 5 second */
2988 netdev->watchdog_timeo = 5 * HZ;
2989
2990 netdev->min_mtu = ETH_MIN_MTU;
2991 netdev->max_mtu = ICE_MAX_MTU;
2992
2993 err = register_netdev(vsi->netdev);
2994 if (err)
2995 goto err_free_netdev;
2996
2997 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
2998
2999 netif_carrier_off(vsi->netdev);
3000
3001 /* make sure transmit queues start off as stopped */
3002 netif_tx_stop_all_queues(vsi->netdev);
3003
3004 return 0;
3005
3006err_free_netdev:
3007 free_netdev(vsi->netdev);
3008 vsi->netdev = NULL;
3009err_destroy_devlink_port:
3010 ice_devlink_destroy_port(pf);
3011 return err;
3012}
3013
3014/**
3015 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3016 * @lut: Lookup table
3017 * @rss_table_size: Lookup table size
3018 * @rss_size: Range of queue number for hashing
3019 */
3020void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3021{
3022 u16 i;
3023
3024 for (i = 0; i < rss_table_size; i++)
3025 lut[i] = i % rss_size;
3026}
3027
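/* Worked example: with rss_table_size = 8 and rss_size = 3 the LUT becomes
 * { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading hash buckets round-robin across the
 * first three queues.
 */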
3028/**
3029 * ice_pf_vsi_setup - Set up a PF VSI
3030 * @pf: board private structure
3031 * @pi: pointer to the port_info instance
3032 *
3033 * Returns pointer to the successfully allocated VSI software struct
3034 * on success, otherwise returns NULL on failure.
3035 */
3036static struct ice_vsi *
3037ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3038{
3039 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3040}
3041
3042/**
3043 * ice_ctrl_vsi_setup - Set up a control VSI
3044 * @pf: board private structure
3045 * @pi: pointer to the port_info instance
3046 *
3047 * Returns pointer to the successfully allocated VSI software struct
3048 * on success, otherwise returns NULL on failure.
3049 */
3050static struct ice_vsi *
3051ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3052{
3053 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3054}
3055
3056/**
3057 * ice_lb_vsi_setup - Set up a loopback VSI
3058 * @pf: board private structure
3059 * @pi: pointer to the port_info instance
3060 *
3061 * Returns pointer to the successfully allocated VSI software struct
3062 * on success, otherwise returns NULL on failure.
3063 */
3064struct ice_vsi *
3065ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3066{
3067 return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3068}
3069
3070/**
3071 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3072 * @netdev: network interface to be adjusted
3073 * @proto: unused protocol
3074 * @vid: VLAN ID to be added
3075 *
3076 * net_device_ops implementation for adding VLAN IDs
3077 */
3078static int
3079ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3080 u16 vid)
3081{
3082 struct ice_netdev_priv *np = netdev_priv(netdev);
3083 struct ice_vsi *vsi = np->vsi;
3084 int ret;
3085
3086 if (vid >= VLAN_N_VID) {
3087		netdev_err(netdev, "VLAN ID requested %d is out of range %d\n",
3088 vid, VLAN_N_VID);
3089 return -EINVAL;
3090 }
3091
3092 if (vsi->info.pvid)
3093 return -EINVAL;
3094
3095 /* VLAN 0 is added by default during load/reset */
3096 if (!vid)
3097 return 0;
3098
3099 /* Enable VLAN pruning when a VLAN other than 0 is added */
3100 if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3101 ret = ice_cfg_vlan_pruning(vsi, true, false);
3102 if (ret)
3103 return ret;
3104 }
3105
3106 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3107 * packets aren't pruned by the device's internal switch on Rx
3108 */
3109 ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3110 if (!ret) {
3111 vsi->vlan_ena = true;
3112 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3113 }
3114
3115 return ret;
3116}
3117
3118/**
3119 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3120 * @netdev: network interface to be adjusted
3121 * @proto: unused protocol
3122 * @vid: VLAN ID to be removed
3123 *
3124 * net_device_ops implementation for removing VLAN IDs
3125 */
3126static int
3127ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3128 u16 vid)
3129{
3130 struct ice_netdev_priv *np = netdev_priv(netdev);
3131 struct ice_vsi *vsi = np->vsi;
3132 int ret;
3133
3134 if (vsi->info.pvid)
3135 return -EINVAL;
3136
3137 /* don't allow removal of VLAN 0 */
3138 if (!vid)
3139 return 0;
3140
3141 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3142 * information
3143 */
3144 ret = ice_vsi_kill_vlan(vsi, vid);
3145 if (ret)
3146 return ret;
3147
3148 /* Disable pruning when VLAN 0 is the only VLAN rule */
3149 if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3150 ret = ice_cfg_vlan_pruning(vsi, false, false);
3151
3152 vsi->vlan_ena = false;
3153 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3154 return ret;
3155}
3156
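/* The two ndo callbacks above are invoked by the 8021q core when a VLAN
 * upper device is created or deleted, e.g.:
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 *	ip link del eth0.100
 *
 * (interface names and the VLAN ID are placeholders).
 */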
3157/**
3158 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3159 * @pf: board private structure
3160 *
3161 * Returns 0 on success, negative value on failure
3162 */
3163static int ice_setup_pf_sw(struct ice_pf *pf)
3164{
3165 struct ice_vsi *vsi;
3166 int status = 0;
3167
3168 if (ice_is_reset_in_progress(pf->state))
3169 return -EBUSY;
3170
3171 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3172 if (!vsi)
3173 return -ENOMEM;
3174
3175 status = ice_cfg_netdev(vsi);
3176 if (status) {
3177 status = -ENODEV;
3178 goto unroll_vsi_setup;
3179 }
3180 /* netdev has to be configured before setting frame size */
3181 ice_vsi_cfg_frame_size(vsi);
3182
3183 /* Setup DCB netlink interface */
3184 ice_dcbnl_setup(vsi);
3185
3186 /* registering the NAPI handler requires both the queues and
3187 * netdev to be created, which are done in ice_pf_vsi_setup()
3188 * and ice_cfg_netdev() respectively
3189 */
3190 ice_napi_add(vsi);
3191
3192 status = ice_set_cpu_rx_rmap(vsi);
3193 if (status) {
3194 dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3195 vsi->vsi_num, status);
3196 status = -EINVAL;
3197 goto unroll_napi_add;
3198 }
3199 status = ice_init_mac_fltr(pf);
3200 if (status)
3201 goto free_cpu_rx_map;
3202
3203 return status;
3204
3205free_cpu_rx_map:
3206 ice_free_cpu_rx_rmap(vsi);
3207
3208unroll_napi_add:
3209 if (vsi) {
3210 ice_napi_del(vsi);
3211 if (vsi->netdev) {
3212 if (vsi->netdev->reg_state == NETREG_REGISTERED)
3213 unregister_netdev(vsi->netdev);
3214 free_netdev(vsi->netdev);
3215 vsi->netdev = NULL;
3216 }
3217 }
3218
3219unroll_vsi_setup:
3220 ice_vsi_release(vsi);
3221 return status;
3222}
3223
3224/**
3225 * ice_get_avail_q_count - Get count of available (unused) queues
3226 * @pf_qmap: bitmap of in-use queues; clear bits are counted
3227 * @lock: pointer to a mutex that protects access to pf_qmap
3228 * @size: size of the bitmap
3229 */
3230static u16
3231ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3232{
3233 unsigned long bit;
3234 u16 count = 0;
3235
3236 mutex_lock(lock);
3237 for_each_clear_bit(bit, pf_qmap, size)
3238 count++;
3239 mutex_unlock(lock);
3240
3241 return count;
3242}
3243
3244/**
3245 * ice_get_avail_txq_count - Get count of available Tx queues
3246 * @pf: pointer to an ice_pf instance
3247 */
3248u16 ice_get_avail_txq_count(struct ice_pf *pf)
3249{
3250 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3251 pf->max_pf_txqs);
3252}
3253
3254/**
3255 * ice_get_avail_rxq_count - Get count of available Rx queues
3256 * @pf: pointer to an ice_pf instance
3257 */
3258u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3259{
3260 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3261 pf->max_pf_rxqs);
3262}
3263
3264/**
3265 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3266 * @pf: board private structure to initialize
3267 */
3268static void ice_deinit_pf(struct ice_pf *pf)
3269{
3270 ice_service_task_stop(pf);
3271 mutex_destroy(&pf->sw_mutex);
3272 mutex_destroy(&pf->tc_mutex);
3273 mutex_destroy(&pf->avail_q_mutex);
3274
3275 if (pf->avail_txqs) {
3276 bitmap_free(pf->avail_txqs);
3277 pf->avail_txqs = NULL;
3278 }
3279
3280 if (pf->avail_rxqs) {
3281 bitmap_free(pf->avail_rxqs);
3282 pf->avail_rxqs = NULL;
3283 }
3284}
3285
3286/**
3287 * ice_set_pf_caps - set PFs capability flags
3288 * @pf: pointer to the PF instance
3289 */
3290static void ice_set_pf_caps(struct ice_pf *pf)
3291{
3292 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3293
3294 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3295 if (func_caps->common_cap.dcb)
3296 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3297 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3298 if (func_caps->common_cap.sr_iov_1_1) {
3299 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3300 pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3301 ICE_MAX_VF_COUNT);
3302 }
3303 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3304 if (func_caps->common_cap.rss_table_size)
3305 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3306
3307 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3308 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3309 u16 unused;
3310
3311 /* ctrl_vsi_idx will be set to a valid value when flow director
3312 * is setup by ice_init_fdir
3313 */
3314 pf->ctrl_vsi_idx = ICE_NO_VSI;
3315 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3316 /* force guaranteed filter pool for PF */
3317 ice_alloc_fd_guar_item(&pf->hw, &unused,
3318 func_caps->fd_fltr_guar);
3319 /* force shared filter pool for PF */
3320 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3321 func_caps->fd_fltr_best_effort);
3322 }
3323
3324 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3325 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3326}
3327
3328/**
3329 * ice_init_pf - Initialize general software structures (struct ice_pf)
3330 * @pf: board private structure to initialize
3331 */
3332static int ice_init_pf(struct ice_pf *pf)
3333{
3334 ice_set_pf_caps(pf);
3335
3336 mutex_init(&pf->sw_mutex);
3337 mutex_init(&pf->tc_mutex);
3338
3339 INIT_HLIST_HEAD(&pf->aq_wait_list);
3340 spin_lock_init(&pf->aq_wait_lock);
3341 init_waitqueue_head(&pf->aq_wait_queue);
3342
3343 /* setup service timer and periodic service task */
3344 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3345 pf->serv_tmr_period = HZ;
3346 INIT_WORK(&pf->serv_task, ice_service_task);
3347 clear_bit(__ICE_SERVICE_SCHED, pf->state);
3348
3349 mutex_init(&pf->avail_q_mutex);
3350 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3351 if (!pf->avail_txqs)
3352 return -ENOMEM;
3353
3354 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3355 if (!pf->avail_rxqs) {
3356		bitmap_free(pf->avail_txqs);
3357 pf->avail_txqs = NULL;
3358 return -ENOMEM;
3359 }
3360
3361 return 0;
3362}
3363
3364/**
3365 * ice_ena_msix_range - Request a range of MSI-X vectors from the OS
3366 * @pf: board private structure
3367 *
3368 * Compute the number of MSI-X vectors required (v_budget) and request them
3369 * from the OS. Returns the number of vectors reserved or negative on failure.
3370 */
3371static int ice_ena_msix_range(struct ice_pf *pf)
3372{
3373 struct device *dev = ice_pf_to_dev(pf);
3374 int v_left, v_actual, v_budget = 0;
3375 int needed, err, i;
3376
3377 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3378
3379 /* reserve one vector for miscellaneous handler */
3380 needed = 1;
3381 if (v_left < needed)
3382 goto no_hw_vecs_left_err;
3383 v_budget += needed;
3384 v_left -= needed;
3385
3386 /* reserve vectors for LAN traffic */
3387 needed = min_t(int, num_online_cpus(), v_left);
3388 if (v_left < needed)
3389 goto no_hw_vecs_left_err;
3390 pf->num_lan_msix = needed;
3391 v_budget += needed;
3392 v_left -= needed;
3393
3394 /* reserve one vector for flow director */
3395 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3396 needed = ICE_FDIR_MSIX;
3397 if (v_left < needed)
3398 goto no_hw_vecs_left_err;
3399 v_budget += needed;
3400 v_left -= needed;
3401 }
3402
3403 pf->msix_entries = devm_kcalloc(dev, v_budget,
3404 sizeof(*pf->msix_entries), GFP_KERNEL);
3405
3406 if (!pf->msix_entries) {
3407 err = -ENOMEM;
3408 goto exit_err;
3409 }
3410
3411 for (i = 0; i < v_budget; i++)
3412 pf->msix_entries[i].entry = i;
3413
3414 /* actually reserve the vectors */
3415 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3416 ICE_MIN_MSIX, v_budget);
3417
3418 if (v_actual < 0) {
3419 dev_err(dev, "unable to reserve MSI-X vectors\n");
3420 err = v_actual;
3421 goto msix_err;
3422 }
3423
3424 if (v_actual < v_budget) {
3425 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3426 v_budget, v_actual);
3427/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
3428#define ICE_MIN_LAN_VECS 2
3429#define ICE_MIN_RDMA_VECS 2
3430#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
3431
3432		if (v_actual < ICE_MIN_VECS) {
3433 /* error if we can't get minimum vectors */
3434 pci_disable_msix(pf->pdev);
3435 err = -ERANGE;
3436 goto msix_err;
3437 } else {
3438 pf->num_lan_msix = ICE_MIN_LAN_VECS;
3439 }
3440 }
3441
3442 return v_actual;
3443
3444msix_err:
3445 devm_kfree(dev, pf->msix_entries);
3446 goto exit_err;
3447
3448no_hw_vecs_left_err:
3449 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3450 needed, v_left);
3451 err = -ERANGE;
3452exit_err:
3453 pf->num_lan_msix = 0;
3454 return err;
3455}
3456
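/* Illustrative arithmetic only: ice_ena_msix_range() builds its vector
 * budget as one miscellaneous/OICR vector, up to num_online_cpus() LAN
 * vectors, plus a flow director vector when enabled, checking what the
 * device still has left at each step. A standalone sketch of that
 * budgeting; the helper names and the sample values in main() are
 * assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define FDIR_VECS 1	/* stand-in for ICE_FDIR_MSIX */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Returns the vector budget, or -1 if the device can't cover a step. */
static int msix_budget(int dev_vecs, int online_cpus, bool fdir_ena)
{
	int left = dev_vecs, budget = 0, needed;

	needed = 1;				/* miscellaneous/OICR vector */
	if (left < needed)
		return -1;
	budget += needed;
	left -= needed;

	needed = min_int(online_cpus, left);	/* LAN traffic vectors */
	if (left < needed)
		return -1;
	budget += needed;
	left -= needed;

	if (fdir_ena) {				/* flow director vector */
		needed = FDIR_VECS;
		if (left < needed)
			return -1;
		budget += needed;
	}

	return budget;
}

int main(void)
{
	printf("budget: %d\n", msix_budget(16, 8, true)); /* 1 + 8 + 1 = 10 */
	return 0;
}
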
3457/**
3458 * ice_dis_msix - Disable MSI-X interrupt setup in OS
3459 * @pf: board private structure
3460 */
3461static void ice_dis_msix(struct ice_pf *pf)
3462{
3463 pci_disable_msix(pf->pdev);
3464 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3465 pf->msix_entries = NULL;
3466}
3467
3468/**
3469 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3470 * @pf: board private structure
3471 */
3472static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3473{
3474 ice_dis_msix(pf);
3475
3476 if (pf->irq_tracker) {
3477 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3478 pf->irq_tracker = NULL;
3479 }
3480}
3481
3482/**
3483 * ice_init_interrupt_scheme - Determine proper interrupt scheme
3484 * @pf: board private structure to initialize
3485 */
3486static int ice_init_interrupt_scheme(struct ice_pf *pf)
3487{
3488 int vectors;
3489
3490 vectors = ice_ena_msix_range(pf);
3491
3492 if (vectors < 0)
3493 return vectors;
3494
3495 /* set up vector assignment tracking */
3496 pf->irq_tracker =
3497 devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
3498 (sizeof(u16) * vectors), GFP_KERNEL);
3499 if (!pf->irq_tracker) {
3500 ice_dis_msix(pf);
3501 return -ENOMEM;
3502 }
3503
3504 /* populate SW interrupts pool with number of OS granted IRQs. */
3505 pf->num_avail_sw_msix = (u16)vectors;
3506 pf->irq_tracker->num_entries = (u16)vectors;
3507 pf->irq_tracker->end = pf->irq_tracker->num_entries;
3508
3509 return 0;
3510}
3511
3512/**
3513 * ice_is_wol_supported - get NVM state of WoL
3514 * @pf: board private structure
3515 *
3516 * Check if WoL is supported based on the HW configuration.
3517 * Returns true if NVM supports and enables WoL for this port, false otherwise
3518 */
3519bool ice_is_wol_supported(struct ice_pf *pf)
3520{
3521 struct ice_hw *hw = &pf->hw;
3522 u16 wol_ctrl;
3523
3524 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3525 * word) indicates WoL is not supported on the corresponding PF ID.
3526 */
3527 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3528 return false;
3529
3530 return !(BIT(hw->pf_id) & wol_ctrl);
3531}
3532
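/* The WoL control word above uses inverted polarity: a set bit disables
 * WoL for that PF. A tiny standalone demonstration of the same test; the
 * sample control-word value is an assumption.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the driver's test: bit set in the control word == WoL disabled. */
static bool wol_supported(uint16_t wol_ctrl, unsigned int pf_id)
{
	return !(wol_ctrl & (1u << pf_id));
}

int main(void)
{
	uint16_t wol_ctrl = 0x0002;	/* only PF 1 has WoL disabled */

	printf("PF0: %d PF1: %d\n",
	       wol_supported(wol_ctrl, 0), wol_supported(wol_ctrl, 1));
	return 0;
}
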
3533/**
3534 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3535 * @vsi: VSI being changed
3536 * @new_rx: new number of Rx queues
3537 * @new_tx: new number of Tx queues
3538 *
3539 * Only change the number of queues if new_tx or new_rx is non-zero.
3540 *
3541 * Returns 0 on success.
3542 */
3543int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3544{
3545 struct ice_pf *pf = vsi->back;
3546 int err = 0, timeout = 50;
3547
3548 if (!new_rx && !new_tx)
3549 return -EINVAL;
3550
3551 while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
3552 timeout--;
3553 if (!timeout)
3554 return -EBUSY;
3555 usleep_range(1000, 2000);
3556 }
3557
3558 if (new_tx)
3559 vsi->req_txq = (u16)new_tx;
3560 if (new_rx)
3561 vsi->req_rxq = (u16)new_rx;
3562
3563 /* set for the next time the netdev is started */
3564 if (!netif_running(vsi->netdev)) {
3565 ice_vsi_rebuild(vsi, false);
3566 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3567 goto done;
3568 }
3569
3570 ice_vsi_close(vsi);
3571 ice_vsi_rebuild(vsi, false);
3572 ice_pf_dcb_recfg(pf);
3573 ice_vsi_open(vsi);
3574done:
3575 clear_bit(__ICE_CFG_BUSY, pf->state);
3576 return err;
3577}
3578
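/* ice_vsi_recfg_qs() serializes against other configuration paths with a
 * test_and_set_bit() spin plus a bounded sleep. A self-contained userspace
 * analog using C11 atomics; the timeout count and sleep interval mirror
 * the driver, while the flag name is an assumption.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

/* Try to claim the busy flag, giving up after ~50 x 1ms like the driver. */
static bool claim_cfg_busy(void)
{
	int timeout = 50;

	while (atomic_flag_test_and_set(&cfg_busy)) {
		if (!--timeout)
			return false;	/* would return -EBUSY in the driver */
		usleep(1000);
	}
	return true;
}

int main(void)
{
	if (claim_cfg_busy()) {
		/* ... reconfigure queues here ... */
		atomic_flag_clear(&cfg_busy);	/* clear_bit() equivalent */
		puts("reconfigured");
	}
	return 0;
}
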
3579/**
3580 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3581 * @pf: PF to configure
3582 *
3583 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3584 * VSI can still Tx/Rx VLAN tagged packets.
3585 */
3586static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3587{
3588 struct ice_vsi *vsi = ice_get_main_vsi(pf);
3589 struct ice_vsi_ctx *ctxt;
3590 enum ice_status status;
3591 struct ice_hw *hw;
3592
3593 if (!vsi)
3594 return;
3595
3596 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3597 if (!ctxt)
3598 return;
3599
3600 hw = &pf->hw;
3601 ctxt->info = vsi->info;
3602
3603 ctxt->info.valid_sections =
3604 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3605 ICE_AQ_VSI_PROP_SECURITY_VALID |
3606 ICE_AQ_VSI_PROP_SW_VALID);
3607
3608 /* disable VLAN anti-spoof */
3609 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3610 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3611
3612 /* disable VLAN pruning and keep all other settings */
3613 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3614
3615 /* allow all VLANs on Tx and don't strip on Rx */
3616 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3617 ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3618
3619 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3620 if (status) {
3621 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3622 ice_stat_str(status),
3623 ice_aq_str(hw->adminq.sq_last_status));
3624 } else {
3625 vsi->info.sec_flags = ctxt->info.sec_flags;
3626 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3627 vsi->info.vlan_flags = ctxt->info.vlan_flags;
3628 }
3629
3630 kfree(ctxt);
3631}
3632
3633/**
3634 * ice_log_pkg_init - log result of DDP package load
3635 * @hw: pointer to hardware info
3636 * @status: status of package load
3637 */
3638static void
3639ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3640{
3641 struct ice_pf *pf = (struct ice_pf *)hw->back;
3642 struct device *dev = ice_pf_to_dev(pf);
3643
3644 switch (*status) {
3645 case ICE_SUCCESS:
3646		/* The package download AdminQ command returned success, either
3647		 * because this download succeeded or because it returned
3648		 * ICE_ERR_AQ_NO_WORK and a package is already loaded on the device.
3649 */
3650 if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3651 hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3652 hw->pkg_ver.update == hw->active_pkg_ver.update &&
3653 hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3654 !memcmp(hw->pkg_name, hw->active_pkg_name,
3655 sizeof(hw->pkg_name))) {
3656 if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3657 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3658 hw->active_pkg_name,
3659 hw->active_pkg_ver.major,
3660 hw->active_pkg_ver.minor,
3661 hw->active_pkg_ver.update,
3662 hw->active_pkg_ver.draft);
3663 else
3664 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3665 hw->active_pkg_name,
3666 hw->active_pkg_ver.major,
3667 hw->active_pkg_ver.minor,
3668 hw->active_pkg_ver.update,
3669 hw->active_pkg_ver.draft);
3670 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3671 hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3672 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
3673 hw->active_pkg_name,
3674 hw->active_pkg_ver.major,
3675 hw->active_pkg_ver.minor,
3676 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3677 *status = ICE_ERR_NOT_SUPPORTED;
3678 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3679 hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3680 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3681 hw->active_pkg_name,
3682 hw->active_pkg_ver.major,
3683 hw->active_pkg_ver.minor,
3684 hw->active_pkg_ver.update,
3685 hw->active_pkg_ver.draft,
3686 hw->pkg_name,
3687 hw->pkg_ver.major,
3688 hw->pkg_ver.minor,
3689 hw->pkg_ver.update,
3690 hw->pkg_ver.draft);
3691 } else {
3692 dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
3693 *status = ICE_ERR_NOT_SUPPORTED;
3694 }
3695 break;
3696 case ICE_ERR_FW_DDP_MISMATCH:
3697		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering Safe Mode.\n");
3698 break;
3699 case ICE_ERR_BUF_TOO_SHORT:
3700 case ICE_ERR_CFG:
3701 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3702 break;
3703 case ICE_ERR_NOT_SUPPORTED:
3704 /* Package File version not supported */
3705 if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3706 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3707 hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3708 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
3709 else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3710 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3711 hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3712 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
3713 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3714 break;
3715 case ICE_ERR_AQ_ERROR:
3716 switch (hw->pkg_dwnld_status) {
3717 case ICE_AQ_RC_ENOSEC:
3718 case ICE_AQ_RC_EBADSIG:
3719 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
3720 return;
3721 case ICE_AQ_RC_ESVN:
3722 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
3723 return;
3724 case ICE_AQ_RC_EBADMAN:
3725 case ICE_AQ_RC_EBADBUF:
3726 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
3727 /* poll for reset to complete */
3728 if (ice_check_reset(hw))
3729 dev_err(dev, "Error resetting device. Please reload the driver\n");
3730 return;
3731 default:
3732 break;
3733 }
3734 fallthrough;
3735 default:
3736 dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
3737 *status);
3738 break;
3739 }
3740}
3741
3742/**
3743 * ice_load_pkg - load/reload the DDP Package file
3744 * @firmware: firmware structure when firmware requested or NULL for reload
3745 * @pf: pointer to the PF instance
3746 *
3747 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3748 * initialize HW tables.
3749 */
3750static void
3751ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3752{
3753 enum ice_status status = ICE_ERR_PARAM;
3754 struct device *dev = ice_pf_to_dev(pf);
3755 struct ice_hw *hw = &pf->hw;
3756
3757 /* Load DDP Package */
3758 if (firmware && !hw->pkg_copy) {
3759 status = ice_copy_and_init_pkg(hw, firmware->data,
3760 firmware->size);
3761 ice_log_pkg_init(hw, &status);
3762 } else if (!firmware && hw->pkg_copy) {
3763 /* Reload package during rebuild after CORER/GLOBR reset */
3764 status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3765 ice_log_pkg_init(hw, &status);
3766 } else {
3767 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3768 }
3769
3770 if (status) {
3771 /* Safe Mode */
3772 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3773 return;
3774 }
3775
3776 /* Successful download package is the precondition for advanced
3777 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
3778 */
3779 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3780}
3781
3782/**
3783 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3784 * @pf: pointer to the PF structure
3785 *
3786 * There is no error returned here because the driver should be able to handle
3787 * 128 Byte cache lines, so we only print a warning in case issues are seen,
3788 * specifically with Tx.
3789 */
3790static void ice_verify_cacheline_size(struct ice_pf *pf)
3791{
3792 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3793 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3794 ICE_CACHE_LINE_BYTES);
3795}
3796
3797/**
3798 * ice_send_version - update firmware with driver version
3799 * @pf: PF struct
3800 *
3801 * Returns ICE_SUCCESS on success, else error code
3802 */
3803static enum ice_status ice_send_version(struct ice_pf *pf)
3804{
3805 struct ice_driver_ver dv;
3806
3807 dv.major_ver = 0xff;
3808 dv.minor_ver = 0xff;
3809 dv.build_ver = 0xff;
3810 dv.subbuild_ver = 0;
3811 strscpy((char *)dv.driver_string, UTS_RELEASE,
3812 sizeof(dv.driver_string));
3813 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3814}
3815
3816/**
3817 * ice_init_fdir - Initialize flow director VSI and configuration
3818 * @pf: pointer to the PF instance
3819 *
3820 * Returns 0 on success, negative on error
3821 */
3822static int ice_init_fdir(struct ice_pf *pf)
3823{
3824 struct device *dev = ice_pf_to_dev(pf);
3825 struct ice_vsi *ctrl_vsi;
3826 int err;
3827
3828 /* Side Band Flow Director needs to have a control VSI.
3829 * Allocate it and store it in the PF.
3830 */
3831 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
3832 if (!ctrl_vsi) {
3833 dev_dbg(dev, "could not create control VSI\n");
3834 return -ENOMEM;
3835 }
3836
3837 err = ice_vsi_open_ctrl(ctrl_vsi);
3838 if (err) {
3839 dev_dbg(dev, "could not open control VSI\n");
3840 goto err_vsi_open;
3841 }
3842
3843 mutex_init(&pf->hw.fdir_fltr_lock);
3844
3845 err = ice_fdir_create_dflt_rules(pf);
3846 if (err)
3847 goto err_fdir_rule;
3848
3849 return 0;
3850
3851err_fdir_rule:
3852 ice_fdir_release_flows(&pf->hw);
3853 ice_vsi_close(ctrl_vsi);
3854err_vsi_open:
3855 ice_vsi_release(ctrl_vsi);
3856 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
3857 pf->vsi[pf->ctrl_vsi_idx] = NULL;
3858 pf->ctrl_vsi_idx = ICE_NO_VSI;
3859 }
3860 return err;
3861}
3862
3863/**
3864 * ice_get_opt_fw_name - return optional firmware file name or NULL
3865 * @pf: pointer to the PF instance
3866 */
3867static char *ice_get_opt_fw_name(struct ice_pf *pf)
3868{
3869	/* Optional firmware name is the same as the default, with an added
3870	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
3871 */
3872 struct pci_dev *pdev = pf->pdev;
3873 char *opt_fw_filename;
3874 u64 dsn;
3875
3876 /* Determine the name of the optional file using the DSN (two
3877 * dwords following the start of the DSN Capability).
3878 */
3879 dsn = pci_get_dsn(pdev);
3880 if (!dsn)
3881 return NULL;
3882
3883 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
3884 if (!opt_fw_filename)
3885 return NULL;
3886
3887 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
3888 ICE_DDP_PKG_PATH, dsn);
3889
3890 return opt_fw_filename;
3891}
3892
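/* For a concrete feel for the name built above: with DSN
 * 0x0123456789abcdef the optional package becomes
 * "intel/ice/ddp/ice-0123456789abcdef.pkg". A standalone sketch of the
 * same snprintf(); the sample DSN value is an assumption.
 */
#include <inttypes.h>
#include <stdio.h>

#define DDP_PKG_PATH "intel/ice/ddp/"

int main(void)
{
	char name[256];
	uint64_t dsn = 0x0123456789abcdefULL;	/* sample serial number */

	/* Same format the driver uses: path, "ice-", 16 hex digits, ".pkg" */
	snprintf(name, sizeof(name), "%sice-%016" PRIx64 ".pkg",
		 DDP_PKG_PATH, dsn);
	puts(name);	/* intel/ice/ddp/ice-0123456789abcdef.pkg */
	return 0;
}
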
3893/**
3894 * ice_request_fw - request and load the DDP package file
3895 * @pf: pointer to the PF instance
3896 */
3897static void ice_request_fw(struct ice_pf *pf)
3898{
3899 char *opt_fw_filename = ice_get_opt_fw_name(pf);
3900 const struct firmware *firmware = NULL;
3901 struct device *dev = ice_pf_to_dev(pf);
3902 int err = 0;
3903
3904	/* An optional device-specific DDP package (if present) overrides the
3905	 * default DDP package file. The kernel logs a debug message if the
3906	 * file doesn't exist and warning messages for other errors.
3907 */
3908 if (opt_fw_filename) {
3909 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
3910 if (err) {
3911 kfree(opt_fw_filename);
3912 goto dflt_pkg_load;
3913 }
3914
3915 /* request for firmware was successful. Download to device */
3916 ice_load_pkg(firmware, pf);
3917 kfree(opt_fw_filename);
3918 release_firmware(firmware);
3919 return;
3920 }
3921
3922dflt_pkg_load:
3923 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
3924 if (err) {
3925 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
3926 return;
3927 }
3928
3929 /* request for firmware was successful. Download to device */
3930 ice_load_pkg(firmware, pf);
3931 release_firmware(firmware);
3932}
3933
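/* The lookup order above is "device-specific file first, generic file as
 * fallback". A minimal sketch of that two-step fallback with stub loaders
 * standing in for the firmware API; everything here is illustrative.
 */
#include <stdio.h>

/* Stub loader: pretend only the generic "intel/..." file exists. */
static int load_file(const char *name)
{
	return name[0] == 'i' ? 0 : -1;
}

int main(void)
{
	const char *opt = "opt-ice-0123456789abcdef.pkg";
	const char *dflt = "intel/ice/ddp/ice.pkg";

	/* Prefer the board-specific package; fall back quietly. */
	if (load_file(opt) == 0) {
		puts("loaded optional package");
		return 0;
	}
	if (load_file(dflt) == 0) {
		puts("loaded default package");
		return 0;
	}
	puts("no package found: Safe Mode");
	return 0;
}
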
3934/**
3935 * ice_print_wake_reason - show the wake up cause in the log
3936 * @pf: pointer to the PF struct
3937 */
3938static void ice_print_wake_reason(struct ice_pf *pf)
3939{
3940 u32 wus = pf->wakeup_reason;
3941 const char *wake_str;
3942
3943 /* if no wake event, nothing to print */
3944 if (!wus)
3945 return;
3946
3947 if (wus & PFPM_WUS_LNKC_M)
3948 wake_str = "Link\n";
3949 else if (wus & PFPM_WUS_MAG_M)
3950 wake_str = "Magic Packet\n";
3951 else if (wus & PFPM_WUS_MNG_M)
3952 wake_str = "Management\n";
3953 else if (wus & PFPM_WUS_FW_RST_WK_M)
3954 wake_str = "Firmware Reset\n";
3955 else
3956 wake_str = "Unknown\n";
3957
3958 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
3959}
3960
3961/**
3962 * ice_probe - Device initialization routine
3963 * @pdev: PCI device information struct
3964 * @ent: entry in ice_pci_tbl
3965 *
3966 * Returns 0 on success, negative on failure
3967 */
3968static int
3969ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
3970{
3971 struct device *dev = &pdev->dev;
3972 struct ice_pf *pf;
3973 struct ice_hw *hw;
3974 int err;
3975
3976 /* this driver uses devres, see
3977 * Documentation/driver-api/driver-model/devres.rst
3978 */
3979 err = pcim_enable_device(pdev);
3980 if (err)
3981 return err;
3982
3983 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
3984 if (err) {
3985 dev_err(dev, "BAR0 I/O map error %d\n", err);
3986 return err;
3987 }
3988
3989 pf = ice_allocate_pf(dev);
3990 if (!pf)
3991 return -ENOMEM;
3992
3993 /* set up for high or low DMA */
3994 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3995 if (err)
3996 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3997 if (err) {
3998 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
3999 return err;
4000 }
4001
4002 pci_enable_pcie_error_reporting(pdev);
4003 pci_set_master(pdev);
4004
4005 pf->pdev = pdev;
4006 pci_set_drvdata(pdev, pf);
4007 set_bit(__ICE_DOWN, pf->state);
4008 /* Disable service task until DOWN bit is cleared */
4009 set_bit(__ICE_SERVICE_DIS, pf->state);
4010
4011 hw = &pf->hw;
4012 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4013 pci_save_state(pdev);
4014
4015 hw->back = pf;
4016 hw->vendor_id = pdev->vendor;
4017 hw->device_id = pdev->device;
4018 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4019 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4020 hw->subsystem_device_id = pdev->subsystem_device;
4021 hw->bus.device = PCI_SLOT(pdev->devfn);
4022 hw->bus.func = PCI_FUNC(pdev->devfn);
4023 ice_set_ctrlq_len(hw);
4024
4025 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4026
4027 err = ice_devlink_register(pf);
4028 if (err) {
4029 dev_err(dev, "ice_devlink_register failed: %d\n", err);
4030 goto err_exit_unroll;
4031 }
4032
4033#ifndef CONFIG_DYNAMIC_DEBUG
4034 if (debug < -1)
4035 hw->debug_mask = debug;
4036#endif
4037
4038 err = ice_init_hw(hw);
4039 if (err) {
4040 dev_err(dev, "ice_init_hw failed: %d\n", err);
4041 err = -EIO;
4042 goto err_exit_unroll;
4043 }
4044
4045 ice_request_fw(pf);
4046
4047	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4048	 * set in pf->flags, which will cause ice_is_safe_mode to return
4049 * true
4050 */
4051 if (ice_is_safe_mode(pf)) {
4052 dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4053 /* we already got function/device capabilities but these don't
4054 * reflect what the driver needs to do in safe mode. Instead of
4055 * adding conditional logic everywhere to ignore these
4056 * device/function capabilities, override them.
4057 */
4058 ice_set_safe_mode_caps(hw);
4059 }
4060
4061 err = ice_init_pf(pf);
4062 if (err) {
4063 dev_err(dev, "ice_init_pf failed: %d\n", err);
4064 goto err_init_pf_unroll;
4065 }
4066
4067 ice_devlink_init_regions(pf);
4068
4069 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4070 if (!pf->num_alloc_vsi) {
4071 err = -EIO;
4072 goto err_init_pf_unroll;
4073 }
4074
4075 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4076 GFP_KERNEL);
4077 if (!pf->vsi) {
4078 err = -ENOMEM;
4079 goto err_init_pf_unroll;
4080 }
4081
4082 err = ice_init_interrupt_scheme(pf);
4083 if (err) {
4084 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4085 err = -EIO;
4086 goto err_init_vsi_unroll;
4087 }
4088
4089 /* In case of MSIX we are going to setup the misc vector right here
4090 * to handle admin queue events etc. In case of legacy and MSI
4091 * the misc functionality and queue processing is combined in
4092 * the same vector and that gets setup at open.
4093 */
4094 err = ice_req_irq_msix_misc(pf);
4095 if (err) {
4096 dev_err(dev, "setup of misc vector failed: %d\n", err);
4097 goto err_init_interrupt_unroll;
4098 }
4099
4100 /* create switch struct for the switch element created by FW on boot */
4101 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4102 if (!pf->first_sw) {
4103 err = -ENOMEM;
4104 goto err_msix_misc_unroll;
4105 }
4106
4107 if (hw->evb_veb)
4108 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4109 else
4110 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4111
4112 pf->first_sw->pf = pf;
4113
4114 /* record the sw_id available for later use */
4115 pf->first_sw->sw_id = hw->port_info->sw_id;
4116
4117 err = ice_setup_pf_sw(pf);
4118 if (err) {
4119 dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4120 goto err_alloc_sw_unroll;
4121 }
4122
4123 clear_bit(__ICE_SERVICE_DIS, pf->state);
4124
4125 /* tell the firmware we are up */
4126 err = ice_send_version(pf);
4127 if (err) {
4128 dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4129 UTS_RELEASE, err);
4130 goto err_send_version_unroll;
4131 }
4132
4133 /* since everything is good, start the service timer */
4134 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4135
4136 err = ice_init_link_events(pf->hw.port_info);
4137 if (err) {
4138 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4139 goto err_send_version_unroll;
4140 }
4141
4142 err = ice_init_nvm_phy_type(pf->hw.port_info);
4143 if (err) {
4144 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4145 goto err_send_version_unroll;
4146 }
4147
4148 err = ice_update_link_info(pf->hw.port_info);
4149 if (err) {
4150 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4151 goto err_send_version_unroll;
4152 }
4153
4154 ice_init_link_dflt_override(pf->hw.port_info);
4155
4156 /* if media available, initialize PHY settings */
4157 if (pf->hw.port_info->phy.link_info.link_info &
4158 ICE_AQ_MEDIA_AVAILABLE) {
4159 err = ice_init_phy_user_cfg(pf->hw.port_info);
4160 if (err) {
4161 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4162 goto err_send_version_unroll;
4163 }
4164
4165 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4166 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4167
4168 if (vsi)
4169 ice_configure_phy(vsi);
4170 }
4171 } else {
4172 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4173 }
4174
4175 ice_verify_cacheline_size(pf);
4176
4177 /* Save wakeup reason register for later use */
4178 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4179
4180 /* check for a power management event */
4181 ice_print_wake_reason(pf);
4182
4183 /* clear wake status, all bits */
4184 wr32(hw, PFPM_WUS, U32_MAX);
4185
4186 /* Disable WoL at init, wait for user to enable */
4187 device_set_wakeup_enable(dev, false);
4188
4189 if (ice_is_safe_mode(pf)) {
4190 ice_set_safe_mode_vlan_cfg(pf);
4191 goto probe_done;
4192 }
4193
4194 /* initialize DDP driven features */
4195
4196 /* Note: Flow director init failure is non-fatal to load */
4197 if (ice_init_fdir(pf))
4198 dev_err(dev, "could not initialize flow director\n");
4199
4200 /* Note: DCB init failure is non-fatal to load */
4201 if (ice_init_pf_dcb(pf, false)) {
4202 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4203 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4204 } else {
4205 ice_cfg_lldp_mib_change(&pf->hw, true);
4206 }
4207
4208 /* print PCI link speed and width */
4209 pcie_print_link_status(pf->pdev);
4210
4211probe_done:
4212 /* ready to go, so clear down state bit */
4213 clear_bit(__ICE_DOWN, pf->state);
4214 return 0;
4215
4216err_send_version_unroll:
4217 ice_vsi_release_all(pf);
4218err_alloc_sw_unroll:
4219 ice_devlink_destroy_port(pf);
4220 set_bit(__ICE_SERVICE_DIS, pf->state);
4221 set_bit(__ICE_DOWN, pf->state);
4222 devm_kfree(dev, pf->first_sw);
4223err_msix_misc_unroll:
4224 ice_free_irq_msix_misc(pf);
4225err_init_interrupt_unroll:
4226 ice_clear_interrupt_scheme(pf);
4227err_init_vsi_unroll:
4228 devm_kfree(dev, pf->vsi);
4229err_init_pf_unroll:
4230 ice_deinit_pf(pf);
4231 ice_devlink_destroy_regions(pf);
4232 ice_deinit_hw(hw);
4233err_exit_unroll:
4234 ice_devlink_unregister(pf);
4235 pci_disable_pcie_error_reporting(pdev);
4236 pci_disable_device(pdev);
4237 return err;
4238}
4239
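/* ice_probe() above is a textbook goto-unwind ladder: each failure jumps
 * to the label that releases everything acquired so far, in reverse
 * order, while success leaves the resources held until remove(). A
 * minimal standalone skeleton of the idiom with stub resources,
 * illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

static void *res_a, *res_b, *res_c;

/* probe(): acquire three resources, unwinding on any failure. */
static int probe_sketch(void)
{
	res_a = malloc(8);
	if (!res_a)
		return -1;

	res_b = malloc(8);
	if (!res_b)
		goto err_free_a;

	res_c = malloc(8);
	if (!res_c)
		goto err_free_b;

	return 0;		/* success: everything stays held */

err_free_b:
	free(res_b);
err_free_a:
	free(res_a);
	return -1;
}

/* remove(): release in reverse order of acquisition. */
static void remove_sketch(void)
{
	free(res_c);
	free(res_b);
	free(res_a);
}

int main(void)
{
	if (!probe_sketch())
		remove_sketch();
	return 0;
}
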
4240/**
4241 * ice_set_wake - enable or disable Wake on LAN
4242 * @pf: pointer to the PF struct
4243 *
4244 * Simple helper for WoL control
4245 */
4246static void ice_set_wake(struct ice_pf *pf)
4247{
4248 struct ice_hw *hw = &pf->hw;
4249 bool wol = pf->wol_ena;
4250
4251 /* clear wake state, otherwise new wake events won't fire */
4252 wr32(hw, PFPM_WUS, U32_MAX);
4253
4254 /* enable / disable APM wake up, no RMW needed */
4255 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4256
4257 /* set magic packet filter enabled */
4258 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4259}
4260
4261/**
4262 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4263 * @pf: pointer to the PF struct
4264 *
4265 * Issue firmware command to enable multicast magic wake, making
4266 * sure that any locally administered address (LAA) is used for
4267 * wake, and that PF reset doesn't undo the LAA.
4268 */
4269static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4270{
4271 struct device *dev = ice_pf_to_dev(pf);
4272 struct ice_hw *hw = &pf->hw;
4273 enum ice_status status;
4274 u8 mac_addr[ETH_ALEN];
4275 struct ice_vsi *vsi;
4276 u8 flags;
4277
4278 if (!pf->wol_ena)
4279 return;
4280
4281 vsi = ice_get_main_vsi(pf);
4282 if (!vsi)
4283 return;
4284
4285 /* Get current MAC address in case it's an LAA */
4286 if (vsi->netdev)
4287 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4288 else
4289 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4290
4291 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4292 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4293 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4294
4295 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4296 if (status)
4297 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4298 ice_stat_str(status),
4299 ice_aq_str(hw->adminq.sq_last_status));
4300}
4301
4302/**
4303 * ice_remove - Device removal routine
4304 * @pdev: PCI device information struct
4305 */
4306static void ice_remove(struct pci_dev *pdev)
4307{
4308 struct ice_pf *pf = pci_get_drvdata(pdev);
4309 int i;
4310
4311 if (!pf)
4312 return;
4313
4314 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4315 if (!ice_is_reset_in_progress(pf->state))
4316 break;
4317 msleep(100);
4318 }
4319
4320 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4321 set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
4322 ice_free_vfs(pf);
4323 }
4324
4325 set_bit(__ICE_DOWN, pf->state);
4326 ice_service_task_stop(pf);
4327
4328 ice_aq_cancel_waiting_tasks(pf);
4329
4330	mutex_destroy(&pf->hw.fdir_fltr_lock);
4331 if (!ice_is_safe_mode(pf))
4332 ice_remove_arfs(pf);
4333 ice_setup_mc_magic_wake(pf);
4334 ice_devlink_destroy_port(pf);
4335 ice_vsi_release_all(pf);
4336 ice_set_wake(pf);
4337 ice_free_irq_msix_misc(pf);
4338 ice_for_each_vsi(pf, i) {
4339 if (!pf->vsi[i])
4340 continue;
4341 ice_vsi_free_q_vectors(pf->vsi[i]);
4342 }
4343 ice_deinit_pf(pf);
4344 ice_devlink_destroy_regions(pf);
4345 ice_deinit_hw(&pf->hw);
4346 ice_devlink_unregister(pf);
4347
4348 /* Issue a PFR as part of the prescribed driver unload flow. Do not
4349 * do it via ice_schedule_reset() since there is no need to rebuild
4350 * and the service task is already stopped.
4351 */
4352 ice_reset(&pf->hw, ICE_RESET_PFR);
4353 pci_wait_for_pending_transaction(pdev);
4354 ice_clear_interrupt_scheme(pf);
4355 pci_disable_pcie_error_reporting(pdev);
4356 pci_disable_device(pdev);
4357}
4358
4359/**
4360 * ice_shutdown - PCI callback for shutting down device
4361 * @pdev: PCI device information struct
4362 */
4363static void ice_shutdown(struct pci_dev *pdev)
4364{
4365 struct ice_pf *pf = pci_get_drvdata(pdev);
4366
4367 ice_remove(pdev);
4368
4369 if (system_state == SYSTEM_POWER_OFF) {
4370 pci_wake_from_d3(pdev, pf->wol_ena);
4371 pci_set_power_state(pdev, PCI_D3hot);
4372 }
4373}
4374
4375#ifdef CONFIG_PM
4376/**
4377 * ice_prepare_for_shutdown - prep for PCI shutdown
4378 * @pf: board private structure
4379 *
4380 * Inform or close all dependent features in prep for PCI device shutdown
4381 */
4382static void ice_prepare_for_shutdown(struct ice_pf *pf)
4383{
4384 struct ice_hw *hw = &pf->hw;
4385 u32 v;
4386
4387 /* Notify VFs of impending reset */
4388 if (ice_check_sq_alive(hw, &hw->mailboxq))
4389 ice_vc_notify_reset(pf);
4390
4391 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4392
4393 /* disable the VSIs and their queues that are not already DOWN */
4394 ice_pf_dis_all_vsi(pf, false);
4395
4396 ice_for_each_vsi(pf, v)
4397 if (pf->vsi[v])
4398 pf->vsi[v]->vsi_num = 0;
4399
4400 ice_shutdown_all_ctrlq(hw);
4401}
4402
4403/**
4404 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4405 * @pf: board private structure to reinitialize
4406 *
4407 * This routine reinitializes the interrupt scheme that was cleared during
4408 * the power management suspend callback.
4409 *
4410 * This should be called during the resume routine to re-allocate the
4411 * q_vectors and reacquire interrupts.
4412 */
4413static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4414{
4415 struct device *dev = ice_pf_to_dev(pf);
4416 int ret, v;
4417
4418	/* Since we clear the MSI-X flag during suspend, we need to
4419 * set it back during resume...
4420 */
4421
4422 ret = ice_init_interrupt_scheme(pf);
4423 if (ret) {
4424 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4425 return ret;
4426 }
4427
4428 /* Remap vectors and rings, after successful re-init interrupts */
4429 ice_for_each_vsi(pf, v) {
4430 if (!pf->vsi[v])
4431 continue;
4432
4433 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4434 if (ret)
4435 goto err_reinit;
4436 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4437 }
4438
4439 ret = ice_req_irq_msix_misc(pf);
4440 if (ret) {
4441 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4442 ret);
4443 goto err_reinit;
4444 }
4445
4446 return 0;
4447
4448err_reinit:
4449 while (v--)
4450 if (pf->vsi[v])
4451 ice_vsi_free_q_vectors(pf->vsi[v]);
4452
4453 return ret;
4454}
4455
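/* The err_reinit label above relies on the classic "unwind exactly what
 * the loop finished" idiom: v holds the failing index, so while (v--)
 * frees indices v-1 down to 0. A standalone demonstration; the array
 * size and the simulated failure index are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define N 4

int main(void)
{
	void *res[N] = { 0 };
	int v;

	for (v = 0; v < N; v++) {
		res[v] = malloc(16);
		if (v == 2) {			/* simulate failure at index 2 */
			free(res[v]);		/* nothing kept for index v */
			res[v] = NULL;
			goto err;
		}
	}
	return 0;

err:
	while (v--) {				/* frees index 1, then 0 */
		free(res[v]);
		printf("rolled back %d\n", v);
	}
	return 1;
}
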
4456/**
4457 * ice_suspend - PM suspend callback
4458 * @dev: generic device information structure
4459 *
4460 * Power Management callback to quiesce the device and prepare
4461 * for D3 transition.
4462 */
4463static int __maybe_unused ice_suspend(struct device *dev)
4464{
4465 struct pci_dev *pdev = to_pci_dev(dev);
4466 struct ice_pf *pf;
4467 int disabled, v;
4468
4469 pf = pci_get_drvdata(pdev);
4470
4471 if (!ice_pf_state_is_nominal(pf)) {
4472 dev_err(dev, "Device is not ready, no need to suspend it\n");
4473 return -EBUSY;
4474 }
4475
4476 /* Stop watchdog tasks until resume completion.
4477 * Even though it is most likely that the service task is
4478 * disabled if the device is suspended or down, the service task's
4479 * state is controlled by a different state bit, and we should
4480 * store and honor whatever state that bit is in at this point.
4481 */
4482 disabled = ice_service_task_stop(pf);
4483
4484	/* Already suspended? Then there is nothing to do */
4485 if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
4486 if (!disabled)
4487 ice_service_task_restart(pf);
4488 return 0;
4489 }
4490
4491 if (test_bit(__ICE_DOWN, pf->state) ||
4492 ice_is_reset_in_progress(pf->state)) {
4493 dev_err(dev, "can't suspend device in reset or already down\n");
4494 if (!disabled)
4495 ice_service_task_restart(pf);
4496 return 0;
4497 }
4498
4499 ice_setup_mc_magic_wake(pf);
4500
4501 ice_prepare_for_shutdown(pf);
4502
4503 ice_set_wake(pf);
4504
4505 /* Free vectors, clear the interrupt scheme and release IRQs
4506 * for proper hibernation, especially with large number of CPUs.
4507 * Otherwise hibernation might fail when mapping all the vectors back
4508 * to CPU0.
4509 */
4510 ice_free_irq_msix_misc(pf);
4511 ice_for_each_vsi(pf, v) {
4512 if (!pf->vsi[v])
4513 continue;
4514 ice_vsi_free_q_vectors(pf->vsi[v]);
4515 }
4516 ice_clear_interrupt_scheme(pf);
4517
4518 pci_save_state(pdev);
4519 pci_wake_from_d3(pdev, pf->wol_ena);
4520 pci_set_power_state(pdev, PCI_D3hot);
4521 return 0;
4522}
4523
4524/**
4525 * ice_resume - PM callback for waking up from D3
4526 * @dev: generic device information structure
4527 */
4528static int __maybe_unused ice_resume(struct device *dev)
4529{
4530 struct pci_dev *pdev = to_pci_dev(dev);
4531 enum ice_reset_req reset_type;
4532 struct ice_pf *pf;
4533 struct ice_hw *hw;
4534 int ret;
4535
4536 pci_set_power_state(pdev, PCI_D0);
4537 pci_restore_state(pdev);
4538 pci_save_state(pdev);
4539
4540 if (!pci_device_is_present(pdev))
4541 return -ENODEV;
4542
4543 ret = pci_enable_device_mem(pdev);
4544 if (ret) {
4545 dev_err(dev, "Cannot enable device after suspend\n");
4546 return ret;
4547 }
4548
4549 pf = pci_get_drvdata(pdev);
4550 hw = &pf->hw;
4551
4552 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4553 ice_print_wake_reason(pf);
4554
4555 /* We cleared the interrupt scheme when we suspended, so we need to
4556 * restore it now to resume device functionality.
4557 */
4558 ret = ice_reinit_interrupt_scheme(pf);
4559 if (ret)
4560 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4561
4562 clear_bit(__ICE_DOWN, pf->state);
4563 /* Now perform PF reset and rebuild */
4564 reset_type = ICE_RESET_PFR;
4565 /* re-enable service task for reset, but allow reset to schedule it */
4566 clear_bit(__ICE_SERVICE_DIS, pf->state);
4567
4568 if (ice_schedule_reset(pf, reset_type))
4569 dev_err(dev, "Reset during resume failed.\n");
4570
4571 clear_bit(__ICE_SUSPENDED, pf->state);
4572 ice_service_task_restart(pf);
4573
4574 /* Restart the service task */
4575 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4576
4577 return 0;
4578}
4579#endif /* CONFIG_PM */
4580
4581/**
4582 * ice_pci_err_detected - warning that PCI error has been detected
4583 * @pdev: PCI device information struct
4584 * @err: the type of PCI error
4585 *
4586 * Called to warn that something happened on the PCI bus and the error handling
4587 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
4588 */
4589static pci_ers_result_t
4590ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4591{
4592 struct ice_pf *pf = pci_get_drvdata(pdev);
4593
4594 if (!pf) {
4595 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4596 __func__, err);
4597 return PCI_ERS_RESULT_DISCONNECT;
4598 }
4599
4600 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4601 ice_service_task_stop(pf);
4602
4603 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4604 set_bit(__ICE_PFR_REQ, pf->state);
4605 ice_prepare_for_reset(pf);
4606 }
4607 }
4608
4609 return PCI_ERS_RESULT_NEED_RESET;
4610}
4611
4612/**
4613 * ice_pci_err_slot_reset - a PCI slot reset has just happened
4614 * @pdev: PCI device information struct
4615 *
4616 * Called to determine if the driver can recover from the PCI slot reset by
4617 * using a register read to determine if the device is recoverable.
4618 */
4619static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4620{
4621 struct ice_pf *pf = pci_get_drvdata(pdev);
4622 pci_ers_result_t result;
4623 int err;
4624 u32 reg;
4625
4626 err = pci_enable_device_mem(pdev);
4627 if (err) {
4628 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4629 err);
4630 result = PCI_ERS_RESULT_DISCONNECT;
4631 } else {
4632 pci_set_master(pdev);
4633 pci_restore_state(pdev);
4634 pci_save_state(pdev);
4635 pci_wake_from_d3(pdev, false);
4636
4637 /* Check for life */
4638 reg = rd32(&pf->hw, GLGEN_RTRIG);
4639 if (!reg)
4640 result = PCI_ERS_RESULT_RECOVERED;
4641 else
4642 result = PCI_ERS_RESULT_DISCONNECT;
4643 }
4644
4645 err = pci_aer_clear_nonfatal_status(pdev);
4646 if (err)
4647 dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4648 err);
4649 /* non-fatal, continue */
4650
4651 return result;
4652}
4653
4654/**
4655 * ice_pci_err_resume - restart operations after PCI error recovery
4656 * @pdev: PCI device information struct
4657 *
4658 * Called to allow the driver to bring things back up after PCI error and/or
4659 * reset recovery have finished
4660 */
4661static void ice_pci_err_resume(struct pci_dev *pdev)
4662{
4663 struct ice_pf *pf = pci_get_drvdata(pdev);
4664
4665 if (!pf) {
4666 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4667 __func__);
4668 return;
4669 }
4670
4671 if (test_bit(__ICE_SUSPENDED, pf->state)) {
4672 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4673 __func__);
4674 return;
4675 }
4676
4677 ice_restore_all_vfs_msi_state(pdev);
4678
4679 ice_do_reset(pf, ICE_RESET_PFR);
4680 ice_service_task_restart(pf);
4681 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4682}
4683
4684/**
4685 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4686 * @pdev: PCI device information struct
4687 */
4688static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4689{
4690 struct ice_pf *pf = pci_get_drvdata(pdev);
4691
4692 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4693 ice_service_task_stop(pf);
4694
4695 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4696 set_bit(__ICE_PFR_REQ, pf->state);
4697 ice_prepare_for_reset(pf);
4698 }
4699 }
4700}
4701
4702/**
4703 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
4704 * @pdev: PCI device information struct
4705 */
4706static void ice_pci_err_reset_done(struct pci_dev *pdev)
4707{
4708 ice_pci_err_resume(pdev);
4709}
4710
4711/* ice_pci_tbl - PCI Device ID Table
4712 *
4713 * Wildcard entries (PCI_ANY_ID) should come last
4714 * Last entry must be all 0s
4715 *
4716 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
4717 * Class, Class Mask, private data (not used) }
4718 */
4719static const struct pci_device_id ice_pci_tbl[] = {
4720 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
4721 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
4722 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
4723 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
4724 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
4725 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
4726 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
4727 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
4728 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
4729 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
4730 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
4731 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
4732 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
4733 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
4734 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
4735 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
4736 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
4737 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
4738 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
4739 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
4740 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
4741 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
4742 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
4743 /* required last entry */
4744 { 0, }
4745};
4746MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
4747
4748static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
4749
4750static const struct pci_error_handlers ice_pci_err_handler = {
4751 .error_detected = ice_pci_err_detected,
4752 .slot_reset = ice_pci_err_slot_reset,
4753 .reset_prepare = ice_pci_err_reset_prepare,
4754 .reset_done = ice_pci_err_reset_done,
4755 .resume = ice_pci_err_resume
4756};
4757
4758static struct pci_driver ice_driver = {
4759 .name = KBUILD_MODNAME,
4760 .id_table = ice_pci_tbl,
4761 .probe = ice_probe,
4762 .remove = ice_remove,
4763#ifdef CONFIG_PM
4764 .driver.pm = &ice_pm_ops,
4765#endif /* CONFIG_PM */
4766 .shutdown = ice_shutdown,
4767 .sriov_configure = ice_sriov_configure,
4768 .err_handler = &ice_pci_err_handler
4769};
4770
4771/**
4772 * ice_module_init - Driver registration routine
4773 *
4774 * ice_module_init is the first routine called when the driver is
4775 * loaded. All it does is register with the PCI subsystem.
4776 */
4777static int __init ice_module_init(void)
4778{
4779 int status;
4780
4781 pr_info("%s\n", ice_driver_string);
4782 pr_info("%s\n", ice_copyright);
4783
4784 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
4785 if (!ice_wq) {
4786 pr_err("Failed to create workqueue\n");
4787 return -ENOMEM;
4788 }
4789
4790 status = pci_register_driver(&ice_driver);
4791 if (status) {
4792 pr_err("failed to register PCI driver, err %d\n", status);
4793 destroy_workqueue(ice_wq);
4794 }
4795
4796 return status;
4797}
4798module_init(ice_module_init);
4799
4800/**
4801 * ice_module_exit - Driver exit cleanup routine
4802 *
4803 * ice_module_exit is called just before the driver is removed
4804 * from memory.
4805 */
4806static void __exit ice_module_exit(void)
4807{
4808 pci_unregister_driver(&ice_driver);
4809 destroy_workqueue(ice_wq);
4810 pr_info("module unloaded\n");
4811}
4812module_exit(ice_module_exit);
4813
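/* ice_module_init()/ice_module_exit() above follow the usual pairing
 * rule: tear down in the reverse order of setup (destroy the workqueue
 * on the registration failure path, unregister the driver first on
 * unload). A compact standalone sketch of that pairing; the stub names
 * are assumptions.
 */
#include <stdio.h>

static int create_wq(void)       { puts("wq created");      return 0; }
static void destroy_wq(void)     { puts("wq destroyed"); }
static int register_drv(void)    { puts("drv registered");  return 0; }
static void unregister_drv(void) { puts("drv unregistered"); }

static int module_init_sketch(void)
{
	int err = create_wq();

	if (err)
		return err;

	err = register_drv();
	if (err)
		destroy_wq();	/* undo step 1 if step 2 fails */
	return err;
}

static void module_exit_sketch(void)
{
	unregister_drv();	/* reverse order: last thing set up ... */
	destroy_wq();		/* ... is the first thing torn down */
}

int main(void)
{
	if (!module_init_sketch())
		module_exit_sketch();
	return 0;
}
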
4814/**
4815 * ice_set_mac_address - NDO callback to set MAC address
4816 * @netdev: network interface device structure
4817 * @pi: pointer to an address structure
4818 *
4819 * Returns 0 on success, negative on failure
4820 */
4821static int ice_set_mac_address(struct net_device *netdev, void *pi)
4822{
4823 struct ice_netdev_priv *np = netdev_priv(netdev);
4824 struct ice_vsi *vsi = np->vsi;
4825 struct ice_pf *pf = vsi->back;
4826 struct ice_hw *hw = &pf->hw;
4827 struct sockaddr *addr = pi;
4828 enum ice_status status;
4829 u8 flags = 0;
4830 int err = 0;
4831 u8 *mac;
4832
4833 mac = (u8 *)addr->sa_data;
4834
4835 if (!is_valid_ether_addr(mac))
4836 return -EADDRNOTAVAIL;
4837
4838 if (ether_addr_equal(netdev->dev_addr, mac)) {
4839 netdev_warn(netdev, "already using mac %pM\n", mac);
4840 return 0;
4841 }
4842
4843 if (test_bit(__ICE_DOWN, pf->state) ||
4844 ice_is_reset_in_progress(pf->state)) {
4845 netdev_err(netdev, "can't set mac %pM. device not ready\n",
4846 mac);
4847 return -EBUSY;
4848 }
4849
4850 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
4851 status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
4852 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
4853 err = -EADDRNOTAVAIL;
4854 goto err_update_filters;
4855 }
4856
4857 /* Add filter for new MAC. If filter exists, just return success */
4858 status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
4859 if (status == ICE_ERR_ALREADY_EXISTS) {
4860 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
4861 return 0;
4862 }
4863
4864 /* error if the new filter addition failed */
4865 if (status)
4866 err = -EADDRNOTAVAIL;
4867
4868err_update_filters:
4869 if (err) {
4870 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
4871 mac);
4872 return err;
4873 }
4874
4875 /* change the netdev's MAC address */
4876 memcpy(netdev->dev_addr, mac, netdev->addr_len);
4877 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
4878 netdev->dev_addr);
4879
4880 /* write new MAC address to the firmware */
4881 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4882 status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
4883 if (status) {
4884 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
4885 mac, ice_stat_str(status));
4886 }
4887 return 0;
4888}
4889
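/* The MAC-change path above treats "old filter missing" and "new filter
 * already present" as success, which makes the operation safely
 * repeatable. A small standalone sketch of that tolerant update; the
 * error codes are stand-ins for ICE_ERR_DOES_NOT_EXIST and
 * ICE_ERR_ALREADY_EXISTS.
 */
#include <stdio.h>

enum { OK = 0, ERR_DOES_NOT_EXIST = 1, ERR_ALREADY_EXISTS = 2 };

static int remove_filter(int present) { return present ? OK : ERR_DOES_NOT_EXIST; }
static int add_filter(int present)    { return present ? ERR_ALREADY_EXISTS : OK; }

/* Swap filters; only unexpected statuses count as failure. */
static int update_mac(int old_present, int new_present)
{
	int status = remove_filter(old_present);

	if (status && status != ERR_DOES_NOT_EXIST)
		return -1;		/* real failure */

	status = add_filter(new_present);
	if (status == ERR_ALREADY_EXISTS)
		return 0;		/* benign: nothing to do */

	return status ? -1 : 0;
}

int main(void)
{
	/* Repeating the same update is harmless. */
	printf("first: %d again: %d\n", update_mac(1, 0), update_mac(0, 1));
	return 0;
}
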
4890/**
4891 * ice_set_rx_mode - NDO callback to set the netdev filters
4892 * @netdev: network interface device structure
4893 */
4894static void ice_set_rx_mode(struct net_device *netdev)
4895{
4896 struct ice_netdev_priv *np = netdev_priv(netdev);
4897 struct ice_vsi *vsi = np->vsi;
4898
4899 if (!vsi)
4900 return;
4901
4902	/* Set the flags to synchronize filters:
4903 * ndo_set_rx_mode may be triggered even without a change in netdev
4904 * flags
4905 */
4906 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
4907 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
4908 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
4909
4910 /* schedule our worker thread which will take care of
4911 * applying the new filter changes
4912 */
4913 ice_service_task_schedule(vsi->back);
4914}
4915
4916/**
4917 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
4918 * @netdev: network interface device structure
4919 * @queue_index: Queue ID
4920 * @maxrate: maximum bandwidth in Mbps
4921 */
4922static int
4923ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
4924{
4925 struct ice_netdev_priv *np = netdev_priv(netdev);
4926 struct ice_vsi *vsi = np->vsi;
4927 enum ice_status status;
4928 u16 q_handle;
4929 u8 tc;
4930
4931 /* Validate maxrate requested is within permitted range */
4932 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
4933 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
4934 maxrate, queue_index);
4935 return -EINVAL;
4936 }
4937
4938 q_handle = vsi->tx_rings[queue_index]->q_handle;
4939 tc = ice_dcb_get_tc(vsi, queue_index);
4940
4941 /* Set BW back to default, when user set maxrate to 0 */
4942 if (!maxrate)
4943 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
4944 q_handle, ICE_MAX_BW);
4945 else
4946 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
4947 q_handle, ICE_MAX_BW, maxrate * 1000);
4948 if (status) {
4949 netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
4950 ice_stat_str(status));
4951 return -EIO;
4952 }
4953
4954 return 0;
4955}
4956
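/* Two details in ice_set_tx_maxrate() are easy to miss: the limit check
 * compares maxrate (Mbps) against ICE_SCHED_MAX_BW / 1000, and the value
 * programmed to the scheduler is maxrate * 1000 (Kbps). A tiny sketch of
 * that unit handling; the 100 Gbps cap is an assumed sample value.
 */
#include <stdio.h>

#define SCHED_MAX_BW_KBPS 100000000u	/* sample cap: 100 Gbps in Kbps */

/* Returns the rate to program in Kbps (0 means "restore default"),
 * or UINT_MAX to signal rejection, like -EINVAL in the driver.
 */
static unsigned int maxrate_to_kbps(unsigned int maxrate_mbps)
{
	if (maxrate_mbps > SCHED_MAX_BW_KBPS / 1000)
		return (unsigned int)-1;
	return maxrate_mbps * 1000;
}

int main(void)
{
	printf("%u %u\n", maxrate_to_kbps(500), maxrate_to_kbps(200000000));
	return 0;
}
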
4957/**
4958 * ice_fdb_add - add an entry to the hardware database
4959 * @ndm: the input from the stack
4960 * @tb: pointer to array of nladdr (unused)
4961 * @dev: the net device pointer
4962 * @addr: the MAC address entry being added
4963 * @vid: VLAN ID
4964 * @flags: instructions from stack about fdb operation
4965 * @extack: netlink extended ack
4966 */
4967static int
4968ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
4969 struct net_device *dev, const unsigned char *addr, u16 vid,
4970 u16 flags, struct netlink_ext_ack __always_unused *extack)
4971{
4972 int err;
4973
4974 if (vid) {
4975 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
4976 return -EINVAL;
4977 }
4978 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4979 netdev_err(dev, "FDB only supports static addresses\n");
4980 return -EINVAL;
4981 }
4982
4983 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4984 err = dev_uc_add_excl(dev, addr);
4985 else if (is_multicast_ether_addr(addr))
4986 err = dev_mc_add_excl(dev, addr);
4987 else
4988 err = -EINVAL;
4989
4990 /* Only return duplicate errors if NLM_F_EXCL is set */
4991 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4992 err = 0;
4993
4994 return err;
4995}
4996
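/* The duplicate handling in ice_fdb_add() is worth calling out: -EEXIST
 * is only surfaced when userspace asked for exclusivity (NLM_F_EXCL);
 * otherwise re-adding an address is treated as success. A standalone
 * sketch; the flag value and the stub helper are assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define EEXIST_ERR	(-17)
#define FLAG_EXCL	0x200		/* stand-in for NLM_F_EXCL */

static int add_addr(bool already_present)
{
	return already_present ? EEXIST_ERR : 0;
}

static int fdb_add(bool already_present, unsigned int flags)
{
	int err = add_addr(already_present);

	/* Only report duplicates when the caller demanded exclusivity. */
	if (err == EEXIST_ERR && !(flags & FLAG_EXCL))
		err = 0;
	return err;
}

int main(void)
{
	printf("%d %d\n", fdb_add(true, 0), fdb_add(true, FLAG_EXCL));
	return 0;
}
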
4997/**
4998 * ice_fdb_del - delete an entry from the hardware database
4999 * @ndm: the input from the stack
5000 * @tb: pointer to array of nladdr (unused)
5001 * @dev: the net device pointer
5002 * @addr: the MAC address entry being deleted
5003 * @vid: VLAN ID
5004 */
5005static int
5006ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5007 struct net_device *dev, const unsigned char *addr,
5008 __always_unused u16 vid)
5009{
5010 int err;
5011
5012 if (ndm->ndm_state & NUD_PERMANENT) {
5013 netdev_err(dev, "FDB only supports static addresses\n");
5014 return -EINVAL;
5015 }
5016
5017 if (is_unicast_ether_addr(addr))
5018 err = dev_uc_del(dev, addr);
5019 else if (is_multicast_ether_addr(addr))
5020 err = dev_mc_del(dev, addr);
5021 else
5022 err = -EINVAL;
5023
5024 return err;
5025}
5026
5027/**
5028 * ice_set_features - set the netdev feature flags
5029 * @netdev: ptr to the netdev being adjusted
5030 * @features: the feature set that the stack is suggesting
5031 */
5032static int
5033ice_set_features(struct net_device *netdev, netdev_features_t features)
5034{
5035 struct ice_netdev_priv *np = netdev_priv(netdev);
5036 struct ice_vsi *vsi = np->vsi;
5037 struct ice_pf *pf = vsi->back;
5038 int ret = 0;
5039
5040 /* Don't set any netdev advanced features with device in Safe Mode */
5041 if (ice_is_safe_mode(vsi->back)) {
5042 dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5043 return ret;
5044 }
5045
5046 /* Do not change setting during reset */
5047 if (ice_is_reset_in_progress(pf->state)) {
5048 dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5049 return -EBUSY;
5050 }
5051
5052 /* Multiple features can be changed in one call so keep features in
5053 * separate if/else statements to guarantee each feature is checked
5054 */
5055 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5056 ret = ice_vsi_manage_rss_lut(vsi, true);
5057 else if (!(features & NETIF_F_RXHASH) &&
5058 netdev->features & NETIF_F_RXHASH)
5059 ret = ice_vsi_manage_rss_lut(vsi, false);
5060
5061 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5062 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5063 ret = ice_vsi_manage_vlan_stripping(vsi, true);
5064 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5065 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5066 ret = ice_vsi_manage_vlan_stripping(vsi, false);
5067
5068 if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5069 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5070 ret = ice_vsi_manage_vlan_insertion(vsi);
5071 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5072 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5073 ret = ice_vsi_manage_vlan_insertion(vsi);
5074
5075 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5076 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5077 ret = ice_cfg_vlan_pruning(vsi, true, false);
5078 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5079 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5080 ret = ice_cfg_vlan_pruning(vsi, false, false);
5081
5082 if ((features & NETIF_F_NTUPLE) &&
5083 !(netdev->features & NETIF_F_NTUPLE)) {
5084 ice_vsi_manage_fdir(vsi, true);
5085 ice_init_arfs(vsi);
5086 } else if (!(features & NETIF_F_NTUPLE) &&
5087 (netdev->features & NETIF_F_NTUPLE)) {
5088 ice_vsi_manage_fdir(vsi, false);
5089 ice_clear_arfs(vsi);
5090 }
5091
5092 return ret;
5093}
5094
5095/**
5096 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5097 * @vsi: VSI to setup VLAN properties for
5098 */
5099static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5100{
5101 int ret = 0;
5102
5103 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5104 ret = ice_vsi_manage_vlan_stripping(vsi, true);
5105 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5106 ret = ice_vsi_manage_vlan_insertion(vsi);
5107
5108 return ret;
5109}
5110
5111/**
5112 * ice_vsi_cfg - Setup the VSI
5113 * @vsi: the VSI being configured
5114 *
5115 * Return 0 on success and negative value on error
5116 */
5117int ice_vsi_cfg(struct ice_vsi *vsi)
5118{
5119 int err;
5120
5121 if (vsi->netdev) {
5122 ice_set_rx_mode(vsi->netdev);
5123
5124 err = ice_vsi_vlan_setup(vsi);
5125
5126 if (err)
5127 return err;
5128 }
5129 ice_vsi_cfg_dcb_rings(vsi);
5130
5131 err = ice_vsi_cfg_lan_txqs(vsi);
5132 if (!err && ice_is_xdp_ena_vsi(vsi))
5133 err = ice_vsi_cfg_xdp_txqs(vsi);
5134 if (!err)
5135 err = ice_vsi_cfg_rxqs(vsi);
5136
5137 return err;
5138}
5139
5140/**
5141 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5142 * @vsi: the VSI being configured
5143 */
5144static void ice_napi_enable_all(struct ice_vsi *vsi)
5145{
5146 int q_idx;
5147
5148 if (!vsi->netdev)
5149 return;
5150
5151 ice_for_each_q_vector(vsi, q_idx) {
5152 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5153
5154 if (q_vector->rx.ring || q_vector->tx.ring)
5155 napi_enable(&q_vector->napi);
5156 }
5157}
5158
5159/**
5160 * ice_up_complete - Finish the last steps of bringing up a connection
5161 * @vsi: The VSI being configured
5162 *
5163 * Return 0 on success and negative value on error
5164 */
5165static int ice_up_complete(struct ice_vsi *vsi)
5166{
5167 struct ice_pf *pf = vsi->back;
5168 int err;
5169
5170 ice_vsi_cfg_msix(vsi);
5171
5172 /* Enable only Rx rings, Tx rings were enabled by the FW when the
5173 * Tx queue group list was configured and the context bits were
5174 * programmed using ice_vsi_cfg_txqs
5175 */
5176 err = ice_vsi_start_all_rx_rings(vsi);
5177 if (err)
5178 return err;
5179
5180 clear_bit(__ICE_DOWN, vsi->state);
5181 ice_napi_enable_all(vsi);
5182 ice_vsi_ena_irq(vsi);
5183
5184 if (vsi->port_info &&
5185 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5186 vsi->netdev) {
5187 ice_print_link_msg(vsi, true);
5188 netif_tx_start_all_queues(vsi->netdev);
5189 netif_carrier_on(vsi->netdev);
5190 }
5191
5192 ice_service_task_schedule(pf);
5193
5194 return 0;
5195}
5196
5197/**
5198 * ice_up - Bring the connection back up after being down
5199 * @vsi: VSI being configured
5200 */
5201int ice_up(struct ice_vsi *vsi)
5202{
5203 int err;
5204
5205 err = ice_vsi_cfg(vsi);
5206 if (!err)
5207 err = ice_up_complete(vsi);
5208
5209 return err;
5210}
5211
5212/**
5213 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5214 * @ring: Tx or Rx ring to read stats from
5215 * @pkts: packets stats counter
5216 * @bytes: bytes stats counter
5217 *
5218 * This function fetches stats from the ring considering the atomic operations
5219 * that need to be performed to read u64 values on a 32-bit machine.
5220 */
static void
ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	*pkts = 0;
	*bytes = 0;

	if (!ring)
		return;
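
	/* u64_stats_fetch_begin_irq() opens a seqcount read section: on
	 * 32-bit kernels the 64-bit counters cannot be read atomically, so
	 * the read is retried whenever the writer updated the counters in
	 * the meantime. On 64-bit kernels the loop body runs exactly once.
	 */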
	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*pkts = ring->stats.pkts;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}

/**
 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
 * @vsi: the VSI to be updated
 * @rings: rings to work on
 * @count: number of rings
 */
static void
ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
			     u16 count)
{
	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
	u16 i;

	for (i = 0; i < count; i++) {
		struct ice_ring *ring;
		u64 pkts, bytes;

		ring = READ_ONCE(rings[i]);
		if (!ring)
			continue;
		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
		vsi_stats->tx_packets += pkts;
		vsi_stats->tx_bytes += bytes;
		vsi->tx_restart += ring->tx_stats.restart_q;
		vsi->tx_busy += ring->tx_stats.tx_busy;
		vsi->tx_linearize += ring->tx_stats.tx_linearize;
	}
}

/**
 * ice_update_vsi_ring_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
	struct ice_ring *ring;
	u64 pkts, bytes;
	int i;

	/* reset netdev stats */
	vsi_stats->tx_packets = 0;
	vsi_stats->tx_bytes = 0;
	vsi_stats->rx_packets = 0;
	vsi_stats->rx_bytes = 0;

	/* reset non-netdev (extended) stats */
	vsi->tx_restart = 0;
	vsi->tx_busy = 0;
	vsi->tx_linearize = 0;
	vsi->rx_buf_failed = 0;
	vsi->rx_page_failed = 0;
	vsi->rx_gro_dropped = 0;

	rcu_read_lock();

	/* update Tx rings counters */
	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);

	/* update Rx rings counters */
	ice_for_each_rxq(vsi, i) {
		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
		vsi_stats->rx_packets += pkts;
		vsi_stats->rx_bytes += bytes;
		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
		vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
	}

	/* update XDP Tx rings counters */
	if (ice_is_xdp_ena_vsi(vsi))
		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
					     vsi->num_xdp_txq);

	rcu_read_unlock();
}

/**
 * ice_update_vsi_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
void ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
	struct ice_eth_stats *cur_es = &vsi->eth_stats;
	struct ice_pf *pf = vsi->back;

	if (test_bit(__ICE_DOWN, vsi->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* get stats as recorded by Tx/Rx rings */
	ice_update_vsi_ring_stats(vsi);

	/* get VSI stats as recorded by the hardware */
	ice_update_eth_stats(vsi);

	cur_ns->tx_errors = cur_es->tx_errors;
	cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
	cur_ns->tx_dropped = cur_es->tx_discards;
	cur_ns->multicast = cur_es->rx_multicast;

	/* update some more netdev stats if this is the main VSI */
	if (vsi->type == ICE_VSI_PF) {
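		/* These come from port-level hardware counters that the
		 * watchdog task keeps in pf->stats, so they are only folded
		 * into the netdev stats of the main (PF) VSI.
		 */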
		cur_ns->rx_crc_errors = pf->stats.crc_errors;
		cur_ns->rx_errors = pf->stats.crc_errors +
				    pf->stats.illegal_bytes +
				    pf->stats.rx_len_errors +
				    pf->stats.rx_undersize +
				    pf->hw_csum_rx_error +
				    pf->stats.rx_jabber +
				    pf->stats.rx_fragments +
				    pf->stats.rx_oversize;
		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
		/* record drops from the port level */
		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
	}
}

/**
 * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
 */
void ice_update_pf_stats(struct ice_pf *pf)
{
	struct ice_hw_port_stats *prev_ps, *cur_ps;
	struct ice_hw *hw = &pf->hw;
	u16 fd_ctr_base;
	u8 port;

	port = hw->port_info->lport;
	prev_ps = &pf->stats_prev;
	cur_ps = &pf->stats;

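	/* The ice_stat_update40/32() helpers take both a previous-snapshot
	 * and a current-counter pointer: they accumulate the delta since
	 * the last read, which lets them cope with rollover of the narrower
	 * hardware counters. On the first read after a reset,
	 * stat_prev_loaded is false, so the snapshot is primed first.
	 */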
	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_bytes,
			  &cur_ps->eth.rx_bytes);

	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_unicast,
			  &cur_ps->eth.rx_unicast);

	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_multicast,
			  &cur_ps->eth.rx_multicast);

	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_broadcast,
			  &cur_ps->eth.rx_broadcast);

	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
			  &prev_ps->eth.rx_discards,
			  &cur_ps->eth.rx_discards);

	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_bytes,
			  &cur_ps->eth.tx_bytes);

	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_unicast,
			  &cur_ps->eth.tx_unicast);

	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_multicast,
			  &cur_ps->eth.tx_multicast);

	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_broadcast,
			  &cur_ps->eth.tx_broadcast);

	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
			  &prev_ps->tx_dropped_link_down,
			  &cur_ps->tx_dropped_link_down);

	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);

	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);

	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);

	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);

	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);

	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);

	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);

	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);

	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);

	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);

	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);

	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);

	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);

	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);

	fd_ctr_base = hw->fd_ctr_base;

	ice_stat_update40(hw,
			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
			  &cur_ps->fd_sb_match);
	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);

	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);

	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);

	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);

	ice_update_dcb_stats(pf);

	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
			  &prev_ps->crc_errors, &cur_ps->crc_errors);

	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);

	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
			  &prev_ps->mac_local_faults,
			  &cur_ps->mac_local_faults);

	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
			  &prev_ps->mac_remote_faults,
			  &cur_ps->mac_remote_faults);

	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);

	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);

	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);

	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);

	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);

	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;

	pf->stat_prev_loaded = true;
}

/**
 * ice_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */
static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct rtnl_link_stats64 *vsi_stats;
	struct ice_vsi *vsi = np->vsi;

	vsi_stats = &vsi->net_stats;

	if (!vsi->num_txq || !vsi->num_rxq)
		return;

	/* netdev packet/byte stats come from ring counters. These are
	 * obtained by summing up the ring counters (done by
	 * ice_update_vsi_ring_stats). But, only call the update routine and
	 * read the registers if the VSI is not down.
	 */
	if (!test_bit(__ICE_DOWN, vsi->state))
		ice_update_vsi_ring_stats(vsi);
	stats->tx_packets = vsi_stats->tx_packets;
	stats->tx_bytes = vsi_stats->tx_bytes;
	stats->rx_packets = vsi_stats->rx_packets;
	stats->rx_bytes = vsi_stats->rx_bytes;

	/* The rest of the stats can be read from the hardware but instead we
	 * just return values that the watchdog task has already obtained from
	 * the hardware.
	 */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: VSI having NAPI disabled
 */
static void ice_napi_disable_all(struct ice_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, q_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}

/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
 */
int ice_down(struct ice_vsi *vsi)
{
	int i, tx_err, rx_err, link_err = 0;

	/* Caller of this function is expected to set the
	 * vsi->state __ICE_DOWN bit
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}

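	/* Note the teardown order: IRQs and the Tx/Rx rings are stopped
	 * before NAPI is disabled and the rings are drained, so no new work
	 * is scheduled against a ring while it is being cleaned.
	 */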
	ice_vsi_dis_irq(vsi);

	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (tx_err)
		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, tx_err);
	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
		if (tx_err)
			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
				   vsi->vsi_num, tx_err);
	}

	rx_err = ice_vsi_stop_all_rx_rings(vsi);
	if (rx_err)
		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, rx_err);

	ice_napi_disable_all(vsi);

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		link_err = ice_force_phys_link_state(vsi, false);
		if (link_err)
			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
				   vsi->vsi_num, link_err);
	}

	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);

	ice_for_each_rxq(vsi, i)
		ice_clean_rx_ring(vsi->rx_rings[i]);

	if (tx_err || rx_err || link_err) {
		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);
		return -EIO;
	}

	return 0;
}

/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_txq(vsi, i) {
		struct ice_ring *ring = vsi->tx_rings[i];

		if (!ring)
			return -EINVAL;

		ring->netdev = vsi->netdev;
		err = ice_setup_tx_ring(ring);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_rxq(vsi, i) {
		struct ice_ring *ring = vsi->rx_rings[i];

		if (!ring)
			return -EINVAL;

		ring->netdev = vsi->netdev;
		err = ice_setup_rx_ring(ring);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_open_ctrl - open control VSI for use
 * @vsi: the VSI to open
 *
 * Initialization of the Control VSI
 *
 * Returns 0 on success, negative value on error
 */
int ice_vsi_open_ctrl(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int err;

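	/* The control VSI is not exposed as a netdev, so unlike
	 * ice_vsi_open() there is no queue-count notification to the stack
	 * and no carrier handling below.
	 */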
	dev = ice_pf_to_dev(pf);
	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
		 dev_driver_string(dev), dev_name(dev));
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	ice_vsi_cfg_msix(vsi);

	err = ice_vsi_start_all_rx_rings(vsi);
	if (err)
		goto err_up_complete;

	clear_bit(__ICE_DOWN, vsi->state);
	ice_vsi_ena_irq(vsi);

	return 0;

err_up_complete:
	ice_down(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}

/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
static int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
	if (err)
		goto err_set_qs;

	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
	if (err)
		goto err_set_qs;

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}

/**
 * ice_vsi_release_all - Delete all VSIs
 * @pf: PF from which all VSIs are being removed
 */
static void ice_vsi_release_all(struct ice_pf *pf)
{
	int err, i;

	if (!pf->vsi)
		return;

	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;

		err = ice_vsi_release(pf->vsi[i]);
		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
				i, err, pf->vsi[i]->vsi_num);
	}
}

/**
 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
 * @pf: pointer to the PF instance
 * @type: VSI type to rebuild
 *
 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
 */
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_status status;
	int i, err;

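	/* Each matching VSI is rebuilt, has its filters replayed and its HW
	 * VSI number refreshed, and is then re-enabled; a failure in any
	 * step aborts the rebuild.
	 */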
	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi || vsi->type != type)
			continue;

		/* rebuild the VSI */
		err = ice_vsi_rebuild(vsi, true);
		if (err) {
			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		/* replay filters for the VSI */
		status = ice_replay_vsi(&pf->hw, vsi->idx);
		if (status) {
			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
				ice_stat_str(status), vsi->idx,
				ice_vsi_type_str(type));
			return -EIO;
		}

		/* Re-map HW VSI number, using VSI handle that has been
		 * previously validated in ice_replay_vsi() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* enable the VSI */
		err = ice_ena_vsi(vsi, false);
		if (err) {
			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
			 ice_vsi_type_str(type));
	}

	return 0;
}

/**
 * ice_update_pf_netdev_link - Update PF netdev link status
 * @pf: pointer to the PF instance
 */
static void ice_update_pf_netdev_link(struct ice_pf *pf)
{
	bool link_up;
	int i;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi || vsi->type != ICE_VSI_PF)
			return;

		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
		if (link_up) {
			netif_carrier_on(pf->vsi[i]->netdev);
			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
		} else {
			netif_carrier_off(pf->vsi[i]->netdev);
			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
		}
	}
}

/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 * @reset_type: type of reset
 *
 * Do not rebuild VF VSIs in this flow because that is already handled via
 * ice_reset_all_vfs(). This is because the requirements for resetting a VF
 * after a PFR/CORER/GLOBR/etc. are different from the normal flow, and we
 * don't want to reset/rebuild all the VF VSIs twice.
 */
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	enum ice_status ret;
	int err;

	if (test_bit(__ICE_DOWN, pf->state))
		goto clear_recovery;

	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);

	ret = ice_init_all_ctrlq(hw);
	if (ret) {
		dev_err(dev, "control queues init failed %s\n",
			ice_stat_str(ret));
		goto err_init_ctrlq;
	}

	/* if DDP was previously loaded successfully */
	if (!ice_is_safe_mode(pf)) {
		/* reload the SW DB of filter tables */
		if (reset_type == ICE_RESET_PFR)
			ice_fill_blk_tbls(hw);
		else
			/* Reload DDP Package after CORER/GLOBR reset */
			ice_load_pkg(NULL, pf);
	}

	ret = ice_clear_pf_cfg(hw);
	if (ret) {
		dev_err(dev, "clear PF configuration failed %s\n",
			ice_stat_str(ret));
		goto err_init_ctrlq;
	}

	if (pf->first_sw->dflt_vsi_ena)
		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
	/* clear the default VSI configuration if it exists */
	pf->first_sw->dflt_vsi = NULL;
	pf->first_sw->dflt_vsi_ena = false;

	ice_clear_pxe_mode(hw);

	ret = ice_get_caps(hw);
	if (ret) {
		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
		goto err_init_ctrlq;
	}

	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (ret) {
		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
		goto err_init_ctrlq;
	}

	err = ice_sched_init_port(hw->port_info);
	if (err)
		goto err_sched_init_port;

	/* start misc vector */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "misc vector setup failed: %d\n", err);
		goto err_sched_init_port;
	}

	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
		if (!rd32(hw, PFQF_FD_SIZE)) {
			u16 unused, guar, b_effort;

			guar = hw->func_caps.fd_fltr_guar;
			b_effort = hw->func_caps.fd_fltr_best_effort;

			/* force guaranteed filter pool for PF */
			ice_alloc_fd_guar_item(hw, &unused, guar);
			/* force shared filter pool for PF */
			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
		}
	}

	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		ice_dcb_rebuild(pf);

	/* rebuild PF VSI */
	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
	if (err) {
		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
		goto err_vsi_rebuild;
	}

	/* If Flow Director is active */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
		if (err) {
			dev_err(dev, "control VSI rebuild failed: %d\n", err);
			goto err_vsi_rebuild;
		}

		/* replay HW Flow Director recipes */
		if (hw->fdir_prof)
			ice_fdir_replay_flows(hw);

		/* replay Flow Director filters */
		ice_fdir_replay_fltrs(pf);

		ice_rebuild_arfs(pf);
	}

	ice_update_pf_netdev_link(pf);

	/* tell the firmware we are up */
	ret = ice_send_version(pf);
	if (ret) {
		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
			ice_stat_str(ret));
		goto err_vsi_rebuild;
	}

	ice_replay_post(hw);

	/* if we get here, reset flow is successful */
	clear_bit(__ICE_RESET_FAILED, pf->state);
	return;

err_vsi_rebuild:
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw);
	set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(__ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}

/**
 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: Pointer to VSI structure
 */
static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
{
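	/* With 4K pages on the default (non-legacy) Rx path the driver uses
	 * 3072-byte buffers, and the remainder of the page presumably covers
	 * XDP_PACKET_HEADROOM, so the whole buffer counts as frame space.
	 * Larger pages and legacy-rx use 2048-byte buffers, where the
	 * headroom must come out of the buffer itself.
	 */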
	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
	else
		return ICE_RXBUF_3072;
}

/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 count = 0;

	if (new_mtu == (int)netdev->mtu) {
		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
		return 0;
	}

	if (ice_is_xdp_ena_vsi(vsi)) {
		int frame_size = ice_max_xdp_frame_size(vsi);

		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
			netdev_err(netdev, "max MTU for XDP usage is %d\n",
				   frame_size - ICE_ETH_PKT_HDR_PAD);
			return -EINVAL;
		}
	}

	if (new_mtu < (int)netdev->min_mtu) {
		netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
			   netdev->min_mtu);
		return -EINVAL;
	} else if (new_mtu > (int)netdev->max_mtu) {
		netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
			   netdev->max_mtu);
		return -EINVAL;
	}
	/* if a reset is in progress, wait for some time for it to complete */
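	/* each attempt sleeps for 1-2 ms, so the loop below waits at most
	 * roughly 100-200 ms before giving up with -EBUSY
	 */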
	do {
		if (ice_is_reset_in_progress(pf->state)) {
			count++;
			usleep_range(1000, 2000);
		} else {
			break;
		}
	} while (count < 100);

	if (count == 100) {
		netdev_err(netdev, "can't change MTU. Device is busy\n");
		return -EBUSY;
	}

	netdev->mtu = (unsigned int)new_mtu;

	/* if VSI is up, bring it down and then back up */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		int err;

		err = ice_down(vsi);
		if (err) {
			netdev_err(netdev, "change MTU if_up err %d\n", err);
			return err;
		}

		err = ice_up(vsi);
		if (err) {
			netdev_err(netdev, "change MTU if_up err %d\n", err);
			return err;
		}
	}

	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
	return 0;
}

/**
 * ice_aq_str - convert AQ err code to a string
 * @aq_err: the AQ error code to convert
 */
const char *ice_aq_str(enum ice_aq_err aq_err)
{
	switch (aq_err) {
	case ICE_AQ_RC_OK:
		return "OK";
	case ICE_AQ_RC_EPERM:
		return "ICE_AQ_RC_EPERM";
	case ICE_AQ_RC_ENOENT:
		return "ICE_AQ_RC_ENOENT";
	case ICE_AQ_RC_ENOMEM:
		return "ICE_AQ_RC_ENOMEM";
	case ICE_AQ_RC_EBUSY:
		return "ICE_AQ_RC_EBUSY";
	case ICE_AQ_RC_EEXIST:
		return "ICE_AQ_RC_EEXIST";
	case ICE_AQ_RC_EINVAL:
		return "ICE_AQ_RC_EINVAL";
	case ICE_AQ_RC_ENOSPC:
		return "ICE_AQ_RC_ENOSPC";
	case ICE_AQ_RC_ENOSYS:
		return "ICE_AQ_RC_ENOSYS";
	case ICE_AQ_RC_EMODE:
		return "ICE_AQ_RC_EMODE";
	case ICE_AQ_RC_ENOSEC:
		return "ICE_AQ_RC_ENOSEC";
	case ICE_AQ_RC_EBADSIG:
		return "ICE_AQ_RC_EBADSIG";
	case ICE_AQ_RC_ESVN:
		return "ICE_AQ_RC_ESVN";
	case ICE_AQ_RC_EBADMAN:
		return "ICE_AQ_RC_EBADMAN";
	case ICE_AQ_RC_EBADBUF:
		return "ICE_AQ_RC_EBADBUF";
	}

	return "ICE_AQ_RC_UNKNOWN";
}

/**
 * ice_stat_str - convert status err code to a string
 * @stat_err: the status error code to convert
 */
const char *ice_stat_str(enum ice_status stat_err)
{
	switch (stat_err) {
	case ICE_SUCCESS:
		return "OK";
	case ICE_ERR_PARAM:
		return "ICE_ERR_PARAM";
	case ICE_ERR_NOT_IMPL:
		return "ICE_ERR_NOT_IMPL";
	case ICE_ERR_NOT_READY:
		return "ICE_ERR_NOT_READY";
	case ICE_ERR_NOT_SUPPORTED:
		return "ICE_ERR_NOT_SUPPORTED";
	case ICE_ERR_BAD_PTR:
		return "ICE_ERR_BAD_PTR";
	case ICE_ERR_INVAL_SIZE:
		return "ICE_ERR_INVAL_SIZE";
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
	case ICE_ERR_RESET_FAILED:
		return "ICE_ERR_RESET_FAILED";
	case ICE_ERR_FW_API_VER:
		return "ICE_ERR_FW_API_VER";
	case ICE_ERR_NO_MEMORY:
		return "ICE_ERR_NO_MEMORY";
	case ICE_ERR_CFG:
		return "ICE_ERR_CFG";
	case ICE_ERR_OUT_OF_RANGE:
		return "ICE_ERR_OUT_OF_RANGE";
	case ICE_ERR_ALREADY_EXISTS:
		return "ICE_ERR_ALREADY_EXISTS";
	case ICE_ERR_NVM_CHECKSUM:
		return "ICE_ERR_NVM_CHECKSUM";
	case ICE_ERR_BUF_TOO_SHORT:
		return "ICE_ERR_BUF_TOO_SHORT";
	case ICE_ERR_NVM_BLANK_MODE:
		return "ICE_ERR_NVM_BLANK_MODE";
	case ICE_ERR_IN_USE:
		return "ICE_ERR_IN_USE";
	case ICE_ERR_MAX_LIMIT:
		return "ICE_ERR_MAX_LIMIT";
	case ICE_ERR_RESET_ONGOING:
		return "ICE_ERR_RESET_ONGOING";
	case ICE_ERR_HW_TABLE:
		return "ICE_ERR_HW_TABLE";
	case ICE_ERR_DOES_NOT_EXIST:
		return "ICE_ERR_DOES_NOT_EXIST";
	case ICE_ERR_FW_DDP_MISMATCH:
		return "ICE_ERR_FW_DDP_MISMATCH";
	case ICE_ERR_AQ_ERROR:
		return "ICE_ERR_AQ_ERROR";
	case ICE_ERR_AQ_TIMEOUT:
		return "ICE_ERR_AQ_TIMEOUT";
	case ICE_ERR_AQ_FULL:
		return "ICE_ERR_AQ_FULL";
	case ICE_ERR_AQ_NO_WORK:
		return "ICE_ERR_AQ_NO_WORK";
	case ICE_ERR_AQ_EMPTY:
		return "ICE_ERR_AQ_EMPTY";
	case ICE_ERR_AQ_FW_CRITICAL:
		return "ICE_ERR_AQ_FW_CRITICAL";
	}

	return "ICE_ERR_UNKNOWN";
}

/**
 * ice_set_rss - Set RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
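	/* the seed buffer is reinterpreted as the AQ key structure below, so
	 * callers must provide at least
	 * sizeof(struct ice_aqc_get_set_rss_keys) bytes of seed data
	 */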
	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
			(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_set_rss_key(hw, vsi->idx, buf);
		if (status) {
			dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
				ice_stat_str(status),
				ice_aq_str(hw->adminq.sq_last_status));
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
					    lut, lut_size);
		if (status) {
			dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
				ice_stat_str(status),
				ice_aq_str(hw->adminq.sq_last_status));
			return -EIO;
		}
	}

	return 0;
}

/**
 * ice_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
			(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_get_rss_key(hw, vsi->idx, buf);
		if (status) {
			dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
				ice_stat_str(status),
				ice_aq_str(hw->adminq.sq_last_status));
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
					    lut, lut_size);
		if (status) {
			dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
				ice_stat_str(status),
				ice_aq_str(hw->adminq.sq_last_status));
			return -EIO;
		}
	}

	return 0;
}

/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */
static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
		   struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 bmode;

	bmode = pf->first_sw->bridge_mode;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
				       filter_mask, NULL);
}

/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	vsi_props = &vsi->info;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

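	/* In VEB mode the switch may loop traffic back locally between VSIs,
	 * while VEPA mode forces all traffic out to the external switch;
	 * this is what the ALLOW_LB flag toggles.
	 */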
	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
			bmode, ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}
	/* Update sw flags for bookkeeping */
	vsi_props->sw_flags = ctxt->info.sw_flags;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up to. Iterates through the PF VSI list and sets the loopback mode
 * (if not already set) for all VSIs connected to this switch, and also
 * updates the unicast switch filter rules for the corresponding switch of
 * the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags,
		   struct netlink_ext_ack __always_unused *extack)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of each VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		status = ice_update_sw_rule_bridge_mode(hw);
		if (status) {
			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
				   mode, ice_stat_str(status),
				   ice_aq_str(hw->adminq.sq_last_status));
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return -EIO;
		}

		pf_sw->bridge_mode = mode;
	}

	return 0;
}

/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue
 */
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* Check if PFC is enabled for the TC to which the queue belongs. If
	 * yes, then the Tx timeout is not caused by a hung queue and there
	 * is no need to reset and rebuild.
	 */
	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
			 txqueue);
		return;
	}

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_txq; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset the recovery level if enough time has elapsed since the last
	 * timeout. Also ensure no new reset action happens before the next
	 * timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
		    pf->tx_timeout_recovery_level, txqueue);

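	/* Escalate through progressively heavier resets on repeated
	 * timeouts: a PF reset first, then a core reset, then a global
	 * reset. Beyond level 3 the device is declared unrecoverable.
	 */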
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(__ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(__ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(__ICE_DOWN, pf->state);
		set_bit(__ICE_NEEDS_RESTART, vsi->state);
		set_bit(__ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 */
static void
ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	u16 port = ntohs(ti->port);
	enum ice_status status;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		tnl_type = TNL_VXLAN;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		tnl_type = TNL_GENEVE;
		break;
	default:
		netdev_err(netdev, "Unknown tunnel type\n");
		return;
	}

	status = ice_create_tunnel(&pf->hw, tnl_type, port);
	if (status == ICE_ERR_OUT_OF_RANGE)
		netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
			    port);
	else if (status)
		netdev_err(netdev, "Error adding UDP tunnel - %s\n",
			   ice_stat_str(status));
}

/**
 * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 */
static void
ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	enum ice_status status;
	bool retval;

	retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
	if (!retval) {
		netdev_info(netdev, "port %d not found in UDP tunnels list\n",
			    port);
		return;
	}

	status = ice_destroy_tunnel(&pf->hw, port, false);
	if (status)
		netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
			   port);
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	if (test_bit(__ICE_DOWN, pf->state)) {
		netdev_err(netdev, "device is not ready yet\n");
		return -EBUSY;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n",
			   err);
		return err;
	}

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		err = ice_aq_set_link_restart_an(pi, false, NULL);
		if (err) {
			netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
				   vsi->vsi_num, err);
			return err;
		}
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes, so drop GSO support in that case.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

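	/* The checks below reject any header region whose length is odd or
	 * above the hardware limit, presumably because the Tx descriptor
	 * encodes these lengths in units of at least two bytes; such frames
	 * fall back to software checksumming and segmentation.
	 */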
	len = skb_network_header(skb) - skb->data;
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
			goto out_rm_features;

		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_features_check = ice_features_check,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
	.ndo_udp_tunnel_add = ice_udp_tunnel_add,
	.ndo_udp_tunnel_del = ice_udp_tunnel_del,
};