Loading...
Note: File does not exist in v3.5.6.
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_common.h"
5#include "ice_sched.h"
6#include "ice_adminq_cmd.h"
7
8#define ICE_PF_RESET_WAIT_COUNT 200
9
10#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
11 wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
12 ((ICE_RX_OPC_MDID << \
13 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17
18#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
19 wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
20 (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28
29/**
30 * ice_set_mac_type - Sets MAC type
31 * @hw: pointer to the HW structure
32 *
33 * This function sets the MAC type of the adapter based on the
34 * vendor ID and device ID stored in the hw structure.
35 */
36static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37{
38 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 return ICE_ERR_DEVICE_NOT_SUPPORTED;
40
41 hw->mac_type = ICE_MAC_GENERIC;
42 return 0;
43}
44
45/**
46 * ice_clear_pf_cfg - Clear PF configuration
47 * @hw: pointer to the hardware structure
48 */
49enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
50{
51 struct ice_aq_desc desc;
52
53 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
54
55 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
56}
57
58/**
59 * ice_aq_manage_mac_read - manage MAC address read command
60 * @hw: pointer to the hw struct
61 * @buf: a virtual buffer to hold the manage MAC read response
62 * @buf_size: Size of the virtual buffer
63 * @cd: pointer to command details structure or NULL
64 *
65 * This function is used to return per PF station MAC address (0x0107).
66 * NOTE: Upon successful completion of this command, MAC address information
67 * is returned in user specified buffer. Please interpret user specified
68 * buffer as "manage_mac_read" response.
69 * Response such as various MAC addresses are stored in HW struct (port.mac)
70 * ice_aq_discover_caps is expected to be called before this function is called.
71 */
72static enum ice_status
73ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
74 struct ice_sq_cd *cd)
75{
76 struct ice_aqc_manage_mac_read_resp *resp;
77 struct ice_aqc_manage_mac_read *cmd;
78 struct ice_aq_desc desc;
79 enum ice_status status;
80 u16 flags;
81 u8 i;
82
83 cmd = &desc.params.mac_read;
84
85 if (buf_size < sizeof(*resp))
86 return ICE_ERR_BUF_TOO_SHORT;
87
88 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
89
90 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
91 if (status)
92 return status;
93
94 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
95 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
96
97 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
98 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
99 return ICE_ERR_CFG;
100 }
101
102 /* A single port can report up to two (LAN and WoL) addresses */
103 for (i = 0; i < cmd->num_addr; i++)
104 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
105 ether_addr_copy(hw->port_info->mac.lan_addr,
106 resp[i].mac_addr);
107 ether_addr_copy(hw->port_info->mac.perm_addr,
108 resp[i].mac_addr);
109 break;
110 }
111
112 return 0;
113}
114
115/**
116 * ice_aq_get_phy_caps - returns PHY capabilities
117 * @pi: port information structure
118 * @qual_mods: report qualified modules
119 * @report_mode: report mode capabilities
120 * @pcaps: structure for PHY capabilities to be filled
121 * @cd: pointer to command details structure or NULL
122 *
123 * Returns the various PHY capabilities supported on the Port (0x0600)
124 */
125static enum ice_status
126ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
127 struct ice_aqc_get_phy_caps_data *pcaps,
128 struct ice_sq_cd *cd)
129{
130 struct ice_aqc_get_phy_caps *cmd;
131 u16 pcaps_size = sizeof(*pcaps);
132 struct ice_aq_desc desc;
133 enum ice_status status;
134
135 cmd = &desc.params.get_phy;
136
137 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
138 return ICE_ERR_PARAM;
139
140 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
141
142 if (qual_mods)
143 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
144
145 cmd->param0 |= cpu_to_le16(report_mode);
146 status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
147
148 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
149 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
150
151 return status;
152}
153
154/**
155 * ice_get_media_type - Gets media type
156 * @pi: port information structure
157 */
158static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
159{
160 struct ice_link_status *hw_link_info;
161
162 if (!pi)
163 return ICE_MEDIA_UNKNOWN;
164
165 hw_link_info = &pi->phy.link_info;
166
167 if (hw_link_info->phy_type_low) {
168 switch (hw_link_info->phy_type_low) {
169 case ICE_PHY_TYPE_LOW_1000BASE_SX:
170 case ICE_PHY_TYPE_LOW_1000BASE_LX:
171 case ICE_PHY_TYPE_LOW_10GBASE_SR:
172 case ICE_PHY_TYPE_LOW_10GBASE_LR:
173 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
174 case ICE_PHY_TYPE_LOW_25GBASE_SR:
175 case ICE_PHY_TYPE_LOW_25GBASE_LR:
176 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
177 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
178 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
179 return ICE_MEDIA_FIBER;
180 case ICE_PHY_TYPE_LOW_100BASE_TX:
181 case ICE_PHY_TYPE_LOW_1000BASE_T:
182 case ICE_PHY_TYPE_LOW_2500BASE_T:
183 case ICE_PHY_TYPE_LOW_5GBASE_T:
184 case ICE_PHY_TYPE_LOW_10GBASE_T:
185 case ICE_PHY_TYPE_LOW_25GBASE_T:
186 return ICE_MEDIA_BASET;
187 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
188 case ICE_PHY_TYPE_LOW_25GBASE_CR:
189 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
190 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
191 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
192 return ICE_MEDIA_DA;
193 case ICE_PHY_TYPE_LOW_1000BASE_KX:
194 case ICE_PHY_TYPE_LOW_2500BASE_KX:
195 case ICE_PHY_TYPE_LOW_2500BASE_X:
196 case ICE_PHY_TYPE_LOW_5GBASE_KR:
197 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
198 case ICE_PHY_TYPE_LOW_25GBASE_KR:
199 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
200 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
201 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
202 return ICE_MEDIA_BACKPLANE;
203 }
204 }
205
206 return ICE_MEDIA_UNKNOWN;
207}
208
209/**
210 * ice_aq_get_link_info
211 * @pi: port information structure
212 * @ena_lse: enable/disable LinkStatusEvent reporting
213 * @link: pointer to link status structure - optional
214 * @cd: pointer to command details structure or NULL
215 *
216 * Get Link Status (0x607). Returns the link status of the adapter.
217 */
218enum ice_status
219ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
220 struct ice_link_status *link, struct ice_sq_cd *cd)
221{
222 struct ice_link_status *hw_link_info_old, *hw_link_info;
223 struct ice_aqc_get_link_status_data link_data = { 0 };
224 struct ice_aqc_get_link_status *resp;
225 enum ice_media_type *hw_media_type;
226 struct ice_fc_info *hw_fc_info;
227 bool tx_pause, rx_pause;
228 struct ice_aq_desc desc;
229 enum ice_status status;
230 u16 cmd_flags;
231
232 if (!pi)
233 return ICE_ERR_PARAM;
234 hw_link_info_old = &pi->phy.link_info_old;
235 hw_media_type = &pi->phy.media_type;
236 hw_link_info = &pi->phy.link_info;
237 hw_fc_info = &pi->fc;
238
239 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
240 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
241 resp = &desc.params.get_link_status;
242 resp->cmd_flags = cpu_to_le16(cmd_flags);
243 resp->lport_num = pi->lport;
244
245 status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
246 cd);
247
248 if (status)
249 return status;
250
251 /* save off old link status information */
252 *hw_link_info_old = *hw_link_info;
253
254 /* update current link status information */
255 hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
256 hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
257 *hw_media_type = ice_get_media_type(pi);
258 hw_link_info->link_info = link_data.link_info;
259 hw_link_info->an_info = link_data.an_info;
260 hw_link_info->ext_info = link_data.ext_info;
261 hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
262 hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
263
264 /* update fc info */
265 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
266 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
267 if (tx_pause && rx_pause)
268 hw_fc_info->current_mode = ICE_FC_FULL;
269 else if (tx_pause)
270 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
271 else if (rx_pause)
272 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
273 else
274 hw_fc_info->current_mode = ICE_FC_NONE;
275
276 hw_link_info->lse_ena =
277 !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
278
279 /* save link status information */
280 if (link)
281 *link = *hw_link_info;
282
283 /* flag cleared so calling functions don't call AQ again */
284 pi->phy.get_link_info = false;
285
286 return status;
287}
288
289/**
290 * ice_init_flex_parser - initialize rx flex parser
291 * @hw: pointer to the hardware structure
292 *
293 * Function to initialize flex descriptors
294 */
295static void ice_init_flex_parser(struct ice_hw *hw)
296{
297 u8 idx = 0;
298
299 ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
300 ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
301 ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
302 ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
303 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
304 ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
305 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
306 ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
307 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
308 ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
309 idx++);
310 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
311 ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
312 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
313 ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
314}
315
316/**
317 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
318 * @hw: pointer to the hw struct
319 */
320static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
321{
322 struct ice_switch_info *sw;
323
324 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
325 sizeof(*hw->switch_info), GFP_KERNEL);
326 sw = hw->switch_info;
327
328 if (!sw)
329 return ICE_ERR_NO_MEMORY;
330
331 INIT_LIST_HEAD(&sw->vsi_list_map_head);
332
333 mutex_init(&sw->mac_list_lock);
334 INIT_LIST_HEAD(&sw->mac_list_head);
335
336 mutex_init(&sw->vlan_list_lock);
337 INIT_LIST_HEAD(&sw->vlan_list_head);
338
339 mutex_init(&sw->eth_m_list_lock);
340 INIT_LIST_HEAD(&sw->eth_m_list_head);
341
342 mutex_init(&sw->promisc_list_lock);
343 INIT_LIST_HEAD(&sw->promisc_list_head);
344
345 mutex_init(&sw->mac_vlan_list_lock);
346 INIT_LIST_HEAD(&sw->mac_vlan_list_head);
347
348 return 0;
349}
350
351/**
352 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
353 * @hw: pointer to the hw struct
354 */
355static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
356{
357 struct ice_switch_info *sw = hw->switch_info;
358 struct ice_vsi_list_map_info *v_pos_map;
359 struct ice_vsi_list_map_info *v_tmp_map;
360
361 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
362 list_entry) {
363 list_del(&v_pos_map->list_entry);
364 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
365 }
366
367 mutex_destroy(&sw->mac_list_lock);
368 mutex_destroy(&sw->vlan_list_lock);
369 mutex_destroy(&sw->eth_m_list_lock);
370 mutex_destroy(&sw->promisc_list_lock);
371 mutex_destroy(&sw->mac_vlan_list_lock);
372
373 devm_kfree(ice_hw_to_dev(hw), sw);
374}
375
376/**
377 * ice_init_hw - main hardware initialization routine
378 * @hw: pointer to the hardware structure
379 */
380enum ice_status ice_init_hw(struct ice_hw *hw)
381{
382 struct ice_aqc_get_phy_caps_data *pcaps;
383 enum ice_status status;
384 u16 mac_buf_len;
385 void *mac_buf;
386
387 /* Set MAC type based on DeviceID */
388 status = ice_set_mac_type(hw);
389 if (status)
390 return status;
391
392 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
393 PF_FUNC_RID_FUNC_NUM_M) >>
394 PF_FUNC_RID_FUNC_NUM_S;
395
396 status = ice_reset(hw, ICE_RESET_PFR);
397 if (status)
398 return status;
399
400 /* set these values to minimum allowed */
401 hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
402 hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
403 hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
404 hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
405
406 status = ice_init_all_ctrlq(hw);
407 if (status)
408 goto err_unroll_cqinit;
409
410 status = ice_clear_pf_cfg(hw);
411 if (status)
412 goto err_unroll_cqinit;
413
414 ice_clear_pxe_mode(hw);
415
416 status = ice_init_nvm(hw);
417 if (status)
418 goto err_unroll_cqinit;
419
420 status = ice_get_caps(hw);
421 if (status)
422 goto err_unroll_cqinit;
423
424 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
425 sizeof(*hw->port_info), GFP_KERNEL);
426 if (!hw->port_info) {
427 status = ICE_ERR_NO_MEMORY;
428 goto err_unroll_cqinit;
429 }
430
431 /* set the back pointer to hw */
432 hw->port_info->hw = hw;
433
434 /* Initialize port_info struct with switch configuration data */
435 status = ice_get_initial_sw_cfg(hw);
436 if (status)
437 goto err_unroll_alloc;
438
439 hw->evb_veb = true;
440
441 /* Query the allocated resources for tx scheduler */
442 status = ice_sched_query_res_alloc(hw);
443 if (status) {
444 ice_debug(hw, ICE_DBG_SCHED,
445 "Failed to get scheduler allocated resources\n");
446 goto err_unroll_alloc;
447 }
448
449 /* Initialize port_info struct with scheduler data */
450 status = ice_sched_init_port(hw->port_info);
451 if (status)
452 goto err_unroll_sched;
453
454 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
455 if (!pcaps) {
456 status = ICE_ERR_NO_MEMORY;
457 goto err_unroll_sched;
458 }
459
460 /* Initialize port_info struct with PHY capabilities */
461 status = ice_aq_get_phy_caps(hw->port_info, false,
462 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
463 devm_kfree(ice_hw_to_dev(hw), pcaps);
464 if (status)
465 goto err_unroll_sched;
466
467 /* Initialize port_info struct with link information */
468 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
469 if (status)
470 goto err_unroll_sched;
471
472 status = ice_init_fltr_mgmt_struct(hw);
473 if (status)
474 goto err_unroll_sched;
475
476 /* Get MAC information */
477 /* A single port can report up to two (LAN and WoL) addresses */
478 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
479 sizeof(struct ice_aqc_manage_mac_read_resp),
480 GFP_KERNEL);
481 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
482
483 if (!mac_buf) {
484 status = ICE_ERR_NO_MEMORY;
485 goto err_unroll_fltr_mgmt_struct;
486 }
487
488 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
489 devm_kfree(ice_hw_to_dev(hw), mac_buf);
490
491 if (status)
492 goto err_unroll_fltr_mgmt_struct;
493
494 ice_init_flex_parser(hw);
495
496 return 0;
497
498err_unroll_fltr_mgmt_struct:
499 ice_cleanup_fltr_mgmt_struct(hw);
500err_unroll_sched:
501 ice_sched_cleanup_all(hw);
502err_unroll_alloc:
503 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
504err_unroll_cqinit:
505 ice_shutdown_all_ctrlq(hw);
506 return status;
507}
508
509/**
510 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
511 * @hw: pointer to the hardware structure
512 */
513void ice_deinit_hw(struct ice_hw *hw)
514{
515 ice_sched_cleanup_all(hw);
516 ice_shutdown_all_ctrlq(hw);
517
518 if (hw->port_info) {
519 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
520 hw->port_info = NULL;
521 }
522
523 ice_cleanup_fltr_mgmt_struct(hw);
524}
525
526/**
527 * ice_check_reset - Check to see if a global reset is complete
528 * @hw: pointer to the hardware structure
529 */
530enum ice_status ice_check_reset(struct ice_hw *hw)
531{
532 u32 cnt, reg = 0, grst_delay;
533
534 /* Poll for Device Active state in case a recent CORER, GLOBR,
535 * or EMPR has occurred. The grst delay value is in 100ms units.
536 * Add 1sec for outstanding AQ commands that can take a long time.
537 */
538 grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
539 GLGEN_RSTCTL_GRSTDEL_S) + 10;
540
541 for (cnt = 0; cnt < grst_delay; cnt++) {
542 mdelay(100);
543 reg = rd32(hw, GLGEN_RSTAT);
544 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
545 break;
546 }
547
548 if (cnt == grst_delay) {
549 ice_debug(hw, ICE_DBG_INIT,
550 "Global reset polling failed to complete.\n");
551 return ICE_ERR_RESET_FAILED;
552 }
553
554#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
555 GLNVM_ULD_GLOBR_DONE_M)
556
557 /* Device is Active; check Global Reset processes are done */
558 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
559 reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
560 if (reg == ICE_RESET_DONE_MASK) {
561 ice_debug(hw, ICE_DBG_INIT,
562 "Global reset processes done. %d\n", cnt);
563 break;
564 }
565 mdelay(10);
566 }
567
568 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
569 ice_debug(hw, ICE_DBG_INIT,
570 "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
571 reg);
572 return ICE_ERR_RESET_FAILED;
573 }
574
575 return 0;
576}
577
578/**
579 * ice_pf_reset - Reset the PF
580 * @hw: pointer to the hardware structure
581 *
582 * If a global reset has been triggered, this function checks
583 * for its completion and then issues the PF reset
584 */
585static enum ice_status ice_pf_reset(struct ice_hw *hw)
586{
587 u32 cnt, reg;
588
589 /* If at function entry a global reset was already in progress, i.e.
590 * state is not 'device active' or any of the reset done bits are not
591 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
592 * global reset is done.
593 */
594 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
595 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
596 /* poll on global reset currently in progress until done */
597 if (ice_check_reset(hw))
598 return ICE_ERR_RESET_FAILED;
599
600 return 0;
601 }
602
603 /* Reset the PF */
604 reg = rd32(hw, PFGEN_CTRL);
605
606 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
607
608 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
609 reg = rd32(hw, PFGEN_CTRL);
610 if (!(reg & PFGEN_CTRL_PFSWR_M))
611 break;
612
613 mdelay(1);
614 }
615
616 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
617 ice_debug(hw, ICE_DBG_INIT,
618 "PF reset polling failed to complete.\n");
619 return ICE_ERR_RESET_FAILED;
620 }
621
622 return 0;
623}
624
625/**
626 * ice_reset - Perform different types of reset
627 * @hw: pointer to the hardware structure
628 * @req: reset request
629 *
630 * This function triggers a reset as specified by the req parameter.
631 *
632 * Note:
633 * If anything other than a PF reset is triggered, PXE mode is restored.
634 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
635 * interface has been restored in the rebuild flow.
636 */
637enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
638{
639 u32 val = 0;
640
641 switch (req) {
642 case ICE_RESET_PFR:
643 return ice_pf_reset(hw);
644 case ICE_RESET_CORER:
645 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
646 val = GLGEN_RTRIG_CORER_M;
647 break;
648 case ICE_RESET_GLOBR:
649 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
650 val = GLGEN_RTRIG_GLOBR_M;
651 break;
652 }
653
654 val |= rd32(hw, GLGEN_RTRIG);
655 wr32(hw, GLGEN_RTRIG, val);
656 ice_flush(hw);
657
658 /* wait for the FW to be ready */
659 return ice_check_reset(hw);
660}
661
662/**
663 * ice_copy_rxq_ctx_to_hw
664 * @hw: pointer to the hardware structure
665 * @ice_rxq_ctx: pointer to the rxq context
666 * @rxq_index: the index of the rx queue
667 *
668 * Copies rxq context from dense structure to hw register space
669 */
670static enum ice_status
671ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
672{
673 u8 i;
674
675 if (!ice_rxq_ctx)
676 return ICE_ERR_BAD_PTR;
677
678 if (rxq_index > QRX_CTRL_MAX_INDEX)
679 return ICE_ERR_PARAM;
680
681 /* Copy each dword separately to hw */
682 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
683 wr32(hw, QRX_CONTEXT(i, rxq_index),
684 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
685
686 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
687 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
688 }
689
690 return 0;
691}
692
693/* LAN Rx Queue Context */
694static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
695 /* Field Width LSB */
696 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
697 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
698 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
699 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
700 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
701 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
702 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
703 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
704 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
705 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
706 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
707 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
708 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
709 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
710 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
711 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
712 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
713 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
714 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
715 { 0 }
716};
717
718/**
719 * ice_write_rxq_ctx
720 * @hw: pointer to the hardware structure
721 * @rlan_ctx: pointer to the rxq context
722 * @rxq_index: the index of the rx queue
723 *
724 * Converts rxq context from sparse to dense structure and then writes
725 * it to hw register space
726 */
727enum ice_status
728ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
729 u32 rxq_index)
730{
731 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
732
733 ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
734 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
735}
736
737/* LAN Tx Queue Context */
738const struct ice_ctx_ele ice_tlan_ctx_info[] = {
739 /* Field Width LSB */
740 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
741 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
742 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
743 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
744 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
745 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
746 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
747 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
748 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
749 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
750 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
751 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
752 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
753 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
754 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
755 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
756 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
757 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
758 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
759 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
760 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
761 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
762 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
763 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
764 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
765 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
766 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171),
767 { 0 }
768};
769
770/**
771 * ice_debug_cq
772 * @hw: pointer to the hardware structure
773 * @mask: debug mask
774 * @desc: pointer to control queue descriptor
775 * @buf: pointer to command buffer
776 * @buf_len: max length of buf
777 *
778 * Dumps debug log about control command with descriptor contents.
779 */
780void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
781 void *buf, u16 buf_len)
782{
783 struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
784 u16 len;
785
786#ifndef CONFIG_DYNAMIC_DEBUG
787 if (!(mask & hw->debug_mask))
788 return;
789#endif
790
791 if (!desc)
792 return;
793
794 len = le16_to_cpu(cq_desc->datalen);
795
796 ice_debug(hw, mask,
797 "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
798 le16_to_cpu(cq_desc->opcode),
799 le16_to_cpu(cq_desc->flags),
800 le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
801 ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
802 le32_to_cpu(cq_desc->cookie_high),
803 le32_to_cpu(cq_desc->cookie_low));
804 ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
805 le32_to_cpu(cq_desc->params.generic.param0),
806 le32_to_cpu(cq_desc->params.generic.param1));
807 ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
808 le32_to_cpu(cq_desc->params.generic.addr_high),
809 le32_to_cpu(cq_desc->params.generic.addr_low));
810 if (buf && cq_desc->datalen != 0) {
811 ice_debug(hw, mask, "Buffer:\n");
812 if (buf_len < len)
813 len = buf_len;
814
815 ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
816 }
817}
818
819/* FW Admin Queue command wrappers */
820
821/**
822 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
823 * @hw: pointer to the hw struct
824 * @desc: descriptor describing the command
825 * @buf: buffer to use for indirect commands (NULL for direct commands)
826 * @buf_size: size of buffer for indirect commands (0 for direct commands)
827 * @cd: pointer to command details structure
828 *
829 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
830 */
831enum ice_status
832ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
833 u16 buf_size, struct ice_sq_cd *cd)
834{
835 return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
836}
837
838/**
839 * ice_aq_get_fw_ver
840 * @hw: pointer to the hw struct
841 * @cd: pointer to command details structure or NULL
842 *
843 * Get the firmware version (0x0001) from the admin queue commands
844 */
845enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
846{
847 struct ice_aqc_get_ver *resp;
848 struct ice_aq_desc desc;
849 enum ice_status status;
850
851 resp = &desc.params.get_ver;
852
853 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
854
855 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
856
857 if (!status) {
858 hw->fw_branch = resp->fw_branch;
859 hw->fw_maj_ver = resp->fw_major;
860 hw->fw_min_ver = resp->fw_minor;
861 hw->fw_patch = resp->fw_patch;
862 hw->fw_build = le32_to_cpu(resp->fw_build);
863 hw->api_branch = resp->api_branch;
864 hw->api_maj_ver = resp->api_major;
865 hw->api_min_ver = resp->api_minor;
866 hw->api_patch = resp->api_patch;
867 }
868
869 return status;
870}
871
872/**
873 * ice_aq_q_shutdown
874 * @hw: pointer to the hw struct
875 * @unloading: is the driver unloading itself
876 *
877 * Tell the Firmware that we're shutting down the AdminQ and whether
878 * or not the driver is unloading as well (0x0003).
879 */
880enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
881{
882 struct ice_aqc_q_shutdown *cmd;
883 struct ice_aq_desc desc;
884
885 cmd = &desc.params.q_shutdown;
886
887 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
888
889 if (unloading)
890 cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
891
892 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
893}
894
895/**
896 * ice_aq_req_res
897 * @hw: pointer to the hw struct
898 * @res: resource id
899 * @access: access type
900 * @sdp_number: resource number
901 * @timeout: the maximum time in ms that the driver may hold the resource
902 * @cd: pointer to command details structure or NULL
903 *
904 * requests common resource using the admin queue commands (0x0008)
905 */
906static enum ice_status
907ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
908 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
909 struct ice_sq_cd *cd)
910{
911 struct ice_aqc_req_res *cmd_resp;
912 struct ice_aq_desc desc;
913 enum ice_status status;
914
915 cmd_resp = &desc.params.res_owner;
916
917 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
918
919 cmd_resp->res_id = cpu_to_le16(res);
920 cmd_resp->access_type = cpu_to_le16(access);
921 cmd_resp->res_number = cpu_to_le32(sdp_number);
922
923 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
924 /* The completion specifies the maximum time in ms that the driver
925 * may hold the resource in the Timeout field.
926 * If the resource is held by someone else, the command completes with
927 * busy return value and the timeout field indicates the maximum time
928 * the current owner of the resource has to free it.
929 */
930 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
931 *timeout = le32_to_cpu(cmd_resp->timeout);
932
933 return status;
934}
935
936/**
937 * ice_aq_release_res
938 * @hw: pointer to the hw struct
939 * @res: resource id
940 * @sdp_number: resource number
941 * @cd: pointer to command details structure or NULL
942 *
943 * release common resource using the admin queue commands (0x0009)
944 */
945static enum ice_status
946ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
947 struct ice_sq_cd *cd)
948{
949 struct ice_aqc_req_res *cmd;
950 struct ice_aq_desc desc;
951
952 cmd = &desc.params.res_owner;
953
954 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
955
956 cmd->res_id = cpu_to_le16(res);
957 cmd->res_number = cpu_to_le32(sdp_number);
958
959 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
960}
961
962/**
963 * ice_acquire_res
964 * @hw: pointer to the HW structure
965 * @res: resource id
966 * @access: access type (read or write)
967 *
968 * This function will attempt to acquire the ownership of a resource.
969 */
970enum ice_status
971ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
972 enum ice_aq_res_access_type access)
973{
974#define ICE_RES_POLLING_DELAY_MS 10
975 u32 delay = ICE_RES_POLLING_DELAY_MS;
976 enum ice_status status;
977 u32 time_left = 0;
978 u32 timeout;
979
980 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
981
982 /* An admin queue return code of ICE_AQ_RC_EEXIST means that another
983 * driver has previously acquired the resource and performed any
984 * necessary updates; in this case the caller does not obtain the
985 * resource and has no further work to do.
986 */
987 if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
988 status = ICE_ERR_AQ_NO_WORK;
989 goto ice_acquire_res_exit;
990 }
991
992 if (status)
993 ice_debug(hw, ICE_DBG_RES,
994 "resource %d acquire type %d failed.\n", res, access);
995
996 /* If necessary, poll until the current lock owner timeouts */
997 timeout = time_left;
998 while (status && timeout && time_left) {
999 mdelay(delay);
1000 timeout = (timeout > delay) ? timeout - delay : 0;
1001 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1002
1003 if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
1004 /* lock free, but no work to do */
1005 status = ICE_ERR_AQ_NO_WORK;
1006 break;
1007 }
1008
1009 if (!status)
1010 /* lock acquired */
1011 break;
1012 }
1013 if (status && status != ICE_ERR_AQ_NO_WORK)
1014 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1015
1016ice_acquire_res_exit:
1017 if (status == ICE_ERR_AQ_NO_WORK) {
1018 if (access == ICE_RES_WRITE)
1019 ice_debug(hw, ICE_DBG_RES,
1020 "resource indicates no work to do.\n");
1021 else
1022 ice_debug(hw, ICE_DBG_RES,
1023 "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1024 }
1025 return status;
1026}
1027
1028/**
1029 * ice_release_res
1030 * @hw: pointer to the HW structure
1031 * @res: resource id
1032 *
1033 * This function will release a resource using the proper Admin Command.
1034 */
1035void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1036{
1037 enum ice_status status;
1038 u32 total_delay = 0;
1039
1040 status = ice_aq_release_res(hw, res, 0, NULL);
1041
1042 /* there are some rare cases when trying to release the resource
1043 * results in an admin Q timeout, so handle them correctly
1044 */
1045 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1046 (total_delay < hw->adminq.sq_cmd_timeout)) {
1047 mdelay(1);
1048 status = ice_aq_release_res(hw, res, 0, NULL);
1049 total_delay++;
1050 }
1051}
1052
1053/**
1054 * ice_parse_caps - parse function/device capabilities
1055 * @hw: pointer to the hw struct
1056 * @buf: pointer to a buffer containing function/device capability records
1057 * @cap_count: number of capability records in the list
1058 * @opc: type of capabilities list to parse
1059 *
1060 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1061 */
1062static void
1063ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1064 enum ice_adminq_opc opc)
1065{
1066 struct ice_aqc_list_caps_elem *cap_resp;
1067 struct ice_hw_func_caps *func_p = NULL;
1068 struct ice_hw_dev_caps *dev_p = NULL;
1069 struct ice_hw_common_caps *caps;
1070 u32 i;
1071
1072 if (!buf)
1073 return;
1074
1075 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1076
1077 if (opc == ice_aqc_opc_list_dev_caps) {
1078 dev_p = &hw->dev_caps;
1079 caps = &dev_p->common_cap;
1080 } else if (opc == ice_aqc_opc_list_func_caps) {
1081 func_p = &hw->func_caps;
1082 caps = &func_p->common_cap;
1083 } else {
1084 ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1085 return;
1086 }
1087
1088 for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1089 u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1090 u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1091 u32 number = le32_to_cpu(cap_resp->number);
1092 u16 cap = le16_to_cpu(cap_resp->cap);
1093
1094 switch (cap) {
1095 case ICE_AQC_CAPS_VSI:
1096 if (dev_p) {
1097 dev_p->num_vsi_allocd_to_host = number;
1098 ice_debug(hw, ICE_DBG_INIT,
1099 "HW caps: Dev.VSI cnt = %d\n",
1100 dev_p->num_vsi_allocd_to_host);
1101 } else if (func_p) {
1102 func_p->guaranteed_num_vsi = number;
1103 ice_debug(hw, ICE_DBG_INIT,
1104 "HW caps: Func.VSI cnt = %d\n",
1105 func_p->guaranteed_num_vsi);
1106 }
1107 break;
1108 case ICE_AQC_CAPS_RSS:
1109 caps->rss_table_size = number;
1110 caps->rss_table_entry_width = logical_id;
1111 ice_debug(hw, ICE_DBG_INIT,
1112 "HW caps: RSS table size = %d\n",
1113 caps->rss_table_size);
1114 ice_debug(hw, ICE_DBG_INIT,
1115 "HW caps: RSS table width = %d\n",
1116 caps->rss_table_entry_width);
1117 break;
1118 case ICE_AQC_CAPS_RXQS:
1119 caps->num_rxq = number;
1120 caps->rxq_first_id = phys_id;
1121 ice_debug(hw, ICE_DBG_INIT,
1122 "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
1123 ice_debug(hw, ICE_DBG_INIT,
1124 "HW caps: Rx first queue ID = %d\n",
1125 caps->rxq_first_id);
1126 break;
1127 case ICE_AQC_CAPS_TXQS:
1128 caps->num_txq = number;
1129 caps->txq_first_id = phys_id;
1130 ice_debug(hw, ICE_DBG_INIT,
1131 "HW caps: Num Tx Qs = %d\n", caps->num_txq);
1132 ice_debug(hw, ICE_DBG_INIT,
1133 "HW caps: Tx first queue ID = %d\n",
1134 caps->txq_first_id);
1135 break;
1136 case ICE_AQC_CAPS_MSIX:
1137 caps->num_msix_vectors = number;
1138 caps->msix_vector_first_id = phys_id;
1139 ice_debug(hw, ICE_DBG_INIT,
1140 "HW caps: MSIX vector count = %d\n",
1141 caps->num_msix_vectors);
1142 ice_debug(hw, ICE_DBG_INIT,
1143 "HW caps: MSIX first vector index = %d\n",
1144 caps->msix_vector_first_id);
1145 break;
1146 case ICE_AQC_CAPS_MAX_MTU:
1147 caps->max_mtu = number;
1148 if (dev_p)
1149 ice_debug(hw, ICE_DBG_INIT,
1150 "HW caps: Dev.MaxMTU = %d\n",
1151 caps->max_mtu);
1152 else if (func_p)
1153 ice_debug(hw, ICE_DBG_INIT,
1154 "HW caps: func.MaxMTU = %d\n",
1155 caps->max_mtu);
1156 break;
1157 default:
1158 ice_debug(hw, ICE_DBG_INIT,
1159 "HW caps: Unknown capability[%d]: 0x%x\n", i,
1160 cap);
1161 break;
1162 }
1163 }
1164}
1165
1166/**
1167 * ice_aq_discover_caps - query function/device capabilities
1168 * @hw: pointer to the hw struct
1169 * @buf: a virtual buffer to hold the capabilities
1170 * @buf_size: Size of the virtual buffer
1171 * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
1172 * @opc: capabilities type to discover - pass in the command opcode
1173 * @cd: pointer to command details structure or NULL
1174 *
1175 * Get the function(0x000a)/device(0x000b) capabilities description from
1176 * the firmware.
1177 */
1178static enum ice_status
1179ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
1180 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1181{
1182 struct ice_aqc_list_caps *cmd;
1183 struct ice_aq_desc desc;
1184 enum ice_status status;
1185
1186 cmd = &desc.params.get_cap;
1187
1188 if (opc != ice_aqc_opc_list_func_caps &&
1189 opc != ice_aqc_opc_list_dev_caps)
1190 return ICE_ERR_PARAM;
1191
1192 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1193
1194 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1195 if (!status)
1196 ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1197 *data_size = le16_to_cpu(desc.datalen);
1198
1199 return status;
1200}
1201
1202/**
1203 * ice_get_caps - get info about the HW
1204 * @hw: pointer to the hardware structure
1205 */
1206enum ice_status ice_get_caps(struct ice_hw *hw)
1207{
1208 enum ice_status status;
1209 u16 data_size = 0;
1210 u16 cbuf_len;
1211 u8 retries;
1212
1213 /* The driver doesn't know how many capabilities the device will return
1214 * so the buffer size required isn't known ahead of time. The driver
1215 * starts with cbuf_len and if this turns out to be insufficient, the
1216 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
1217 * The driver then allocates the buffer of this size and retries the
1218 * operation. So it follows that the retry count is 2.
1219 */
1220#define ICE_GET_CAP_BUF_COUNT 40
1221#define ICE_GET_CAP_RETRY_COUNT 2
1222
1223 cbuf_len = ICE_GET_CAP_BUF_COUNT *
1224 sizeof(struct ice_aqc_list_caps_elem);
1225
1226 retries = ICE_GET_CAP_RETRY_COUNT;
1227
1228 do {
1229 void *cbuf;
1230
1231 cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1232 if (!cbuf)
1233 return ICE_ERR_NO_MEMORY;
1234
1235 status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
1236 ice_aqc_opc_list_func_caps, NULL);
1237 devm_kfree(ice_hw_to_dev(hw), cbuf);
1238
1239 if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1240 break;
1241
1242 /* If ENOMEM is returned, try again with bigger buffer */
1243 cbuf_len = data_size;
1244 } while (--retries);
1245
1246 return status;
1247}
1248
1249/**
1250 * ice_aq_manage_mac_write - manage MAC address write command
1251 * @hw: pointer to the hw struct
1252 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1253 * @flags: flags to control write behavior
1254 * @cd: pointer to command details structure or NULL
1255 *
1256 * This function is used to write MAC address to the NVM (0x0108).
1257 */
1258enum ice_status
1259ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
1260 struct ice_sq_cd *cd)
1261{
1262 struct ice_aqc_manage_mac_write *cmd;
1263 struct ice_aq_desc desc;
1264
1265 cmd = &desc.params.mac_write;
1266 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1267
1268 cmd->flags = flags;
1269
1270 /* Prep values for flags, sah, sal */
1271 cmd->sah = htons(*((u16 *)mac_addr));
1272 cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
1273
1274 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1275}
1276
1277/**
1278 * ice_aq_clear_pxe_mode
1279 * @hw: pointer to the hw struct
1280 *
1281 * Tell the firmware that the driver is taking over from PXE (0x0110).
1282 */
1283static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1284{
1285 struct ice_aq_desc desc;
1286
1287 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1288 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1289
1290 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1291}
1292
1293/**
1294 * ice_clear_pxe_mode - clear pxe operations mode
1295 * @hw: pointer to the hw struct
1296 *
1297 * Make sure all PXE mode settings are cleared, including things
1298 * like descriptor fetch/write-back mode.
1299 */
1300void ice_clear_pxe_mode(struct ice_hw *hw)
1301{
1302 if (ice_check_sq_alive(hw, &hw->adminq))
1303 ice_aq_clear_pxe_mode(hw);
1304}
1305
1306/**
1307 * ice_aq_set_phy_cfg
1308 * @hw: pointer to the hw struct
1309 * @lport: logical port number
1310 * @cfg: structure with PHY configuration data to be set
1311 * @cd: pointer to command details structure or NULL
1312 *
1313 * Set the various PHY configuration parameters supported on the Port.
1314 * One or more of the Set PHY config parameters may be ignored in an MFP
1315 * mode as the PF may not have the privilege to set some of the PHY Config
1316 * parameters. This status will be indicated by the command response (0x0601).
1317 */
1318static enum ice_status
1319ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
1320 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
1321{
1322 struct ice_aqc_set_phy_cfg *cmd;
1323 struct ice_aq_desc desc;
1324
1325 if (!cfg)
1326 return ICE_ERR_PARAM;
1327
1328 cmd = &desc.params.set_phy;
1329 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
1330 cmd->lport_num = lport;
1331
1332 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
1333}
1334
1335/**
1336 * ice_update_link_info - update status of the HW network link
1337 * @pi: port info structure of the interested logical port
1338 */
1339static enum ice_status
1340ice_update_link_info(struct ice_port_info *pi)
1341{
1342 struct ice_aqc_get_phy_caps_data *pcaps;
1343 struct ice_phy_info *phy_info;
1344 enum ice_status status;
1345 struct ice_hw *hw;
1346
1347 if (!pi)
1348 return ICE_ERR_PARAM;
1349
1350 hw = pi->hw;
1351
1352 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1353 if (!pcaps)
1354 return ICE_ERR_NO_MEMORY;
1355
1356 phy_info = &pi->phy;
1357 status = ice_aq_get_link_info(pi, true, NULL, NULL);
1358 if (status)
1359 goto out;
1360
1361 if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
1362 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
1363 pcaps, NULL);
1364 if (status)
1365 goto out;
1366
1367 memcpy(phy_info->link_info.module_type, &pcaps->module_type,
1368 sizeof(phy_info->link_info.module_type));
1369 }
1370out:
1371 devm_kfree(ice_hw_to_dev(hw), pcaps);
1372 return status;
1373}
1374
1375/**
1376 * ice_set_fc
1377 * @pi: port information structure
1378 * @aq_failures: pointer to status code, specific to ice_set_fc routine
1379 * @atomic_restart: enable automatic link update
1380 *
1381 * Set the requested flow control mode.
1382 */
1383enum ice_status
1384ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
1385{
1386 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
1387 struct ice_aqc_get_phy_caps_data *pcaps;
1388 enum ice_status status;
1389 u8 pause_mask = 0x0;
1390 struct ice_hw *hw;
1391
1392 if (!pi)
1393 return ICE_ERR_PARAM;
1394 hw = pi->hw;
1395 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
1396
1397 switch (pi->fc.req_mode) {
1398 case ICE_FC_FULL:
1399 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
1400 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
1401 break;
1402 case ICE_FC_RX_PAUSE:
1403 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
1404 break;
1405 case ICE_FC_TX_PAUSE:
1406 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
1407 break;
1408 default:
1409 break;
1410 }
1411
1412 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
1413 if (!pcaps)
1414 return ICE_ERR_NO_MEMORY;
1415
1416 /* Get the current phy config */
1417 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1418 NULL);
1419 if (status) {
1420 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
1421 goto out;
1422 }
1423
1424 /* clear the old pause settings */
1425 cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
1426 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
1427 /* set the new capabilities */
1428 cfg.caps |= pause_mask;
1429 /* If the capabilities have changed, then set the new config */
1430 if (cfg.caps != pcaps->caps) {
1431 int retry_count, retry_max = 10;
1432
1433 /* Auto restart link so settings take effect */
1434 if (atomic_restart)
1435 cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
1436 /* Copy over all the old settings */
1437 cfg.phy_type_low = pcaps->phy_type_low;
1438 cfg.low_power_ctrl = pcaps->low_power_ctrl;
1439 cfg.eee_cap = pcaps->eee_cap;
1440 cfg.eeer_value = pcaps->eeer_value;
1441 cfg.link_fec_opt = pcaps->link_fec_options;
1442
1443 status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
1444 if (status) {
1445 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
1446 goto out;
1447 }
1448
1449 /* Update the link info
1450 * It sometimes takes a really long time for link to
1451 * come back from the atomic reset. Thus, we wait a
1452 * little bit.
1453 */
1454 for (retry_count = 0; retry_count < retry_max; retry_count++) {
1455 status = ice_update_link_info(pi);
1456
1457 if (!status)
1458 break;
1459
1460 mdelay(100);
1461 }
1462
1463 if (status)
1464 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
1465 }
1466
1467out:
1468 devm_kfree(ice_hw_to_dev(hw), pcaps);
1469 return status;
1470}
1471
1472/**
1473 * ice_get_link_status - get status of the HW network link
1474 * @pi: port information structure
1475 * @link_up: pointer to bool (true/false = linkup/linkdown)
1476 *
1477 * Variable link_up is true if link is up, false if link is down.
1478 * The variable link_up is invalid if status is non zero. As a
1479 * result of this call, link status reporting becomes enabled
1480 */
1481enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
1482{
1483 struct ice_phy_info *phy_info;
1484 enum ice_status status = 0;
1485
1486 if (!pi)
1487 return ICE_ERR_PARAM;
1488
1489 phy_info = &pi->phy;
1490
1491 if (phy_info->get_link_info) {
1492 status = ice_update_link_info(pi);
1493
1494 if (status)
1495 ice_debug(pi->hw, ICE_DBG_LINK,
1496 "get link status error, status = %d\n",
1497 status);
1498 }
1499
1500 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
1501
1502 return status;
1503}
1504
1505/**
1506 * ice_aq_set_link_restart_an
1507 * @pi: pointer to the port information structure
1508 * @ena_link: if true: enable link, if false: disable link
1509 * @cd: pointer to command details structure or NULL
1510 *
1511 * Sets up the link and restarts the Auto-Negotiation over the link.
1512 */
1513enum ice_status
1514ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
1515 struct ice_sq_cd *cd)
1516{
1517 struct ice_aqc_restart_an *cmd;
1518 struct ice_aq_desc desc;
1519
1520 cmd = &desc.params.restart_an;
1521
1522 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
1523
1524 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
1525 cmd->lport_num = pi->lport;
1526 if (ena_link)
1527 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
1528 else
1529 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
1530
1531 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
1532}
1533
1534/**
1535 * ice_aq_set_event_mask
1536 * @hw: pointer to the hw struct
1537 * @port_num: port number of the physical function
1538 * @mask: event mask to be set
1539 * @cd: pointer to command details structure or NULL
1540 *
1541 * Set event mask (0x0613)
1542 */
1543enum ice_status
1544ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
1545 struct ice_sq_cd *cd)
1546{
1547 struct ice_aqc_set_event_mask *cmd;
1548 struct ice_aq_desc desc;
1549
1550 cmd = &desc.params.set_event_mask;
1551
1552 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
1553
1554 cmd->lport_num = port_num;
1555
1556 cmd->event_mask = cpu_to_le16(mask);
1557
1558 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1559}
1560
1561/**
1562 * __ice_aq_get_set_rss_lut
1563 * @hw: pointer to the hardware structure
1564 * @vsi_id: VSI FW index
1565 * @lut_type: LUT table type
1566 * @lut: pointer to the LUT buffer provided by the caller
1567 * @lut_size: size of the LUT buffer
1568 * @glob_lut_idx: global LUT index
1569 * @set: set true to set the table, false to get the table
1570 *
1571 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
1572 */
1573static enum ice_status
1574__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1575 u16 lut_size, u8 glob_lut_idx, bool set)
1576{
1577 struct ice_aqc_get_set_rss_lut *cmd_resp;
1578 struct ice_aq_desc desc;
1579 enum ice_status status;
1580 u16 flags = 0;
1581
1582 cmd_resp = &desc.params.get_set_rss_lut;
1583
1584 if (set) {
1585 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
1586 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1587 } else {
1588 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
1589 }
1590
1591 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
1592 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
1593 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
1594 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
1595
1596 switch (lut_type) {
1597 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
1598 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
1599 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
1600 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
1601 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
1602 break;
1603 default:
1604 status = ICE_ERR_PARAM;
1605 goto ice_aq_get_set_rss_lut_exit;
1606 }
1607
1608 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
1609 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
1610 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
1611
1612 if (!set)
1613 goto ice_aq_get_set_rss_lut_send;
1614 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
1615 if (!set)
1616 goto ice_aq_get_set_rss_lut_send;
1617 } else {
1618 goto ice_aq_get_set_rss_lut_send;
1619 }
1620
1621 /* LUT size is only valid for Global and PF table types */
1622 if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
1623 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
1624 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
1625 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
1626 } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
1627 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
1628 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
1629 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
1630 } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
1631 (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
1632 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
1633 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
1634 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
1635 } else {
1636 status = ICE_ERR_PARAM;
1637 goto ice_aq_get_set_rss_lut_exit;
1638 }
1639
1640ice_aq_get_set_rss_lut_send:
1641 cmd_resp->flags = cpu_to_le16(flags);
1642 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
1643
1644ice_aq_get_set_rss_lut_exit:
1645 return status;
1646}
1647
1648/**
1649 * ice_aq_get_rss_lut
1650 * @hw: pointer to the hardware structure
1651 * @vsi_id: VSI FW index
1652 * @lut_type: LUT table type
1653 * @lut: pointer to the LUT buffer provided by the caller
1654 * @lut_size: size of the LUT buffer
1655 *
1656 * get the RSS lookup table, PF or VSI type
1657 */
1658enum ice_status
1659ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1660 u16 lut_size)
1661{
1662 return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
1663 false);
1664}
1665
1666/**
1667 * ice_aq_set_rss_lut
1668 * @hw: pointer to the hardware structure
1669 * @vsi_id: VSI FW index
1670 * @lut_type: LUT table type
1671 * @lut: pointer to the LUT buffer provided by the caller
1672 * @lut_size: size of the LUT buffer
1673 *
1674 * set the RSS lookup table, PF or VSI type
1675 */
1676enum ice_status
1677ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1678 u16 lut_size)
1679{
1680 return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
1681 true);
1682}
1683
1684/**
1685 * __ice_aq_get_set_rss_key
1686 * @hw: pointer to the hw struct
1687 * @vsi_id: VSI FW index
1688 * @key: pointer to key info struct
1689 * @set: set true to set the key, false to get the key
1690 *
1691 * get (0x0B04) or set (0x0B02) the RSS key per VSI
1692 */
1693static enum
1694ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
1695 struct ice_aqc_get_set_rss_keys *key,
1696 bool set)
1697{
1698 struct ice_aqc_get_set_rss_key *cmd_resp;
1699 u16 key_size = sizeof(*key);
1700 struct ice_aq_desc desc;
1701
1702 cmd_resp = &desc.params.get_set_rss_key;
1703
1704 if (set) {
1705 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
1706 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1707 } else {
1708 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
1709 }
1710
1711 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
1712 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
1713 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
1714 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
1715
1716 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
1717}
1718
1719/**
1720 * ice_aq_get_rss_key
1721 * @hw: pointer to the hw struct
1722 * @vsi_id: VSI FW index
1723 * @key: pointer to key info struct
1724 *
1725 * get the RSS key per VSI
1726 */
1727enum ice_status
1728ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
1729 struct ice_aqc_get_set_rss_keys *key)
1730{
1731 return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
1732}
1733
1734/**
1735 * ice_aq_set_rss_key
1736 * @hw: pointer to the hw struct
1737 * @vsi_id: VSI FW index
1738 * @keys: pointer to key info struct
1739 *
1740 * set the RSS key per VSI
1741 */
1742enum ice_status
1743ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
1744 struct ice_aqc_get_set_rss_keys *keys)
1745{
1746 return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
1747}
1748
1749/**
1750 * ice_aq_add_lan_txq
1751 * @hw: pointer to the hardware structure
1752 * @num_qgrps: Number of added queue groups
1753 * @qg_list: list of queue groups to be added
1754 * @buf_size: size of buffer for indirect command
1755 * @cd: pointer to command details structure or NULL
1756 *
1757 * Add Tx LAN queue (0x0C30)
1758 *
1759 * NOTE:
1760 * Prior to calling add Tx LAN queue:
1761 * Initialize the following as part of the Tx queue context:
1762 * Completion queue ID if the queue uses Completion queue, Quanta profile,
1763 * Cache profile and Packet shaper profile.
1764 *
1765 * After add Tx LAN queue AQ command is completed:
1766 * Interrupts should be associated with specific queues,
1767 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
1768 * flow.
1769 */
1770static enum ice_status
1771ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
1772 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
1773 struct ice_sq_cd *cd)
1774{
1775 u16 i, sum_header_size, sum_q_size = 0;
1776 struct ice_aqc_add_tx_qgrp *list;
1777 struct ice_aqc_add_txqs *cmd;
1778 struct ice_aq_desc desc;
1779
1780 cmd = &desc.params.add_txqs;
1781
1782 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
1783
1784 if (!qg_list)
1785 return ICE_ERR_PARAM;
1786
1787 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
1788 return ICE_ERR_PARAM;
1789
1790 sum_header_size = num_qgrps *
1791 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
1792
1793 list = qg_list;
1794 for (i = 0; i < num_qgrps; i++) {
1795 struct ice_aqc_add_txqs_perq *q = list->txqs;
1796
1797 sum_q_size += list->num_txqs * sizeof(*q);
1798 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
1799 }
1800
1801 if (buf_size != (sum_header_size + sum_q_size))
1802 return ICE_ERR_PARAM;
1803
1804 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1805
1806 cmd->num_qgrps = num_qgrps;
1807
1808 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
1809}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_entries = num_qgrps;

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
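
/* Sizing sketch (illustrative only): mirroring the loop above, a single group
 * that disables one queue needs the group header plus one 16-bit queue ID and
 * no padding (num_qs is odd), i.e.
 *
 *	buf_size = (sizeof(struct ice_aqc_dis_txq_item) - sizeof(__le16)) +
 *		   1 * sizeof(__le16);
 *
 * which matches the sizeof(qg_list) that ice_dis_vsi_txq() passes further
 * down in this file, assuming q_id[] is declared with one element.
 */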

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count is
	 * masked to 5 bits, so the shift would do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count is
	 * masked to 6 bits, so the shift would do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}
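
/* Worked example (illustrative only): packing a 3-bit field whose lsb is 5
 * with ice_write_byte().  shift_width = 5 % 8 = 5 and mask = BIT(3) - 1 =
 * 0x07.  A source value of 0x5 becomes (0x5 & 0x07) << 5 = 0xA0, the mask
 * becomes 0x07 << 5 = 0xE0, and the destination byte is updated as
 * (dest & ~0xE0) | 0xA0, leaving bits 4:0 untouched.  The word/dword/qword
 * variants do the same, converting to little endian once the value has been
 * shifted into place.
 */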

/**
 * ice_set_ctx - set context bits in packed structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
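
/* Usage sketch (illustrative only): ice_set_ctx() walks the ce_info[] table
 * until it reaches the zero-width terminator and dispatches on size_of.  A
 * hypothetical two-field table (struct example_ctx and its layout are made up
 * for illustration) could look like:
 *
 *	static const struct ice_ctx_ele example_ctx_info[] = {
 *		{ .offset = offsetof(struct example_ctx, base),
 *		  .size_of = sizeof(u32), .width = 32, .lsb = 0 },
 *		{ .offset = offsetof(struct example_ctx, pf_num),
 *		  .size_of = sizeof(u8), .width = 3, .lsb = 32 },
 *		{ 0 }
 *	};
 *
 *	ice_set_ctx((u8 *)&ctx, packed_buf, example_ctx_info);
 *
 * Real tables in the driver are typically generated with a helper macro
 * rather than written out field by field.
 */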

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_id: VSI id
 * @tc: TC number
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN Tx queue.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN Tx queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status)
		goto ena_txq_exit;

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;

	/* add a leaf node into the scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
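
/* Caller sketch (illustrative only): enabling a single Tx queue on TC 0.
 * The real callers live outside this file, and the txqs[0] field names below
 * are assumptions to be checked against struct ice_aqc_add_tx_qgrp in
 * ice_adminq_cmd.h before reuse.
 *
 *	struct ice_aqc_add_tx_qgrp qg = { 0 };
 *	enum ice_status status;
 *
 *	qg.num_txqs = 1;
 *	qg.txqs[0].txq_id = cpu_to_le16(txq_id);
 *	(the packed Tx queue context built with ice_set_ctx() also goes
 *	 into txqs[0])
 *	status = ice_ena_vsi_txq(pi, vsi_id, 0, 1, &qg, sizeof(qg), NULL);
 */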

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @num_queues: number of queues
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
		u32 *q_teids, struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
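
/* Illustrative example (not part of the driver): building the per-TC maximum
 * queue array for a VSI that only enables TC 0 and handing it to
 * ice_cfg_vsi_lan().  Only bit 0 is set in tc_bitmap, so ice_cfg_vsi_qs()
 * configures the remaining TCs with the enable flag cleared.  num_txq is a
 * placeholder for the caller's queue count.
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *	enum ice_status status;
 *
 *	max_lanqs[0] = num_txq;
 *	status = ice_cfg_vsi_lan(pi, vsi_id, 0x1, max_lanqs);
 */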