1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_common.h"
5#include "ice_sched.h"
6#include "ice_adminq_cmd.h"
7
8#define ICE_PF_RESET_WAIT_COUNT 200
9
10#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
11 wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
12 ((ICE_RX_OPC_MDID << \
13 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17
18#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
19 wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
20 (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
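
/* For illustration: a call such as
 *
 *	ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0)
 *
 * expands to roughly
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(prof_id),
 *	     ((ICE_RX_OPC_MDID << GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M) |
 *	     ((ICE_RX_MDID_HASH_LOW << GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M));
 *
 * i.e. each invocation programs one flex word of the given Rx descriptor
 * profile with the MDID opcode and the requested metadata ID.
 */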
28
29/**
30 * ice_set_mac_type - Sets MAC type
31 * @hw: pointer to the HW structure
32 *
33 * This function sets the MAC type of the adapter based on the
34 * vendor ID and device ID stored in the HW structure.
35 */
36static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37{
38 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 return ICE_ERR_DEVICE_NOT_SUPPORTED;
40
41 hw->mac_type = ICE_MAC_GENERIC;
42 return 0;
43}
44
45/**
46 * ice_dev_onetime_setup - Temporary HW/FW workarounds
47 * @hw: pointer to the HW structure
48 *
49 * This function provides temporary workarounds for certain issues
50 * that are expected to be fixed in the HW/FW.
51 */
52void ice_dev_onetime_setup(struct ice_hw *hw)
53{
54#define MBX_PF_VT_PFALLOC 0x00231E80
55 /* set VFs per PF */
56 wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
57}
58
59/**
60 * ice_clear_pf_cfg - Clear PF configuration
61 * @hw: pointer to the hardware structure
62 *
63 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
64 * configuration, flow director filters, etc.).
65 */
66enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
67{
68 struct ice_aq_desc desc;
69
70 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
71
72 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
73}
74
75/**
76 * ice_aq_manage_mac_read - manage MAC address read command
77 * @hw: pointer to the HW struct
78 * @buf: a virtual buffer to hold the manage MAC read response
79 * @buf_size: Size of the virtual buffer
80 * @cd: pointer to command details structure or NULL
81 *
82 * This function is used to return per PF station MAC address (0x0107).
83 * NOTE: Upon successful completion of this command, MAC address information
84 * is returned in the user specified buffer. Interpret the buffer as a
85 * "manage_mac_read" response. Data from the response, such as the MAC
86 * addresses, is also stored in the HW struct (port.mac).
87 * ice_aq_discover_caps is expected to be called before this function.
88 */
89static enum ice_status
90ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
91 struct ice_sq_cd *cd)
92{
93 struct ice_aqc_manage_mac_read_resp *resp;
94 struct ice_aqc_manage_mac_read *cmd;
95 struct ice_aq_desc desc;
96 enum ice_status status;
97 u16 flags;
98 u8 i;
99
100 cmd = &desc.params.mac_read;
101
102 if (buf_size < sizeof(*resp))
103 return ICE_ERR_BUF_TOO_SHORT;
104
105 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
106
107 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
108 if (status)
109 return status;
110
111 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
112 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
113
114 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
115 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
116 return ICE_ERR_CFG;
117 }
118
119 /* A single port can report up to two (LAN and WoL) addresses */
120 for (i = 0; i < cmd->num_addr; i++)
121 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
122 ether_addr_copy(hw->port_info->mac.lan_addr,
123 resp[i].mac_addr);
124 ether_addr_copy(hw->port_info->mac.perm_addr,
125 resp[i].mac_addr);
126 break;
127 }
128
129 return 0;
130}
131
132/**
133 * ice_aq_get_phy_caps - returns PHY capabilities
134 * @pi: port information structure
135 * @qual_mods: report qualified modules
136 * @report_mode: report mode capabilities
137 * @pcaps: structure for PHY capabilities to be filled
138 * @cd: pointer to command details structure or NULL
139 *
140 * Returns the various PHY capabilities supported on the Port (0x0600)
141 */
142enum ice_status
143ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
144 struct ice_aqc_get_phy_caps_data *pcaps,
145 struct ice_sq_cd *cd)
146{
147 struct ice_aqc_get_phy_caps *cmd;
148 u16 pcaps_size = sizeof(*pcaps);
149 struct ice_aq_desc desc;
150 enum ice_status status;
151
152 cmd = &desc.params.get_phy;
153
154 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
155 return ICE_ERR_PARAM;
156
157 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
158
159 if (qual_mods)
160 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
161
162 cmd->param0 |= cpu_to_le16(report_mode);
163 status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
164
165 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
166 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
167 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
168 }
169
170 return status;
171}
172
173/**
174 * ice_get_media_type - Gets media type
175 * @pi: port information structure
176 */
177static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
178{
179 struct ice_link_status *hw_link_info;
180
181 if (!pi)
182 return ICE_MEDIA_UNKNOWN;
183
184 hw_link_info = &pi->phy.link_info;
185 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
186 /* If more than one media type is selected, report unknown */
187 return ICE_MEDIA_UNKNOWN;
188
189 if (hw_link_info->phy_type_low) {
190 switch (hw_link_info->phy_type_low) {
191 case ICE_PHY_TYPE_LOW_1000BASE_SX:
192 case ICE_PHY_TYPE_LOW_1000BASE_LX:
193 case ICE_PHY_TYPE_LOW_10GBASE_SR:
194 case ICE_PHY_TYPE_LOW_10GBASE_LR:
195 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
196 case ICE_PHY_TYPE_LOW_25GBASE_SR:
197 case ICE_PHY_TYPE_LOW_25GBASE_LR:
198 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
199 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
200 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
201 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
202 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
203 case ICE_PHY_TYPE_LOW_50GBASE_SR:
204 case ICE_PHY_TYPE_LOW_50GBASE_FR:
205 case ICE_PHY_TYPE_LOW_50GBASE_LR:
206 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
207 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
208 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
209 case ICE_PHY_TYPE_LOW_100GBASE_DR:
210 return ICE_MEDIA_FIBER;
211 case ICE_PHY_TYPE_LOW_100BASE_TX:
212 case ICE_PHY_TYPE_LOW_1000BASE_T:
213 case ICE_PHY_TYPE_LOW_2500BASE_T:
214 case ICE_PHY_TYPE_LOW_5GBASE_T:
215 case ICE_PHY_TYPE_LOW_10GBASE_T:
216 case ICE_PHY_TYPE_LOW_25GBASE_T:
217 return ICE_MEDIA_BASET;
218 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
219 case ICE_PHY_TYPE_LOW_25GBASE_CR:
220 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
221 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
222 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
223 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
224 case ICE_PHY_TYPE_LOW_50GBASE_CP:
225 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
226 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
227 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
228 return ICE_MEDIA_DA;
229 case ICE_PHY_TYPE_LOW_1000BASE_KX:
230 case ICE_PHY_TYPE_LOW_2500BASE_KX:
231 case ICE_PHY_TYPE_LOW_2500BASE_X:
232 case ICE_PHY_TYPE_LOW_5GBASE_KR:
233 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
234 case ICE_PHY_TYPE_LOW_25GBASE_KR:
235 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
236 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
237 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
238 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
239 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
240 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
241 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
242 return ICE_MEDIA_BACKPLANE;
243 }
244 } else {
245 switch (hw_link_info->phy_type_high) {
246 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
247 return ICE_MEDIA_BACKPLANE;
248 }
249 }
250 return ICE_MEDIA_UNKNOWN;
251}
252
253/**
254 * ice_aq_get_link_info
255 * @pi: port information structure
256 * @ena_lse: enable/disable LinkStatusEvent reporting
257 * @link: pointer to link status structure - optional
258 * @cd: pointer to command details structure or NULL
259 *
260 * Get Link Status (0x607). Returns the link status of the adapter.
261 */
262enum ice_status
263ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
264 struct ice_link_status *link, struct ice_sq_cd *cd)
265{
266 struct ice_aqc_get_link_status_data link_data = { 0 };
267 struct ice_aqc_get_link_status *resp;
268 struct ice_link_status *li_old, *li;
269 enum ice_media_type *hw_media_type;
270 struct ice_fc_info *hw_fc_info;
271 bool tx_pause, rx_pause;
272 struct ice_aq_desc desc;
273 enum ice_status status;
274 struct ice_hw *hw;
275 u16 cmd_flags;
276
277 if (!pi)
278 return ICE_ERR_PARAM;
279 hw = pi->hw;
280 li_old = &pi->phy.link_info_old;
281 hw_media_type = &pi->phy.media_type;
282 li = &pi->phy.link_info;
283 hw_fc_info = &pi->fc;
284
285 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
286 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
287 resp = &desc.params.get_link_status;
288 resp->cmd_flags = cpu_to_le16(cmd_flags);
289 resp->lport_num = pi->lport;
290
291 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
292
293 if (status)
294 return status;
295
296 /* save off old link status information */
297 *li_old = *li;
298
299 /* update current link status information */
300 li->link_speed = le16_to_cpu(link_data.link_speed);
301 li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
302 li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
303 *hw_media_type = ice_get_media_type(pi);
304 li->link_info = link_data.link_info;
305 li->an_info = link_data.an_info;
306 li->ext_info = link_data.ext_info;
307 li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
308 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
309 li->topo_media_conflict = link_data.topo_media_conflict;
310 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
311 ICE_AQ_CFG_PACING_TYPE_M);
312
313 /* update fc info */
314 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
315 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
316 if (tx_pause && rx_pause)
317 hw_fc_info->current_mode = ICE_FC_FULL;
318 else if (tx_pause)
319 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
320 else if (rx_pause)
321 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
322 else
323 hw_fc_info->current_mode = ICE_FC_NONE;
324
325 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
326
327 ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
328 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
329 (unsigned long long)li->phy_type_low);
330 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
331 (unsigned long long)li->phy_type_high);
332 ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
333 ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
334 ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
335 ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
336 ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
337 ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
338 ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
339
340 /* save link status information */
341 if (link)
342 *link = *li;
343
344 /* flag cleared so calling functions don't call AQ again */
345 pi->phy.get_link_info = false;
346
347 return 0;
348}
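
/* Usage sketch: a typical caller refreshes the cached link state and then
 * inspects pi->phy.link_info, for example
 *
 *	struct ice_port_info *pi = hw->port_info;
 *
 *	if (!ice_aq_get_link_info(pi, true, NULL, NULL))
 *		link_up = !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP);
 *
 * where ICE_AQ_LINK_UP is assumed to be the link-up bit from
 * ice_adminq_cmd.h; passing true for ena_lse also enables Link Status Events.
 */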
349
350/**
351 * ice_init_flex_flags
352 * @hw: pointer to the hardware structure
353 * @prof_id: Rx Descriptor Builder profile ID
354 *
355 * Function to initialize Rx flex flags
356 */
357static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
358{
359 u8 idx = 0;
360
361 /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
362 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
363 * flexiflags1[3:0] - Not used for flag programming
364 * flexiflags2[7:0] - Tunnel and VLAN types
365 * 2 invalid fields in last index
366 */
367 switch (prof_id) {
368 /* Rx flex flags are currently programmed for the NIC profiles only.
369 * Different flag bit programming configurations can be added per
370 * profile as needed.
371 */
372 case ICE_RXDID_FLEX_NIC:
373 case ICE_RXDID_FLEX_NIC_2:
374 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
375 ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
376 ICE_FLG_FIN, idx++);
377 /* flex flag 1 is not used for flexi-flag programming, skipping
378 * these four FLG64 bits.
379 */
380 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
381 ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
382 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
383 ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
384 ICE_FLG_EVLAN_x9100, idx++);
385 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
386 ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
387 ICE_FLG_TNL0, idx++);
388 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
389 ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
390 break;
391
392 default:
393 ice_debug(hw, ICE_DBG_INIT,
394 "Flag programming for profile ID %d not supported\n",
395 prof_id);
396 }
397}
398
399/**
400 * ice_init_flex_flds
401 * @hw: pointer to the hardware structure
402 * @prof_id: Rx Descriptor Builder profile ID
403 *
404 * Function to initialize flex descriptors
405 */
406static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
407{
408 enum ice_flex_rx_mdid mdid;
409
410 switch (prof_id) {
411 case ICE_RXDID_FLEX_NIC:
412 case ICE_RXDID_FLEX_NIC_2:
413 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
414 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
415 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
416
417 mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
418 ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
419
420 ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
421
422 ice_init_flex_flags(hw, prof_id);
423 break;
424
425 default:
426 ice_debug(hw, ICE_DBG_INIT,
427 "Field init for profile ID %d not supported\n",
428 prof_id);
429 }
430}
431
432/**
433 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
434 * @hw: pointer to the HW struct
435 */
436static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
437{
438 struct ice_switch_info *sw;
439
440 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
441 sizeof(*hw->switch_info), GFP_KERNEL);
442 sw = hw->switch_info;
443
444 if (!sw)
445 return ICE_ERR_NO_MEMORY;
446
447 INIT_LIST_HEAD(&sw->vsi_list_map_head);
448
449 return ice_init_def_sw_recp(hw);
450}
451
452/**
453 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
454 * @hw: pointer to the HW struct
455 */
456static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
457{
458 struct ice_switch_info *sw = hw->switch_info;
459 struct ice_vsi_list_map_info *v_pos_map;
460 struct ice_vsi_list_map_info *v_tmp_map;
461 struct ice_sw_recipe *recps;
462 u8 i;
463
464 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
465 list_entry) {
466 list_del(&v_pos_map->list_entry);
467 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
468 }
469 recps = hw->switch_info->recp_list;
470 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
471 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
472
473 recps[i].root_rid = i;
474 mutex_destroy(&recps[i].filt_rule_lock);
475 list_for_each_entry_safe(lst_itr, tmp_entry,
476 &recps[i].filt_rules, list_entry) {
477 list_del(&lst_itr->list_entry);
478 devm_kfree(ice_hw_to_dev(hw), lst_itr);
479 }
480 }
481 ice_rm_all_sw_replay_rule_info(hw);
482 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
483 devm_kfree(ice_hw_to_dev(hw), sw);
484}
485
486#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
487 (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
488#define ICE_FW_LOG_DESC_SIZE_MAX \
489 ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
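
/* The "(n) - 1" above assumes struct ice_aqc_fw_logging_data already declares
 * a single entry[] element, so ICE_FW_LOG_DESC_SIZE(1) is simply
 * sizeof(struct ice_aqc_fw_logging_data) and each additional module adds one
 * more 16-bit entry.
 */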
490
491/**
492 * ice_get_fw_log_cfg - get FW logging configuration
493 * @hw: pointer to the HW struct
494 */
495static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
496{
497 struct ice_aqc_fw_logging_data *config;
498 struct ice_aq_desc desc;
499 enum ice_status status;
500 u16 size;
501
502 size = ICE_FW_LOG_DESC_SIZE_MAX;
503 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
504 if (!config)
505 return ICE_ERR_NO_MEMORY;
506
507 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
508
509 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
510 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
511
512 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
513 if (!status) {
514 u16 i;
515
516 /* Save FW logging information into the HW structure */
517 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
518 u16 v, m, flgs;
519
520 v = le16_to_cpu(config->entry[i]);
521 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
522 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
523
524 if (m < ICE_AQC_FW_LOG_ID_MAX)
525 hw->fw_log.evnts[m].cur = flgs;
526 }
527 }
528
529 devm_kfree(ice_hw_to_dev(hw), config);
530
531 return status;
532}
533
534/**
535 * ice_cfg_fw_log - configure FW logging
536 * @hw: pointer to the HW struct
537 * @enable: enable certain FW logging events if true, disable all if false
538 *
539 * This function enables/disables the FW logging via Rx CQ events and a UART
540 * port based on predetermined configurations. FW logging via the Rx CQ can be
541 * enabled/disabled for individual PFs. However, FW logging via the UART can
542 * only be enabled/disabled for all PFs on the same device.
543 *
544 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
545 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
546 * before initializing the device.
547 *
548 * When re/configuring FW logging, callers need to update the "cfg" elements of
549 * the hw->fw_log.evnts array with the desired logging event configurations for
550 * modules of interest. When disabling FW logging completely, the callers can
551 * just pass false in the "enable" parameter. On completion, the function will
552 * update the "cur" element of the hw->fw_log.evnts array with the resulting
553 * logging event configurations of the modules that are being re/configured. FW
554 * logging modules that are not part of a reconfiguration operation retain their
555 * previous states.
556 *
557 * Before resetting the device, it is recommended that the driver disables FW
558 * logging before shutting down the control queue. When disabling FW logging
559 * ("enable" = false), the latest configurations of FW logging events stored in
560 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
561 * a device reset.
562 *
563 * When enabling FW logging to emit log messages via the Rx CQ during the
564 * device's initialization phase, a mechanism other than interrupt handlers
565 * needs to be used to extract FW log messages from the Rx CQ periodically and
566 * to keep the Rx CQ from filling up and stalling other types of control
567 * messages from FW to SW. Interrupts are typically disabled during the device's
568 * initialization phase.
569 */
570static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
571{
572 struct ice_aqc_fw_logging_data *data = NULL;
573 struct ice_aqc_fw_logging *cmd;
574 enum ice_status status = 0;
575 u16 i, chgs = 0, len = 0;
576 struct ice_aq_desc desc;
577 u8 actv_evnts = 0;
578 void *buf = NULL;
579
580 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
581 return 0;
582
583 /* Disable FW logging only when the control queue is still responsive */
584 if (!enable &&
585 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
586 return 0;
587
588 /* Get current FW log settings */
589 status = ice_get_fw_log_cfg(hw);
590 if (status)
591 return status;
592
593 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
594 cmd = &desc.params.fw_logging;
595
596 /* Indicate which controls are valid */
597 if (hw->fw_log.cq_en)
598 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
599
600 if (hw->fw_log.uart_en)
601 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
602
603 if (enable) {
604 /* Fill in an array of entries with FW logging modules and
605 * logging events being reconfigured.
606 */
607 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
608 u16 val;
609
610 /* Keep track of enabled event types */
611 actv_evnts |= hw->fw_log.evnts[i].cfg;
612
613 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
614 continue;
615
616 if (!data) {
617 data = devm_kzalloc(ice_hw_to_dev(hw),
618 ICE_FW_LOG_DESC_SIZE_MAX,
619 GFP_KERNEL);
620 if (!data)
621 return ICE_ERR_NO_MEMORY;
622 }
623
624 val = i << ICE_AQC_FW_LOG_ID_S;
625 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
626 data->entry[chgs++] = cpu_to_le16(val);
627 }
628
629 /* Only enable FW logging if at least one module is specified.
630 * If FW logging is currently enabled but all modules are not
631 * enabled to emit log messages, disable FW logging altogether.
632 */
633 if (actv_evnts) {
634 /* Leave if there is effectively no change */
635 if (!chgs)
636 goto out;
637
638 if (hw->fw_log.cq_en)
639 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
640
641 if (hw->fw_log.uart_en)
642 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
643
644 buf = data;
645 len = ICE_FW_LOG_DESC_SIZE(chgs);
646 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
647 }
648 }
649
650 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
651 if (!status) {
652 /* Update the current configuration to reflect events enabled.
653 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
654 * logging mode is enabled for the device. They do not reflect
655 * actual modules being enabled to emit log messages. So, their
656 * values remain unchanged even when all modules are disabled.
657 */
658 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
659
660 hw->fw_log.actv_evnts = actv_evnts;
661 for (i = 0; i < cnt; i++) {
662 u16 v, m;
663
664 if (!enable) {
665 /* When disabling all FW logging events as part
666 * of device's de-initialization, the original
667 * configurations are retained, and can be used
668 * to reconfigure FW logging later if the device
669 * is re-initialized.
670 */
671 hw->fw_log.evnts[i].cur = 0;
672 continue;
673 }
674
675 v = le16_to_cpu(data->entry[i]);
676 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
677 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
678 }
679 }
680
681out:
682 if (data)
683 devm_kfree(ice_hw_to_dev(hw), data);
684
685 return status;
686}
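
/* Usage sketch, per the description above: a caller selects the FW logging
 * transports and per-module event masks in hw->fw_log before calling this
 * function, e.g.
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module_id].cfg = desired_event_mask;
 *	if (ice_cfg_fw_log(hw, true))
 *		ice_debug(hw, ICE_DBG_INIT, "FW logging not enabled\n");
 *
 * module_id and desired_event_mask are placeholders for one of the
 * ICE_AQC_FW_LOG_ID_* modules and its event flags.
 */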
687
688/**
689 * ice_output_fw_log
690 * @hw: pointer to the HW struct
691 * @desc: pointer to the AQ message descriptor
692 * @buf: pointer to the buffer accompanying the AQ message
693 *
694 * Formats a FW Log message and outputs it via the standard driver logs.
695 */
696void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
697{
698 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
699 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
700 le16_to_cpu(desc->datalen));
701 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
702}
703
704/**
705 * ice_get_itr_intrl_gran - determine int/intrl granularity
706 * @hw: pointer to the HW struct
707 *
708 * Determines the ITR/intrl granularities based on the maximum aggregate
709 * bandwidth according to the device's configuration during power-on.
710 */
711static void ice_get_itr_intrl_gran(struct ice_hw *hw)
712{
713 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
714 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
715 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
716
717 switch (max_agg_bw) {
718 case ICE_MAX_AGG_BW_200G:
719 case ICE_MAX_AGG_BW_100G:
720 case ICE_MAX_AGG_BW_50G:
721 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
722 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
723 break;
724 case ICE_MAX_AGG_BW_25G:
725 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
726 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
727 break;
728 }
729}
730
731/**
732 * ice_get_nvm_version - get cached NVM version data
733 * @hw: pointer to the hardware structure
734 * @oem_ver: 8 bit NVM version
735 * @oem_build: 16 bit NVM build number
736 * @oem_patch: 8 bit NVM patch number
737 * @ver_hi: high 8 bits of the NVM version
738 * @ver_lo: low 8 bits of the NVM version
739 */
740void
741ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
742 u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
743{
744 struct ice_nvm_info *nvm = &hw->nvm;
745
746 *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
747 *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
748 *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
749 ICE_OEM_VER_BUILD_SHIFT);
750 *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
751 *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
752}
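
/* Usage sketch: callers typically unpack the cached version into locals for
 * logging, e.g.
 *
 *	u8 oem_ver, oem_patch, ver_hi, ver_lo;
 *	u16 oem_build;
 *
 *	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
 *			    &ver_hi, &ver_lo);
 *
 * after which the pieces can be combined into a human readable version
 * string; the exact format is up to the caller.
 */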
753
754/**
755 * ice_init_hw - main hardware initialization routine
756 * @hw: pointer to the hardware structure
757 */
758enum ice_status ice_init_hw(struct ice_hw *hw)
759{
760 struct ice_aqc_get_phy_caps_data *pcaps;
761 enum ice_status status;
762 u16 mac_buf_len;
763 void *mac_buf;
764
765 /* Set MAC type based on DeviceID */
766 status = ice_set_mac_type(hw);
767 if (status)
768 return status;
769
770 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
771 PF_FUNC_RID_FUNC_NUM_M) >>
772 PF_FUNC_RID_FUNC_NUM_S;
773
774 status = ice_reset(hw, ICE_RESET_PFR);
775 if (status)
776 return status;
777
778 ice_get_itr_intrl_gran(hw);
779
780 status = ice_create_all_ctrlq(hw);
781 if (status)
782 goto err_unroll_cqinit;
783
784 /* Enable FW logging. Not fatal if this fails. */
785 status = ice_cfg_fw_log(hw, true);
786 if (status)
787 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
788
789 status = ice_clear_pf_cfg(hw);
790 if (status)
791 goto err_unroll_cqinit;
792
793 ice_clear_pxe_mode(hw);
794
795 status = ice_init_nvm(hw);
796 if (status)
797 goto err_unroll_cqinit;
798
799 status = ice_get_caps(hw);
800 if (status)
801 goto err_unroll_cqinit;
802
803 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
804 sizeof(*hw->port_info), GFP_KERNEL);
805 if (!hw->port_info) {
806 status = ICE_ERR_NO_MEMORY;
807 goto err_unroll_cqinit;
808 }
809
810 /* set the back pointer to HW */
811 hw->port_info->hw = hw;
812
813 /* Initialize port_info struct with switch configuration data */
814 status = ice_get_initial_sw_cfg(hw);
815 if (status)
816 goto err_unroll_alloc;
817
818 hw->evb_veb = true;
819
820 /* Query the allocated resources for Tx scheduler */
821 status = ice_sched_query_res_alloc(hw);
822 if (status) {
823 ice_debug(hw, ICE_DBG_SCHED,
824 "Failed to get scheduler allocated resources\n");
825 goto err_unroll_alloc;
826 }
827
828 /* Initialize port_info struct with scheduler data */
829 status = ice_sched_init_port(hw->port_info);
830 if (status)
831 goto err_unroll_sched;
832
833 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
834 if (!pcaps) {
835 status = ICE_ERR_NO_MEMORY;
836 goto err_unroll_sched;
837 }
838
839 /* Initialize port_info struct with PHY capabilities */
840 status = ice_aq_get_phy_caps(hw->port_info, false,
841 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
842 devm_kfree(ice_hw_to_dev(hw), pcaps);
843 if (status)
844 goto err_unroll_sched;
845
846 /* Initialize port_info struct with link information */
847 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
848 if (status)
849 goto err_unroll_sched;
850
851 /* need a valid SW entry point to build a Tx tree */
852 if (!hw->sw_entry_point_layer) {
853 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
854 status = ICE_ERR_CFG;
855 goto err_unroll_sched;
856 }
857 INIT_LIST_HEAD(&hw->agg_list);
858
859 status = ice_init_fltr_mgmt_struct(hw);
860 if (status)
861 goto err_unroll_sched;
862
863 ice_dev_onetime_setup(hw);
864
865 /* Get MAC information */
866 /* A single port can report up to two (LAN and WoL) addresses */
867 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
868 sizeof(struct ice_aqc_manage_mac_read_resp),
869 GFP_KERNEL);
870 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
871
872 if (!mac_buf) {
873 status = ICE_ERR_NO_MEMORY;
874 goto err_unroll_fltr_mgmt_struct;
875 }
876
877 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
878 devm_kfree(ice_hw_to_dev(hw), mac_buf);
879
880 if (status)
881 goto err_unroll_fltr_mgmt_struct;
882
883 ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
884 ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
885 status = ice_init_hw_tbls(hw);
886 if (status)
887 goto err_unroll_fltr_mgmt_struct;
888 return 0;
889
890err_unroll_fltr_mgmt_struct:
891 ice_cleanup_fltr_mgmt_struct(hw);
892err_unroll_sched:
893 ice_sched_cleanup_all(hw);
894err_unroll_alloc:
895 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
896err_unroll_cqinit:
897 ice_destroy_all_ctrlq(hw);
898 return status;
899}
900
901/**
902 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
903 * @hw: pointer to the hardware structure
904 *
905 * This should be called only during nominal operation, not as a result of
906 * ice_init_hw() failing, since ice_init_hw() itself takes care of unrolling
907 * the applicable initializations if it fails for any reason.
908 */
909void ice_deinit_hw(struct ice_hw *hw)
910{
911 ice_cleanup_fltr_mgmt_struct(hw);
912
913 ice_sched_cleanup_all(hw);
914 ice_sched_clear_agg(hw);
915 ice_free_seg(hw);
916 ice_free_hw_tbls(hw);
917
918 if (hw->port_info) {
919 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
920 hw->port_info = NULL;
921 }
922
923 /* Attempt to disable FW logging before shutting down control queues */
924 ice_cfg_fw_log(hw, false);
925 ice_destroy_all_ctrlq(hw);
926
927 /* Clear VSI contexts if not already cleared */
928 ice_clear_all_vsi_ctx(hw);
929}
930
931/**
932 * ice_check_reset - Check to see if a global reset is complete
933 * @hw: pointer to the hardware structure
934 */
935enum ice_status ice_check_reset(struct ice_hw *hw)
936{
937 u32 cnt, reg = 0, grst_delay;
938
939 /* Poll for Device Active state in case a recent CORER, GLOBR,
940 * or EMPR has occurred. The grst delay value is in 100ms units.
941 * Add 1sec for outstanding AQ commands that can take a long time.
942 */
943 grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
944 GLGEN_RSTCTL_GRSTDEL_S) + 10;
945
946 for (cnt = 0; cnt < grst_delay; cnt++) {
947 mdelay(100);
948 reg = rd32(hw, GLGEN_RSTAT);
949 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
950 break;
951 }
952
953 if (cnt == grst_delay) {
954 ice_debug(hw, ICE_DBG_INIT,
955 "Global reset polling failed to complete.\n");
956 return ICE_ERR_RESET_FAILED;
957 }
958
959#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
960 GLNVM_ULD_GLOBR_DONE_M)
961
962 /* Device is Active; check Global Reset processes are done */
963 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
964 reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
965 if (reg == ICE_RESET_DONE_MASK) {
966 ice_debug(hw, ICE_DBG_INIT,
967 "Global reset processes done. %d\n", cnt);
968 break;
969 }
970 mdelay(10);
971 }
972
973 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
974 ice_debug(hw, ICE_DBG_INIT,
975 "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
976 reg);
977 return ICE_ERR_RESET_FAILED;
978 }
979
980 return 0;
981}
982
983/**
984 * ice_pf_reset - Reset the PF
985 * @hw: pointer to the hardware structure
986 *
987 * If a global reset has been triggered, this function checks
988 * for its completion and then issues the PF reset
989 */
990static enum ice_status ice_pf_reset(struct ice_hw *hw)
991{
992 u32 cnt, reg;
993
994 /* If at function entry a global reset was already in progress, i.e.
995 * state is not 'device active' or any of the reset done bits are not
996 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
997 * global reset is done.
998 */
999 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1000 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1001 /* poll on global reset currently in progress until done */
1002 if (ice_check_reset(hw))
1003 return ICE_ERR_RESET_FAILED;
1004
1005 return 0;
1006 }
1007
1008 /* Reset the PF */
1009 reg = rd32(hw, PFGEN_CTRL);
1010
1011 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1012
1013 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1014 reg = rd32(hw, PFGEN_CTRL);
1015 if (!(reg & PFGEN_CTRL_PFSWR_M))
1016 break;
1017
1018 mdelay(1);
1019 }
1020
1021 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1022 ice_debug(hw, ICE_DBG_INIT,
1023 "PF reset polling failed to complete.\n");
1024 return ICE_ERR_RESET_FAILED;
1025 }
1026
1027 return 0;
1028}
1029
1030/**
1031 * ice_reset - Perform different types of reset
1032 * @hw: pointer to the hardware structure
1033 * @req: reset request
1034 *
1035 * This function triggers a reset as specified by the req parameter.
1036 *
1037 * Note:
1038 * If anything other than a PF reset is triggered, PXE mode is restored.
1039 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1040 * interface has been restored in the rebuild flow.
1041 */
1042enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1043{
1044 u32 val = 0;
1045
1046 switch (req) {
1047 case ICE_RESET_PFR:
1048 return ice_pf_reset(hw);
1049 case ICE_RESET_CORER:
1050 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1051 val = GLGEN_RTRIG_CORER_M;
1052 break;
1053 case ICE_RESET_GLOBR:
1054 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1055 val = GLGEN_RTRIG_GLOBR_M;
1056 break;
1057 default:
1058 return ICE_ERR_PARAM;
1059 }
1060
1061 val |= rd32(hw, GLGEN_RTRIG);
1062 wr32(hw, GLGEN_RTRIG, val);
1063 ice_flush(hw);
1064
1065 /* wait for the FW to be ready */
1066 return ice_check_reset(hw);
1067}
1068
1069/**
1070 * ice_copy_rxq_ctx_to_hw
1071 * @hw: pointer to the hardware structure
1072 * @ice_rxq_ctx: pointer to the rxq context
1073 * @rxq_index: the index of the Rx queue
1074 *
1075 * Copies rxq context from dense structure to HW register space
1076 */
1077static enum ice_status
1078ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1079{
1080 u8 i;
1081
1082 if (!ice_rxq_ctx)
1083 return ICE_ERR_BAD_PTR;
1084
1085 if (rxq_index > QRX_CTRL_MAX_INDEX)
1086 return ICE_ERR_PARAM;
1087
1088 /* Copy each dword separately to HW */
1089 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1090 wr32(hw, QRX_CONTEXT(i, rxq_index),
1091 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1092
1093 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1094 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1095 }
1096
1097 return 0;
1098}
1099
1100/* LAN Rx Queue Context */
1101static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1102 /* Field Width LSB */
1103 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1104 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1105 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1106 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1107 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1108 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1109 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1110 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1111 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1112 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1113 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1114 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1115 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1116 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1117 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1118 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1119 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1120 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1121 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1122 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1123 { 0 }
1124};
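
/* Each ICE_CTX_STORE() row above gives the bit width and LSB position of one
 * field inside the dense hardware context image; ice_set_ctx() (used by
 * ice_write_rxq_ctx() below) walks this table to pack the sparse
 * struct ice_rlan_ctx into the buffer that is written to HW.
 */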
1125
1126/**
1127 * ice_write_rxq_ctx
1128 * @hw: pointer to the hardware structure
1129 * @rlan_ctx: pointer to the rxq context
1130 * @rxq_index: the index of the Rx queue
1131 *
1132 * Converts rxq context from sparse to dense structure and then writes
1133 * it to HW register space and enables the hardware to prefetch descriptors
1134 * instead of only fetching them on demand
1135 */
1136enum ice_status
1137ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1138 u32 rxq_index)
1139{
1140 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1141
1142 if (!rlan_ctx)
1143 return ICE_ERR_BAD_PTR;
1144
1145 rlan_ctx->prefena = 1;
1146
1147 ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1148 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1149}
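
/* Usage sketch: a caller fills the sparse context structure and lets
 * ice_write_rxq_ctx() pack and program it, e.g.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index))
 *		return ICE_ERR_PARAM;
 *
 * ring_dma, ring_count and rxq_index stand in for values the real Rx queue
 * setup path derives from its ring structures; the ">> 7" reflects the usual
 * 128-byte granularity assumed for the base address field.
 */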
1150
1151/* LAN Tx Queue Context */
1152const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1153 /* Field Width LSB */
1154 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1155 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1156 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1157 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1158 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1159 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1160 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1161 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1162 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1163 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1164 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1165 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1166 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1167 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1168 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1169 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1170 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1171 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1172 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1173 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1174 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1175 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1176 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1177 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1178 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1179 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1180 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1181 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1182 { 0 }
1183};
1184
1185/**
1186 * ice_debug_cq
1187 * @hw: pointer to the hardware structure
1188 * @mask: debug mask
1189 * @desc: pointer to control queue descriptor
1190 * @buf: pointer to command buffer
1191 * @buf_len: max length of buf
1192 *
1193 * Dumps debug log about control command with descriptor contents.
1194 */
1195void
1196ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
1197 u16 buf_len)
1198{
1199 struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
1200 u16 len;
1201
1202#ifndef CONFIG_DYNAMIC_DEBUG
1203 if (!(mask & hw->debug_mask))
1204 return;
1205#endif
1206
1207 if (!desc)
1208 return;
1209
1210 len = le16_to_cpu(cq_desc->datalen);
1211
1212 ice_debug(hw, mask,
1213 "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
1214 le16_to_cpu(cq_desc->opcode),
1215 le16_to_cpu(cq_desc->flags),
1216 le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
1217 ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
1218 le32_to_cpu(cq_desc->cookie_high),
1219 le32_to_cpu(cq_desc->cookie_low));
1220 ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
1221 le32_to_cpu(cq_desc->params.generic.param0),
1222 le32_to_cpu(cq_desc->params.generic.param1));
1223 ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
1224 le32_to_cpu(cq_desc->params.generic.addr_high),
1225 le32_to_cpu(cq_desc->params.generic.addr_low));
1226 if (buf && cq_desc->datalen != 0) {
1227 ice_debug(hw, mask, "Buffer:\n");
1228 if (buf_len < len)
1229 len = buf_len;
1230
1231 ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
1232 }
1233}
1234
1235/* FW Admin Queue command wrappers */
1236
1237/* Software lock/mutex that is meant to be held while the Global Config Lock
1238 * in firmware is acquired by the software to prevent most (but not all) types
1239 * of AQ commands from being sent to FW
1240 */
1241DEFINE_MUTEX(ice_global_cfg_lock_sw);
1242
1243/**
1244 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1245 * @hw: pointer to the HW struct
1246 * @desc: descriptor describing the command
1247 * @buf: buffer to use for indirect commands (NULL for direct commands)
1248 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1249 * @cd: pointer to command details structure
1250 *
1251 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1252 */
1253enum ice_status
1254ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1255 u16 buf_size, struct ice_sq_cd *cd)
1256{
1257 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1258 bool lock_acquired = false;
1259 enum ice_status status;
1260
1261 /* When a package download is in process (i.e. when the firmware's
1262 * Global Configuration Lock resource is held), only the Download
1263 * Package, Get Version, Get Package Info List and Release Resource
1264 * (with resource ID set to Global Config Lock) AdminQ commands are
1265 * allowed; all others must block until the package download completes
1266 * and the Global Config Lock is released. See also
1267 * ice_acquire_global_cfg_lock().
1268 */
1269 switch (le16_to_cpu(desc->opcode)) {
1270 case ice_aqc_opc_download_pkg:
1271 case ice_aqc_opc_get_pkg_info_list:
1272 case ice_aqc_opc_get_ver:
1273 break;
1274 case ice_aqc_opc_release_res:
1275 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1276 break;
1277 /* fall-through */
1278 default:
1279 mutex_lock(&ice_global_cfg_lock_sw);
1280 lock_acquired = true;
1281 break;
1282 }
1283
1284 status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1285 if (lock_acquired)
1286 mutex_unlock(&ice_global_cfg_lock_sw);
1287
1288 return status;
1289}
1290
1291/**
1292 * ice_aq_get_fw_ver
1293 * @hw: pointer to the HW struct
1294 * @cd: pointer to command details structure or NULL
1295 *
1296 * Get the firmware version (0x0001) from the admin queue commands
1297 */
1298enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1299{
1300 struct ice_aqc_get_ver *resp;
1301 struct ice_aq_desc desc;
1302 enum ice_status status;
1303
1304 resp = &desc.params.get_ver;
1305
1306 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1307
1308 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1309
1310 if (!status) {
1311 hw->fw_branch = resp->fw_branch;
1312 hw->fw_maj_ver = resp->fw_major;
1313 hw->fw_min_ver = resp->fw_minor;
1314 hw->fw_patch = resp->fw_patch;
1315 hw->fw_build = le32_to_cpu(resp->fw_build);
1316 hw->api_branch = resp->api_branch;
1317 hw->api_maj_ver = resp->api_major;
1318 hw->api_min_ver = resp->api_minor;
1319 hw->api_patch = resp->api_patch;
1320 }
1321
1322 return status;
1323}
1324
1325/**
1326 * ice_aq_send_driver_ver
1327 * @hw: pointer to the HW struct
1328 * @dv: driver's major, minor version
1329 * @cd: pointer to command details structure or NULL
1330 *
1331 * Send the driver version (0x0002) to the firmware
1332 */
1333enum ice_status
1334ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1335 struct ice_sq_cd *cd)
1336{
1337 struct ice_aqc_driver_ver *cmd;
1338 struct ice_aq_desc desc;
1339 u16 len;
1340
1341 cmd = &desc.params.driver_ver;
1342
1343 if (!dv)
1344 return ICE_ERR_PARAM;
1345
1346 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1347
1348 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1349 cmd->major_ver = dv->major_ver;
1350 cmd->minor_ver = dv->minor_ver;
1351 cmd->build_ver = dv->build_ver;
1352 cmd->subbuild_ver = dv->subbuild_ver;
1353
1354 len = 0;
1355 while (len < sizeof(dv->driver_string) &&
1356 isascii(dv->driver_string[len]) && dv->driver_string[len])
1357 len++;
1358
1359 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1360}
1361
1362/**
1363 * ice_aq_q_shutdown
1364 * @hw: pointer to the HW struct
1365 * @unloading: is the driver unloading itself
1366 *
1367 * Tell the Firmware that we're shutting down the AdminQ and whether
1368 * or not the driver is unloading as well (0x0003).
1369 */
1370enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1371{
1372 struct ice_aqc_q_shutdown *cmd;
1373 struct ice_aq_desc desc;
1374
1375 cmd = &desc.params.q_shutdown;
1376
1377 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1378
1379 if (unloading)
1380 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1381
1382 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1383}
1384
1385/**
1386 * ice_aq_req_res
1387 * @hw: pointer to the HW struct
1388 * @res: resource ID
1389 * @access: access type
1390 * @sdp_number: resource number
1391 * @timeout: the maximum time in ms that the driver may hold the resource
1392 * @cd: pointer to command details structure or NULL
1393 *
1394 * Requests common resource using the admin queue commands (0x0008).
1395 * When attempting to acquire the Global Config Lock, the driver can
1396 * learn of three states:
1397 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1398 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1399 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1400 * successfully downloaded the package; the driver does
1401 * not have to download the package and can continue
1402 * loading
1403 *
1404 * Note that if the caller is in an acquire lock, perform action, release lock
1405 * phase of operation, it is possible that the FW may detect a timeout and issue
1406 * phase of operation, the FW may detect a timeout and issue
1407 * have to determine its cause. The calling thread that is handling this flow
1408 * will likely get an error propagated back to it indicating the Download
1409 * Package, Update Package or the Release Resource AQ commands timed out.
1410 */
1411static enum ice_status
1412ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1413 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1414 struct ice_sq_cd *cd)
1415{
1416 struct ice_aqc_req_res *cmd_resp;
1417 struct ice_aq_desc desc;
1418 enum ice_status status;
1419
1420 cmd_resp = &desc.params.res_owner;
1421
1422 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1423
1424 cmd_resp->res_id = cpu_to_le16(res);
1425 cmd_resp->access_type = cpu_to_le16(access);
1426 cmd_resp->res_number = cpu_to_le32(sdp_number);
1427 cmd_resp->timeout = cpu_to_le32(*timeout);
1428 *timeout = 0;
1429
1430 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1431
1432 /* The completion specifies the maximum time in ms that the driver
1433 * may hold the resource in the Timeout field.
1434 */
1435
1436 /* Global config lock response utilizes an additional status field.
1437 *
1438 * If the Global config lock resource is held by some other driver, the
1439 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1440 * and the timeout field indicates the maximum time the current owner
1441 * of the resource has to free it.
1442 */
1443 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1444 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1445 *timeout = le32_to_cpu(cmd_resp->timeout);
1446 return 0;
1447 } else if (le16_to_cpu(cmd_resp->status) ==
1448 ICE_AQ_RES_GLBL_IN_PROG) {
1449 *timeout = le32_to_cpu(cmd_resp->timeout);
1450 return ICE_ERR_AQ_ERROR;
1451 } else if (le16_to_cpu(cmd_resp->status) ==
1452 ICE_AQ_RES_GLBL_DONE) {
1453 return ICE_ERR_AQ_NO_WORK;
1454 }
1455
1456 /* invalid FW response, force a timeout immediately */
1457 *timeout = 0;
1458 return ICE_ERR_AQ_ERROR;
1459 }
1460
1461 /* If the resource is held by some other driver, the command completes
1462 * with a busy return value and the timeout field indicates the maximum
1463 * time the current owner of the resource has to free it.
1464 */
1465 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1466 *timeout = le32_to_cpu(cmd_resp->timeout);
1467
1468 return status;
1469}
1470
1471/**
1472 * ice_aq_release_res
1473 * @hw: pointer to the HW struct
1474 * @res: resource ID
1475 * @sdp_number: resource number
1476 * @cd: pointer to command details structure or NULL
1477 *
1478 * release common resource using the admin queue commands (0x0009)
1479 */
1480static enum ice_status
1481ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1482 struct ice_sq_cd *cd)
1483{
1484 struct ice_aqc_req_res *cmd;
1485 struct ice_aq_desc desc;
1486
1487 cmd = &desc.params.res_owner;
1488
1489 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1490
1491 cmd->res_id = cpu_to_le16(res);
1492 cmd->res_number = cpu_to_le32(sdp_number);
1493
1494 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1495}
1496
1497/**
1498 * ice_acquire_res
1499 * @hw: pointer to the HW structure
1500 * @res: resource ID
1501 * @access: access type (read or write)
1502 * @timeout: timeout in milliseconds
1503 *
1504 * This function will attempt to acquire the ownership of a resource.
1505 */
1506enum ice_status
1507ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1508 enum ice_aq_res_access_type access, u32 timeout)
1509{
1510#define ICE_RES_POLLING_DELAY_MS 10
1511 u32 delay = ICE_RES_POLLING_DELAY_MS;
1512 u32 time_left = timeout;
1513 enum ice_status status;
1514
1515 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1516
1517 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1518 * previously acquired the resource and performed any necessary updates;
1519 * in this case the caller does not obtain the resource and has no
1520 * further work to do.
1521 */
1522 if (status == ICE_ERR_AQ_NO_WORK)
1523 goto ice_acquire_res_exit;
1524
1525 if (status)
1526 ice_debug(hw, ICE_DBG_RES,
1527 "resource %d acquire type %d failed.\n", res, access);
1528
1529 /* If necessary, poll until the current lock owner times out */
1530 timeout = time_left;
1531 while (status && timeout && time_left) {
1532 mdelay(delay);
1533 timeout = (timeout > delay) ? timeout - delay : 0;
1534 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1535
1536 if (status == ICE_ERR_AQ_NO_WORK)
1537 /* lock free, but no work to do */
1538 break;
1539
1540 if (!status)
1541 /* lock acquired */
1542 break;
1543 }
1544 if (status && status != ICE_ERR_AQ_NO_WORK)
1545 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1546
1547ice_acquire_res_exit:
1548 if (status == ICE_ERR_AQ_NO_WORK) {
1549 if (access == ICE_RES_WRITE)
1550 ice_debug(hw, ICE_DBG_RES,
1551 "resource indicates no work to do.\n");
1552 else
1553 ice_debug(hw, ICE_DBG_RES,
1554 "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1555 }
1556 return status;
1557}
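
/* Usage sketch: the acquire/act/release pattern described for
 * ice_aq_req_res() typically looks like
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, timeout_ms);
 *	if (status)
 *		return status;
 *	(access the shared resource here)
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 *
 * ICE_NVM_RES_ID and ICE_RES_READ are assumed to be a valid resource ID from
 * enum ice_aq_res_ids and access type, and timeout_ms is the caller's chosen
 * timeout in milliseconds.
 */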
1558
1559/**
1560 * ice_release_res
1561 * @hw: pointer to the HW structure
1562 * @res: resource ID
1563 *
1564 * This function will release a resource using the proper Admin Command.
1565 */
1566void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1567{
1568 enum ice_status status;
1569 u32 total_delay = 0;
1570
1571 status = ice_aq_release_res(hw, res, 0, NULL);
1572
1573 /* there are some rare cases when trying to release the resource
1574 * results in an admin queue timeout, so handle them correctly
1575 */
1576 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1577 (total_delay < hw->adminq.sq_cmd_timeout)) {
1578 mdelay(1);
1579 status = ice_aq_release_res(hw, res, 0, NULL);
1580 total_delay++;
1581 }
1582}
1583
1584/**
1585 * ice_get_num_per_func - determine number of resources per PF
1586 * @hw: pointer to the HW structure
1587 * @max: value to be evenly split between each PF
1588 *
1589 * Determine the number of valid functions by going through the bitmap returned
1590 * from parsing capabilities and use this to calculate the number of resources
1591 * per PF based on the max value passed in.
1592 */
1593static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1594{
1595 u8 funcs;
1596
1597#define ICE_CAPS_VALID_FUNCS_M 0xFF
1598 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1599 ICE_CAPS_VALID_FUNCS_M);
1600
1601 if (!funcs)
1602 return 0;
1603
1604 return max / funcs;
1605}
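
/* For illustration only: if the valid_functions bitmap reports 8 PFs and the
 * caller passes a device-wide maximum of 768 VSIs, each PF would be granted
 * 768 / 8 = 96 guaranteed VSIs; the real maximum comes from the value the
 * caller passes in (e.g. ICE_MAX_VSI below).
 */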
1606
1607/**
1608 * ice_parse_caps - parse function/device capabilities
1609 * @hw: pointer to the HW struct
1610 * @buf: pointer to a buffer containing function/device capability records
1611 * @cap_count: number of capability records in the list
1612 * @opc: type of capabilities list to parse
1613 *
1614 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1615 */
1616static void
1617ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1618 enum ice_adminq_opc opc)
1619{
1620 struct ice_aqc_list_caps_elem *cap_resp;
1621 struct ice_hw_func_caps *func_p = NULL;
1622 struct ice_hw_dev_caps *dev_p = NULL;
1623 struct ice_hw_common_caps *caps;
1624 char const *prefix;
1625 u32 i;
1626
1627 if (!buf)
1628 return;
1629
1630 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1631
1632 if (opc == ice_aqc_opc_list_dev_caps) {
1633 dev_p = &hw->dev_caps;
1634 caps = &dev_p->common_cap;
1635 prefix = "dev cap";
1636 } else if (opc == ice_aqc_opc_list_func_caps) {
1637 func_p = &hw->func_caps;
1638 caps = &func_p->common_cap;
1639 prefix = "func cap";
1640 } else {
1641 ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1642 return;
1643 }
1644
1645 for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1646 u32 logical_id = le32_to_cpu(cap_resp->logical_id);
1647 u32 phys_id = le32_to_cpu(cap_resp->phys_id);
1648 u32 number = le32_to_cpu(cap_resp->number);
1649 u16 cap = le16_to_cpu(cap_resp->cap);
1650
1651 switch (cap) {
1652 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1653 caps->valid_functions = number;
1654 ice_debug(hw, ICE_DBG_INIT,
1655 "%s: valid_functions (bitmap) = %d\n", prefix,
1656 caps->valid_functions);
1657 break;
1658 case ICE_AQC_CAPS_SRIOV:
1659 caps->sr_iov_1_1 = (number == 1);
1660 ice_debug(hw, ICE_DBG_INIT,
1661 "%s: sr_iov_1_1 = %d\n", prefix,
1662 caps->sr_iov_1_1);
1663 break;
1664 case ICE_AQC_CAPS_VF:
1665 if (dev_p) {
1666 dev_p->num_vfs_exposed = number;
1667 ice_debug(hw, ICE_DBG_INIT,
1668 "%s: num_vfs_exposed = %d\n", prefix,
1669 dev_p->num_vfs_exposed);
1670 } else if (func_p) {
1671 func_p->num_allocd_vfs = number;
1672 func_p->vf_base_id = logical_id;
1673 ice_debug(hw, ICE_DBG_INIT,
1674 "%s: num_allocd_vfs = %d\n", prefix,
1675 func_p->num_allocd_vfs);
1676 ice_debug(hw, ICE_DBG_INIT,
1677 "%s: vf_base_id = %d\n", prefix,
1678 func_p->vf_base_id);
1679 }
1680 break;
1681 case ICE_AQC_CAPS_VSI:
1682 if (dev_p) {
1683 dev_p->num_vsi_allocd_to_host = number;
1684 ice_debug(hw, ICE_DBG_INIT,
1685 "%s: num_vsi_allocd_to_host = %d\n",
1686 prefix,
1687 dev_p->num_vsi_allocd_to_host);
1688 } else if (func_p) {
1689 func_p->guar_num_vsi =
1690 ice_get_num_per_func(hw, ICE_MAX_VSI);
1691 ice_debug(hw, ICE_DBG_INIT,
1692 "%s: guar_num_vsi (fw) = %d\n",
1693 prefix, number);
1694 ice_debug(hw, ICE_DBG_INIT,
1695 "%s: guar_num_vsi = %d\n",
1696 prefix, func_p->guar_num_vsi);
1697 }
1698 break;
1699 case ICE_AQC_CAPS_DCB:
1700 caps->dcb = (number == 1);
1701 caps->active_tc_bitmap = logical_id;
1702 caps->maxtc = phys_id;
1703 ice_debug(hw, ICE_DBG_INIT,
1704 "%s: dcb = %d\n", prefix, caps->dcb);
1705 ice_debug(hw, ICE_DBG_INIT,
1706 "%s: active_tc_bitmap = %d\n", prefix,
1707 caps->active_tc_bitmap);
1708 ice_debug(hw, ICE_DBG_INIT,
1709 "%s: maxtc = %d\n", prefix, caps->maxtc);
1710 break;
1711 case ICE_AQC_CAPS_RSS:
1712 caps->rss_table_size = number;
1713 caps->rss_table_entry_width = logical_id;
1714 ice_debug(hw, ICE_DBG_INIT,
1715 "%s: rss_table_size = %d\n", prefix,
1716 caps->rss_table_size);
1717 ice_debug(hw, ICE_DBG_INIT,
1718 "%s: rss_table_entry_width = %d\n", prefix,
1719 caps->rss_table_entry_width);
1720 break;
1721 case ICE_AQC_CAPS_RXQS:
1722 caps->num_rxq = number;
1723 caps->rxq_first_id = phys_id;
1724 ice_debug(hw, ICE_DBG_INIT,
1725 "%s: num_rxq = %d\n", prefix,
1726 caps->num_rxq);
1727 ice_debug(hw, ICE_DBG_INIT,
1728 "%s: rxq_first_id = %d\n", prefix,
1729 caps->rxq_first_id);
1730 break;
1731 case ICE_AQC_CAPS_TXQS:
1732 caps->num_txq = number;
1733 caps->txq_first_id = phys_id;
1734 ice_debug(hw, ICE_DBG_INIT,
1735 "%s: num_txq = %d\n", prefix,
1736 caps->num_txq);
1737 ice_debug(hw, ICE_DBG_INIT,
1738 "%s: txq_first_id = %d\n", prefix,
1739 caps->txq_first_id);
1740 break;
1741 case ICE_AQC_CAPS_MSIX:
1742 caps->num_msix_vectors = number;
1743 caps->msix_vector_first_id = phys_id;
1744 ice_debug(hw, ICE_DBG_INIT,
1745 "%s: num_msix_vectors = %d\n", prefix,
1746 caps->num_msix_vectors);
1747 ice_debug(hw, ICE_DBG_INIT,
1748 "%s: msix_vector_first_id = %d\n", prefix,
1749 caps->msix_vector_first_id);
1750 break;
1751 case ICE_AQC_CAPS_MAX_MTU:
1752 caps->max_mtu = number;
1753 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1754 prefix, caps->max_mtu);
1755 break;
1756 default:
1757 ice_debug(hw, ICE_DBG_INIT,
1758 "%s: unknown capability[%d]: 0x%x\n", prefix,
1759 i, cap);
1760 break;
1761 }
1762 }
1763}
1764
1765/**
1766 * ice_aq_discover_caps - query function/device capabilities
1767 * @hw: pointer to the HW struct
1768 * @buf: a virtual buffer to hold the capabilities
1769 * @buf_size: Size of the virtual buffer
1770 * @cap_count: set to the required capability count if the AQ returns ENOMEM
1771 * @opc: capabilities type to discover - pass in the command opcode
1772 * @cd: pointer to command details structure or NULL
1773 *
1774 * Get the function(0x000a)/device(0x000b) capabilities description from
1775 * the firmware.
1776 */
1777static enum ice_status
1778ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1779 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1780{
1781 struct ice_aqc_list_caps *cmd;
1782 struct ice_aq_desc desc;
1783 enum ice_status status;
1784
1785 cmd = &desc.params.get_cap;
1786
1787 if (opc != ice_aqc_opc_list_func_caps &&
1788 opc != ice_aqc_opc_list_dev_caps)
1789 return ICE_ERR_PARAM;
1790
1791 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1792
1793 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1794 if (!status)
1795 ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1796 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1797 *cap_count = le32_to_cpu(cmd->count);
1798 return status;
1799}
1800
1801/**
1802 * ice_discover_caps - get info about the HW
1803 * @hw: pointer to the hardware structure
1804 * @opc: capabilities type to discover - pass in the command opcode
1805 */
1806static enum ice_status
1807ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
1808{
1809 enum ice_status status;
1810 u32 cap_count;
1811 u16 cbuf_len;
1812 u8 retries;
1813
1814 /* The driver doesn't know how many capabilities the device will return
1815 * so the buffer size required isn't known ahead of time. The driver
1816 * starts with cbuf_len and if this turns out to be insufficient, the
1817 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
1818 * The driver then allocates the buffer based on the count and retries
1819 * the operation. So it follows that the retry count is 2.
1820 */
1821#define ICE_GET_CAP_BUF_COUNT 40
1822#define ICE_GET_CAP_RETRY_COUNT 2
1823
1824 cap_count = ICE_GET_CAP_BUF_COUNT;
1825 retries = ICE_GET_CAP_RETRY_COUNT;
1826
1827 do {
1828 void *cbuf;
1829
1830 cbuf_len = (u16)(cap_count *
1831 sizeof(struct ice_aqc_list_caps_elem));
1832 cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1833 if (!cbuf)
1834 return ICE_ERR_NO_MEMORY;
1835
1836 status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
1837 opc, NULL);
1838 devm_kfree(ice_hw_to_dev(hw), cbuf);
1839
1840 if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1841 break;
1842
1843		/* If ENOMEM is returned, try again with a bigger buffer */
1844 } while (--retries);
1845
1846 return status;
1847}
1848
1849/**
1850 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
1851 * @hw: pointer to the hardware structure
1852 */
1853void ice_set_safe_mode_caps(struct ice_hw *hw)
1854{
1855 struct ice_hw_func_caps *func_caps = &hw->func_caps;
1856 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
1857 u32 valid_func, rxq_first_id, txq_first_id;
1858 u32 msix_vector_first_id, max_mtu;
1859 u32 num_func = 0;
1860 u8 i;
1861
1862 /* cache some func_caps values that should be restored after memset */
1863 valid_func = func_caps->common_cap.valid_functions;
1864 txq_first_id = func_caps->common_cap.txq_first_id;
1865 rxq_first_id = func_caps->common_cap.rxq_first_id;
1866 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
1867 max_mtu = func_caps->common_cap.max_mtu;
1868
1869 /* unset func capabilities */
1870 memset(func_caps, 0, sizeof(*func_caps));
1871
1872 /* restore cached values */
1873 func_caps->common_cap.valid_functions = valid_func;
1874 func_caps->common_cap.txq_first_id = txq_first_id;
1875 func_caps->common_cap.rxq_first_id = rxq_first_id;
1876 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1877 func_caps->common_cap.max_mtu = max_mtu;
1878
1879 /* one Tx and one Rx queue in safe mode */
1880 func_caps->common_cap.num_rxq = 1;
1881 func_caps->common_cap.num_txq = 1;
1882
1883 /* two MSIX vectors, one for traffic and one for misc causes */
1884 func_caps->common_cap.num_msix_vectors = 2;
1885 func_caps->guar_num_vsi = 1;
1886
1887 /* cache some dev_caps values that should be restored after memset */
1888 valid_func = dev_caps->common_cap.valid_functions;
1889 txq_first_id = dev_caps->common_cap.txq_first_id;
1890 rxq_first_id = dev_caps->common_cap.rxq_first_id;
1891 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
1892 max_mtu = dev_caps->common_cap.max_mtu;
1893
1894 /* unset dev capabilities */
1895 memset(dev_caps, 0, sizeof(*dev_caps));
1896
1897 /* restore cached values */
1898 dev_caps->common_cap.valid_functions = valid_func;
1899 dev_caps->common_cap.txq_first_id = txq_first_id;
1900 dev_caps->common_cap.rxq_first_id = rxq_first_id;
1901 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1902 dev_caps->common_cap.max_mtu = max_mtu;
1903
1904 /* valid_func is a bitmap. get number of functions */
1905#define ICE_MAX_FUNCS 8
1906 for (i = 0; i < ICE_MAX_FUNCS; i++)
1907 if (valid_func & BIT(i))
1908 num_func++;
1909
1910 /* one Tx and one Rx queue per function in safe mode */
1911 dev_caps->common_cap.num_rxq = num_func;
1912 dev_caps->common_cap.num_txq = num_func;
1913
1914 /* two MSIX vectors per function */
1915 dev_caps->common_cap.num_msix_vectors = 2 * num_func;
1916}
1917
1918/**
1919 * ice_get_caps - get info about the HW
1920 * @hw: pointer to the hardware structure
1921 */
1922enum ice_status ice_get_caps(struct ice_hw *hw)
1923{
1924 enum ice_status status;
1925
1926 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1927 if (!status)
1928 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1929
1930 return status;
1931}
1932
1933/**
1934 * ice_aq_manage_mac_write - manage MAC address write command
1935 * @hw: pointer to the HW struct
1936 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1937 * @flags: flags to control write behavior
1938 * @cd: pointer to command details structure or NULL
1939 *
1940 * This function is used to write MAC address to the NVM (0x0108).
1941 */
1942enum ice_status
1943ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
1944 struct ice_sq_cd *cd)
1945{
1946 struct ice_aqc_manage_mac_write *cmd;
1947 struct ice_aq_desc desc;
1948
1949 cmd = &desc.params.mac_write;
1950 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
1951
1952 cmd->flags = flags;
1953
1954 /* Prep values for flags, sah, sal */
1955 cmd->sah = htons(*((const u16 *)mac_addr));
1956 cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
1957
1958 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1959}
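
/* Usage sketch (illustrative only, not part of the driver): write a locally
 * administered address via ice_aq_manage_mac_write(). The function above
 * packs the first two bytes of mac_addr into sah and the remaining four into
 * sal. The ICE_AQC_MAN_MAC_UPDATE_LAA_WOL flag name is assumed from the
 * admin queue definitions and is not defined in this file.
 */
#if 0	/* illustrative sketch, not compiled */
static enum ice_status ice_example_write_laa(struct ice_hw *hw)
{
	/* locally administered example address */
	const u8 laa[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	return ice_aq_manage_mac_write(hw, laa, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL,
				       NULL);
}
#endif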
1960
1961/**
1962 * ice_aq_clear_pxe_mode
1963 * @hw: pointer to the HW struct
1964 *
1965 * Tell the firmware that the driver is taking over from PXE (0x0110).
1966 */
1967static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1968{
1969 struct ice_aq_desc desc;
1970
1971 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1972 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1973
1974 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1975}
1976
1977/**
1978 * ice_clear_pxe_mode - clear pxe operations mode
1979 * @hw: pointer to the HW struct
1980 *
1981 * Make sure all PXE mode settings are cleared, including things
1982 * like descriptor fetch/write-back mode.
1983 */
1984void ice_clear_pxe_mode(struct ice_hw *hw)
1985{
1986 if (ice_check_sq_alive(hw, &hw->adminq))
1987 ice_aq_clear_pxe_mode(hw);
1988}
1989
1990/**
1991 * ice_get_link_speed_based_on_phy_type - returns link speed
1992 * @phy_type_low: lower part of phy_type
1993 * @phy_type_high: higher part of phy_type
1994 *
1995 * This helper function will convert an entry in PHY type structure
1996 * [phy_type_low, phy_type_high] to its corresponding link speed.
1997 * Note: In the structure of [phy_type_low, phy_type_high], there should
1998 * be exactly one bit set, as this function converts a single PHY type to
1999 * its speed.
2000 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is returned.
2001 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is returned.
2002 */
2003static u16
2004ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2005{
2006 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2007 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2008
2009 switch (phy_type_low) {
2010 case ICE_PHY_TYPE_LOW_100BASE_TX:
2011 case ICE_PHY_TYPE_LOW_100M_SGMII:
2012 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2013 break;
2014 case ICE_PHY_TYPE_LOW_1000BASE_T:
2015 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2016 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2017 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2018 case ICE_PHY_TYPE_LOW_1G_SGMII:
2019 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2020 break;
2021 case ICE_PHY_TYPE_LOW_2500BASE_T:
2022 case ICE_PHY_TYPE_LOW_2500BASE_X:
2023 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2024 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2025 break;
2026 case ICE_PHY_TYPE_LOW_5GBASE_T:
2027 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2028 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2029 break;
2030 case ICE_PHY_TYPE_LOW_10GBASE_T:
2031 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2032 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2033 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2034 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2035 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2036 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2037 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2038 break;
2039 case ICE_PHY_TYPE_LOW_25GBASE_T:
2040 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2041 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2042 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2043 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2044 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2045 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2046 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2047 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2048 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2049 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2050 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2051 break;
2052 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2053 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2054 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2055 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2056 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2057 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2058 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2059 break;
2060 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2061 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2062 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2063 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2064 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2065 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2066 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2067 case ICE_PHY_TYPE_LOW_50G_AUI2:
2068 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2069 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2070 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2071 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2072 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2073 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2074 case ICE_PHY_TYPE_LOW_50G_AUI1:
2075 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2076 break;
2077 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2078 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2079 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2080 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2081 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2082 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2083 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2084 case ICE_PHY_TYPE_LOW_100G_AUI4:
2085 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2086 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2087 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2088 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2089 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2090 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2091 break;
2092 default:
2093 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2094 break;
2095 }
2096
2097 switch (phy_type_high) {
2098 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2099 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2100 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2101 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2102 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2103 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2104 break;
2105 default:
2106 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2107 break;
2108 }
2109
2110 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2111 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2112 return ICE_AQ_LINK_SPEED_UNKNOWN;
2113 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2114 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2115 return ICE_AQ_LINK_SPEED_UNKNOWN;
2116 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2117 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2118 return speed_phy_type_low;
2119 else
2120 return speed_phy_type_high;
2121}
2122
2123/**
2124 * ice_update_phy_type
2125 * @phy_type_low: pointer to the lower part of phy_type
2126 * @phy_type_high: pointer to the higher part of phy_type
2127 * @link_speeds_bitmap: targeted link speeds bitmap
2128 *
2129 * Note: For the link_speeds_bitmap structure, you can check it at
2130 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2131 * link_speeds_bitmap that includes multiple speeds.
2132 *
2133 * Each entry in the [phy_type_low, phy_type_high] structure represents a
2134 * certain link speed. This helper function turns on the bits in
2135 * [phy_type_low, phy_type_high] that correspond to the speeds set in the
2136 * link_speeds_bitmap input parameter.
2137 */
2138void
2139ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2140 u16 link_speeds_bitmap)
2141{
2142 u64 pt_high;
2143 u64 pt_low;
2144 int index;
2145 u16 speed;
2146
2147 /* We first check with low part of phy_type */
2148 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2149 pt_low = BIT_ULL(index);
2150 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2151
2152 if (link_speeds_bitmap & speed)
2153 *phy_type_low |= BIT_ULL(index);
2154 }
2155
2156 /* We then check with high part of phy_type */
2157 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2158 pt_high = BIT_ULL(index);
2159 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2160
2161 if (link_speeds_bitmap & speed)
2162 *phy_type_high |= BIT_ULL(index);
2163 }
2164}
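
/* Usage sketch (illustrative only, not part of the driver): restrict a PHY
 * configuration to 10G and 25G speeds by building phy_type masks with
 * ice_update_phy_type(). The cfg argument and the surrounding call flow are
 * assumptions made for the example; the helper and the speed defines are the
 * ones used in this file.
 */
#if 0	/* illustrative sketch, not compiled */
static void ice_example_restrict_speeds(struct ice_aqc_set_phy_cfg_data *cfg)
{
	u64 phy_low = 0, phy_high = 0;

	ice_update_phy_type(&phy_low, &phy_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);

	/* cfg fields are little-endian on the wire */
	cfg->phy_type_low = cpu_to_le64(phy_low);
	cfg->phy_type_high = cpu_to_le64(phy_high);
}
#endif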
2165
2166/**
2167 * ice_aq_set_phy_cfg
2168 * @hw: pointer to the HW struct
2169 * @lport: logical port number
2170 * @cfg: structure with PHY configuration data to be set
2171 * @cd: pointer to command details structure or NULL
2172 *
2173 * Set the various PHY configuration parameters supported on the Port.
2174 * One or more of the Set PHY config parameters may be ignored in an MFP
2175 * mode as the PF may not have the privilege to set some of the PHY Config
2176 * parameters. This status will be indicated by the command response (0x0601).
2177 */
2178enum ice_status
2179ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
2180 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2181{
2182 struct ice_aq_desc desc;
2183
2184 if (!cfg)
2185 return ICE_ERR_PARAM;
2186
2187 /* Ensure that only valid bits of cfg->caps can be turned on. */
2188 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2189 ice_debug(hw, ICE_DBG_PHY,
2190 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2191 cfg->caps);
2192
2193 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2194 }
2195
2196 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2197 desc.params.set_phy.lport_num = lport;
2198 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2199
2200 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2201 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2202 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2203 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2204 ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2205 ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
2206 cfg->low_power_ctrl);
2207 ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2208 ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2209 ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2210
2211 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2212}
2213
2214/**
2215 * ice_update_link_info - update status of the HW network link
2216 * @pi: port info structure of the interested logical port
2217 */
2218enum ice_status ice_update_link_info(struct ice_port_info *pi)
2219{
2220 struct ice_link_status *li;
2221 enum ice_status status;
2222
2223 if (!pi)
2224 return ICE_ERR_PARAM;
2225
2226 li = &pi->phy.link_info;
2227
2228 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2229 if (status)
2230 return status;
2231
2232 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2233 struct ice_aqc_get_phy_caps_data *pcaps;
2234 struct ice_hw *hw;
2235
2236 hw = pi->hw;
2237 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2238 GFP_KERNEL);
2239 if (!pcaps)
2240 return ICE_ERR_NO_MEMORY;
2241
2242 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2243 pcaps, NULL);
2244 if (!status)
2245 memcpy(li->module_type, &pcaps->module_type,
2246 sizeof(li->module_type));
2247
2248 devm_kfree(ice_hw_to_dev(hw), pcaps);
2249 }
2250
2251 return status;
2252}
2253
2254/**
2255 * ice_set_fc
2256 * @pi: port information structure
2257 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2258 * @ena_auto_link_update: enable automatic link update
2259 *
2260 * Set the requested flow control mode.
2261 */
2262enum ice_status
2263ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2264{
2265 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2266 struct ice_aqc_get_phy_caps_data *pcaps;
2267 enum ice_status status;
2268 u8 pause_mask = 0x0;
2269 struct ice_hw *hw;
2270
2271 if (!pi)
2272 return ICE_ERR_PARAM;
2273 hw = pi->hw;
2274 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2275
2276 switch (pi->fc.req_mode) {
2277 case ICE_FC_FULL:
2278 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2279 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2280 break;
2281 case ICE_FC_RX_PAUSE:
2282 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2283 break;
2284 case ICE_FC_TX_PAUSE:
2285 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2286 break;
2287 default:
2288 break;
2289 }
2290
2291 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2292 if (!pcaps)
2293 return ICE_ERR_NO_MEMORY;
2294
2295 /* Get the current PHY config */
2296 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2297 NULL);
2298 if (status) {
2299 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2300 goto out;
2301 }
2302
2303 /* clear the old pause settings */
2304 cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2305 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2306
2307 /* set the new capabilities */
2308 cfg.caps |= pause_mask;
2309
2310 /* If the capabilities have changed, then set the new config */
2311 if (cfg.caps != pcaps->caps) {
2312 int retry_count, retry_max = 10;
2313
2314 /* Auto restart link so settings take effect */
2315 if (ena_auto_link_update)
2316 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2317 /* Copy over all the old settings */
2318 cfg.phy_type_high = pcaps->phy_type_high;
2319 cfg.phy_type_low = pcaps->phy_type_low;
2320 cfg.low_power_ctrl = pcaps->low_power_ctrl;
2321 cfg.eee_cap = pcaps->eee_cap;
2322 cfg.eeer_value = pcaps->eeer_value;
2323 cfg.link_fec_opt = pcaps->link_fec_options;
2324
2325 status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2326 if (status) {
2327 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2328 goto out;
2329 }
2330
2331 /* Update the link info
2332 * It sometimes takes a really long time for link to
2333 * come back from the atomic reset. Thus, we wait a
2334 * little bit.
2335 */
2336 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2337 status = ice_update_link_info(pi);
2338
2339 if (!status)
2340 break;
2341
2342 mdelay(100);
2343 }
2344
2345 if (status)
2346 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2347 }
2348
2349out:
2350 devm_kfree(ice_hw_to_dev(hw), pcaps);
2351 return status;
2352}
2353
2354/**
2355 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2356 * @caps: PHY ability structure to copy data from
2357 * @cfg: PHY configuration structure to copy data to
2358 *
2359 * Helper function to copy AQC PHY get ability data to PHY set configuration
2360 * data structure
2361 */
2362void
2363ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2364 struct ice_aqc_set_phy_cfg_data *cfg)
2365{
2366 if (!caps || !cfg)
2367 return;
2368
2369 cfg->phy_type_low = caps->phy_type_low;
2370 cfg->phy_type_high = caps->phy_type_high;
2371 cfg->caps = caps->caps;
2372 cfg->low_power_ctrl = caps->low_power_ctrl;
2373 cfg->eee_cap = caps->eee_cap;
2374 cfg->eeer_value = caps->eeer_value;
2375 cfg->link_fec_opt = caps->link_fec_options;
2376}
2377
2378/**
2379 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2380 * @cfg: PHY configuration data to set FEC mode
2381 * @fec: FEC mode to configure
2382 *
2383 * Before calling, the caller should copy ice_aqc_get_phy_caps_data.caps bit
2384 * ICE_AQC_PHY_EN_AUTO_FEC (bit 7) into cfg.caps (ICE_AQ_PHY_ENA_AUTO_FEC) and
2385 * ice_aqc_get_phy_caps_data.link_fec_options into cfg.link_fec_opt.
2386 */
2387void
2388ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2389{
2390 switch (fec) {
2391 case ICE_FEC_BASER:
2392 /* Clear RS bits, and AND BASE-R ability
2393 * bits and OR request bits.
2394 */
2395 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2396 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2397 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2398 ICE_AQC_PHY_FEC_25G_KR_REQ;
2399 break;
2400 case ICE_FEC_RS:
2401 /* Clear BASE-R bits, and AND RS ability
2402 * bits and OR request bits.
2403 */
2404 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2405 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2406 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2407 break;
2408 case ICE_FEC_NONE:
2409 /* Clear all FEC option bits. */
2410 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2411 break;
2412 case ICE_FEC_AUTO:
2413 /* AND auto FEC bit, and all caps bits. */
2414 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2415 break;
2416 }
2417}
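
/* Usage sketch (illustrative only, not part of the driver): request RS FEC on
 * a port. Per the note above, the current abilities are read first and copied
 * into the config (which carries over the auto-FEC bit and the FEC option
 * bits) before ice_cfg_phy_fec() adjusts them. Error handling is reduced to a
 * minimum; the function name is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static enum ice_status ice_example_set_rs_fec(struct ice_port_info *pi)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (!status) {
		ice_copy_phy_caps_to_cfg(pcaps, &cfg);
		ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
		cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
	}

	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
#endif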
2418
2419/**
2420 * ice_get_link_status - get status of the HW network link
2421 * @pi: port information structure
2422 * @link_up: pointer to bool (true/false = linkup/linkdown)
2423 *
2424 * Variable link_up is true if link is up, false if link is down.
2425 * The variable link_up is invalid if status is non-zero. As a
2426 * result of this call, link status reporting becomes enabled.
2427 */
2428enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2429{
2430 struct ice_phy_info *phy_info;
2431 enum ice_status status = 0;
2432
2433 if (!pi || !link_up)
2434 return ICE_ERR_PARAM;
2435
2436 phy_info = &pi->phy;
2437
2438 if (phy_info->get_link_info) {
2439 status = ice_update_link_info(pi);
2440
2441 if (status)
2442 ice_debug(pi->hw, ICE_DBG_LINK,
2443 "get link status error, status = %d\n",
2444 status);
2445 }
2446
2447 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2448
2449 return status;
2450}
2451
2452/**
2453 * ice_aq_set_link_restart_an
2454 * @pi: pointer to the port information structure
2455 * @ena_link: if true: enable link, if false: disable link
2456 * @cd: pointer to command details structure or NULL
2457 *
2458 * Sets up the link and restarts the Auto-Negotiation over the link.
2459 */
2460enum ice_status
2461ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2462 struct ice_sq_cd *cd)
2463{
2464 struct ice_aqc_restart_an *cmd;
2465 struct ice_aq_desc desc;
2466
2467 cmd = &desc.params.restart_an;
2468
2469 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2470
2471 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2472 cmd->lport_num = pi->lport;
2473 if (ena_link)
2474 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2475 else
2476 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2477
2478 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2479}
2480
2481/**
2482 * ice_aq_set_event_mask
2483 * @hw: pointer to the HW struct
2484 * @port_num: port number of the physical function
2485 * @mask: event mask to be set
2486 * @cd: pointer to command details structure or NULL
2487 *
2488 * Set event mask (0x0613)
2489 */
2490enum ice_status
2491ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2492 struct ice_sq_cd *cd)
2493{
2494 struct ice_aqc_set_event_mask *cmd;
2495 struct ice_aq_desc desc;
2496
2497 cmd = &desc.params.set_event_mask;
2498
2499 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2500
2501 cmd->lport_num = port_num;
2502
2503 cmd->event_mask = cpu_to_le16(mask);
2504 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2505}
2506
2507/**
2508 * ice_aq_set_mac_loopback
2509 * @hw: pointer to the HW struct
2510 * @ena_lpbk: Enable or Disable loopback
2511 * @cd: pointer to command details structure or NULL
2512 *
2513 * Enable/disable loopback on a given port
2514 */
2515enum ice_status
2516ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2517{
2518 struct ice_aqc_set_mac_lb *cmd;
2519 struct ice_aq_desc desc;
2520
2521 cmd = &desc.params.set_mac_lb;
2522
2523 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2524 if (ena_lpbk)
2525 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2526
2527 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2528}
2529
2530/**
2531 * ice_aq_set_port_id_led
2532 * @pi: pointer to the port information
2533 * @is_orig_mode: is this LED set to original mode (by the net-list)
2534 * @cd: pointer to command details structure or NULL
2535 *
2536 * Set LED value for the given port (0x06e9)
2537 */
2538enum ice_status
2539ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2540 struct ice_sq_cd *cd)
2541{
2542 struct ice_aqc_set_port_id_led *cmd;
2543 struct ice_hw *hw = pi->hw;
2544 struct ice_aq_desc desc;
2545
2546 cmd = &desc.params.set_port_id_led;
2547
2548 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2549
2550 if (is_orig_mode)
2551 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2552 else
2553 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2554
2555 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2556}
2557
2558/**
2559 * __ice_aq_get_set_rss_lut
2560 * @hw: pointer to the hardware structure
2561 * @vsi_id: VSI FW index
2562 * @lut_type: LUT table type
2563 * @lut: pointer to the LUT buffer provided by the caller
2564 * @lut_size: size of the LUT buffer
2565 * @glob_lut_idx: global LUT index
2566 * @set: set true to set the table, false to get the table
2567 *
2568 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
2569 */
2570static enum ice_status
2571__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2572 u16 lut_size, u8 glob_lut_idx, bool set)
2573{
2574 struct ice_aqc_get_set_rss_lut *cmd_resp;
2575 struct ice_aq_desc desc;
2576 enum ice_status status;
2577 u16 flags = 0;
2578
2579 cmd_resp = &desc.params.get_set_rss_lut;
2580
2581 if (set) {
2582 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2583 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2584 } else {
2585 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2586 }
2587
2588 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2589 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2590 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2591 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2592
2593 switch (lut_type) {
2594 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2595 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2596 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2597 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2598 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2599 break;
2600 default:
2601 status = ICE_ERR_PARAM;
2602 goto ice_aq_get_set_rss_lut_exit;
2603 }
2604
2605 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2606 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2607 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2608
2609 if (!set)
2610 goto ice_aq_get_set_rss_lut_send;
2611 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2612 if (!set)
2613 goto ice_aq_get_set_rss_lut_send;
2614 } else {
2615 goto ice_aq_get_set_rss_lut_send;
2616 }
2617
2618 /* LUT size is only valid for Global and PF table types */
2619 switch (lut_size) {
2620 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2621 break;
2622 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2623 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2624 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2625 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2626 break;
2627 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2628 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2629 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2630 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2631 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2632 break;
2633 }
2634 /* fall-through */
2635 default:
2636 status = ICE_ERR_PARAM;
2637 goto ice_aq_get_set_rss_lut_exit;
2638 }
2639
2640ice_aq_get_set_rss_lut_send:
2641 cmd_resp->flags = cpu_to_le16(flags);
2642 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2643
2644ice_aq_get_set_rss_lut_exit:
2645 return status;
2646}
2647
2648/**
2649 * ice_aq_get_rss_lut
2650 * @hw: pointer to the hardware structure
2651 * @vsi_handle: software VSI handle
2652 * @lut_type: LUT table type
2653 * @lut: pointer to the LUT buffer provided by the caller
2654 * @lut_size: size of the LUT buffer
2655 *
2656 * get the RSS lookup table, PF or VSI type
2657 */
2658enum ice_status
2659ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2660 u8 *lut, u16 lut_size)
2661{
2662 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2663 return ICE_ERR_PARAM;
2664
2665 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2666 lut_type, lut, lut_size, 0, false);
2667}
2668
2669/**
2670 * ice_aq_set_rss_lut
2671 * @hw: pointer to the hardware structure
2672 * @vsi_handle: software VSI handle
2673 * @lut_type: LUT table type
2674 * @lut: pointer to the LUT buffer provided by the caller
2675 * @lut_size: size of the LUT buffer
2676 *
2677 * set the RSS lookup table, PF or VSI type
2678 */
2679enum ice_status
2680ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2681 u8 *lut, u16 lut_size)
2682{
2683 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2684 return ICE_ERR_PARAM;
2685
2686 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2687 lut_type, lut, lut_size, 0, true);
2688}
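
/* Usage sketch (illustrative only, not part of the driver): program a
 * 128-entry PF lookup table that spreads flows round-robin across a number of
 * queues. This assumes ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 equals the byte
 * size of a 128-entry table, as the lut_size switch in
 * __ice_aq_get_set_rss_lut() implies; the function name is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static enum ice_status
ice_example_set_pf_lut(struct ice_hw *hw, u16 vsi_handle, u8 num_queues)
{
	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128];
	u16 i;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_queues;

	return ice_aq_set_rss_lut(hw, vsi_handle,
				  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				  lut, sizeof(lut));
}
#endif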
2689
2690/**
2691 * __ice_aq_get_set_rss_key
2692 * @hw: pointer to the HW struct
2693 * @vsi_id: VSI FW index
2694 * @key: pointer to key info struct
2695 * @set: set true to set the key, false to get the key
2696 *
2697 * get (0x0B04) or set (0x0B02) the RSS key per VSI
2698 */
2699static enum ice_status
2700__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2701			 struct ice_aqc_get_set_rss_keys *key,
2702			 bool set)
2703{
2704 struct ice_aqc_get_set_rss_key *cmd_resp;
2705 u16 key_size = sizeof(*key);
2706 struct ice_aq_desc desc;
2707
2708 cmd_resp = &desc.params.get_set_rss_key;
2709
2710 if (set) {
2711 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2712 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2713 } else {
2714 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2715 }
2716
2717 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2718 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2719 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2720 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2721
2722 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2723}
2724
2725/**
2726 * ice_aq_get_rss_key
2727 * @hw: pointer to the HW struct
2728 * @vsi_handle: software VSI handle
2729 * @key: pointer to key info struct
2730 *
2731 * get the RSS key per VSI
2732 */
2733enum ice_status
2734ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2735 struct ice_aqc_get_set_rss_keys *key)
2736{
2737 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2738 return ICE_ERR_PARAM;
2739
2740 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2741 key, false);
2742}
2743
2744/**
2745 * ice_aq_set_rss_key
2746 * @hw: pointer to the HW struct
2747 * @vsi_handle: software VSI handle
2748 * @keys: pointer to key info struct
2749 *
2750 * set the RSS key per VSI
2751 */
2752enum ice_status
2753ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2754 struct ice_aqc_get_set_rss_keys *keys)
2755{
2756 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2757 return ICE_ERR_PARAM;
2758
2759 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2760 keys, true);
2761}
2762
2763/**
2764 * ice_aq_add_lan_txq
2765 * @hw: pointer to the hardware structure
2766 * @num_qgrps: Number of added queue groups
2767 * @qg_list: list of queue groups to be added
2768 * @buf_size: size of buffer for indirect command
2769 * @cd: pointer to command details structure or NULL
2770 *
2771 * Add Tx LAN queue (0x0C30)
2772 *
2773 * NOTE:
2774 * Prior to calling add Tx LAN queue:
2775 * Initialize the following as part of the Tx queue context:
2776 * Completion queue ID if the queue uses Completion queue, Quanta profile,
2777 * Cache profile and Packet shaper profile.
2778 *
2779 * After add Tx LAN queue AQ command is completed:
2780 * Interrupts should be associated with specific queues,
2781 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
2782 * flow.
2783 */
2784static enum ice_status
2785ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2786 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2787 struct ice_sq_cd *cd)
2788{
2789 u16 i, sum_header_size, sum_q_size = 0;
2790 struct ice_aqc_add_tx_qgrp *list;
2791 struct ice_aqc_add_txqs *cmd;
2792 struct ice_aq_desc desc;
2793
2794 cmd = &desc.params.add_txqs;
2795
2796 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
2797
2798 if (!qg_list)
2799 return ICE_ERR_PARAM;
2800
2801 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2802 return ICE_ERR_PARAM;
2803
2804 sum_header_size = num_qgrps *
2805 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
2806
2807 list = qg_list;
2808 for (i = 0; i < num_qgrps; i++) {
2809 struct ice_aqc_add_txqs_perq *q = list->txqs;
2810
2811 sum_q_size += list->num_txqs * sizeof(*q);
2812 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2813 }
2814
2815 if (buf_size != (sum_header_size + sum_q_size))
2816 return ICE_ERR_PARAM;
2817
2818 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2819
2820 cmd->num_qgrps = num_qgrps;
2821
2822 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2823}
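
/* Buffer sizing sketch (illustrative): for the single-group, single-queue
 * case used by ice_ena_vsi_txq() below, and assuming ice_aqc_add_tx_qgrp
 * embeds storage for exactly one ice_aqc_add_txqs_perq element, the size
 * check above reduces to:
 *
 *	sum_header_size = 1 * (sizeof(*qg_list) - sizeof(*qg_list->txqs));
 *	sum_q_size      = 1 * sizeof(struct ice_aqc_add_txqs_perq);
 *	buf_size        = sum_header_size + sum_q_size = sizeof(*qg_list);
 */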
2824
2825/**
2826 * ice_aq_dis_lan_txq
2827 * @hw: pointer to the hardware structure
2828 * @num_qgrps: number of groups in the list
2829 * @qg_list: the list of groups to disable
2830 * @buf_size: the total size of the qg_list buffer in bytes
2831 * @rst_src: if called due to reset, specifies the reset source
2832 * @vmvf_num: the relative VM or VF number that is undergoing the reset
2833 * @cd: pointer to command details structure or NULL
2834 *
2835 * Disable LAN Tx queue (0x0C31)
2836 */
2837static enum ice_status
2838ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2839 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2840 enum ice_disq_rst_src rst_src, u16 vmvf_num,
2841 struct ice_sq_cd *cd)
2842{
2843 struct ice_aqc_dis_txqs *cmd;
2844 struct ice_aq_desc desc;
2845 enum ice_status status;
2846 u16 i, sz = 0;
2847
2848 cmd = &desc.params.dis_txqs;
2849 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2850
2851 /* qg_list can be NULL only in VM/VF reset flow */
2852 if (!qg_list && !rst_src)
2853 return ICE_ERR_PARAM;
2854
2855 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2856 return ICE_ERR_PARAM;
2857
2858 cmd->num_entries = num_qgrps;
2859
2860 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2861 ICE_AQC_Q_DIS_TIMEOUT_M);
2862
2863 switch (rst_src) {
2864 case ICE_VM_RESET:
2865 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2866 cmd->vmvf_and_timeout |=
2867 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2868 break;
2869 case ICE_VF_RESET:
2870 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2871 /* In this case, FW expects vmvf_num to be absolute VF ID */
2872 cmd->vmvf_and_timeout |=
2873 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2874 ICE_AQC_Q_DIS_VMVF_NUM_M);
2875 break;
2876 case ICE_NO_RESET:
2877 default:
2878 break;
2879 }
2880
2881 /* flush pipe on time out */
2882 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
2883 /* If no queue group info, we are in a reset flow. Issue the AQ */
2884 if (!qg_list)
2885 goto do_aq;
2886
2887 /* set RD bit to indicate that command buffer is provided by the driver
2888 * and it needs to be read by the firmware
2889 */
2890 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2891
2892 for (i = 0; i < num_qgrps; ++i) {
2893 /* Calculate the size taken up by the queue IDs in this group */
2894 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2895
2896 /* Add the size of the group header */
2897 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2898
2899		/* If the number of queues is even, add 2 bytes of padding */
2900 if ((qg_list[i].num_qs % 2) == 0)
2901 sz += 2;
2902 }
2903
2904 if (buf_size != sz)
2905 return ICE_ERR_PARAM;
2906
2907do_aq:
2908 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2909 if (status) {
2910 if (!qg_list)
2911 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
2912 vmvf_num, hw->adminq.sq_last_status);
2913 else
2914 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
2915 le16_to_cpu(qg_list[0].q_id[0]),
2916 hw->adminq.sq_last_status);
2917 }
2918 return status;
2919}
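
/* Buffer sizing sketch (illustrative): disabling one group of two queues
 * needs, assuming the q_id member provides storage for a single __le16 entry,
 *
 *	sz = 2 * sizeof(__le16)                                   queue IDs
 *	   + sizeof(struct ice_aqc_dis_txq_item) - sizeof(__le16) group header
 *	   + 2                                                    even num_qs padding
 *
 * which is what the caller must pass as buf_size.
 */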
2920
2921/* End of FW Admin Queue command wrappers */
2922
2923/**
2924 * ice_write_byte - write a byte to a packed context structure
2925 * @src_ctx: the context structure to read from
2926 * @dest_ctx: the context to be written to
2927 * @ce_info: a description of the struct to be filled
2928 */
2929static void
2930ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2931{
2932 u8 src_byte, dest_byte, mask;
2933 u8 *from, *dest;
2934 u16 shift_width;
2935
2936 /* copy from the next struct field */
2937 from = src_ctx + ce_info->offset;
2938
2939 /* prepare the bits and mask */
2940 shift_width = ce_info->lsb % 8;
2941 mask = (u8)(BIT(ce_info->width) - 1);
2942
2943 src_byte = *from;
2944 src_byte &= mask;
2945
2946 /* shift to correct alignment */
2947 mask <<= shift_width;
2948 src_byte <<= shift_width;
2949
2950 /* get the current bits from the target bit string */
2951 dest = dest_ctx + (ce_info->lsb / 8);
2952
2953 memcpy(&dest_byte, dest, sizeof(dest_byte));
2954
2955 dest_byte &= ~mask; /* get the bits not changing */
2956 dest_byte |= src_byte; /* add in the new bits */
2957
2958 /* put it all back */
2959 memcpy(dest, &dest_byte, sizeof(dest_byte));
2960}
2961
2962/**
2963 * ice_write_word - write a word to a packed context structure
2964 * @src_ctx: the context structure to read from
2965 * @dest_ctx: the context to be written to
2966 * @ce_info: a description of the struct to be filled
2967 */
2968static void
2969ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2970{
2971 u16 src_word, mask;
2972 __le16 dest_word;
2973 u8 *from, *dest;
2974 u16 shift_width;
2975
2976 /* copy from the next struct field */
2977 from = src_ctx + ce_info->offset;
2978
2979 /* prepare the bits and mask */
2980 shift_width = ce_info->lsb % 8;
2981 mask = BIT(ce_info->width) - 1;
2982
2983 /* don't swizzle the bits until after the mask because the mask bits
2984 * will be in a different bit position on big endian machines
2985 */
2986 src_word = *(u16 *)from;
2987 src_word &= mask;
2988
2989 /* shift to correct alignment */
2990 mask <<= shift_width;
2991 src_word <<= shift_width;
2992
2993 /* get the current bits from the target bit string */
2994 dest = dest_ctx + (ce_info->lsb / 8);
2995
2996 memcpy(&dest_word, dest, sizeof(dest_word));
2997
2998 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
2999 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3000
3001 /* put it all back */
3002 memcpy(dest, &dest_word, sizeof(dest_word));
3003}
3004
3005/**
3006 * ice_write_dword - write a dword to a packed context structure
3007 * @src_ctx: the context structure to read from
3008 * @dest_ctx: the context to be written to
3009 * @ce_info: a description of the struct to be filled
3010 */
3011static void
3012ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3013{
3014 u32 src_dword, mask;
3015 __le32 dest_dword;
3016 u8 *from, *dest;
3017 u16 shift_width;
3018
3019 /* copy from the next struct field */
3020 from = src_ctx + ce_info->offset;
3021
3022 /* prepare the bits and mask */
3023 shift_width = ce_info->lsb % 8;
3024
3025 /* if the field width is exactly 32 on an x86 machine, then the shift
3026 * operation will not work because the SHL instructions count is masked
3027 * to 5 bits so the shift will do nothing
3028 */
3029 if (ce_info->width < 32)
3030 mask = BIT(ce_info->width) - 1;
3031 else
3032 mask = (u32)~0;
3033
3034 /* don't swizzle the bits until after the mask because the mask bits
3035 * will be in a different bit position on big endian machines
3036 */
3037 src_dword = *(u32 *)from;
3038 src_dword &= mask;
3039
3040 /* shift to correct alignment */
3041 mask <<= shift_width;
3042 src_dword <<= shift_width;
3043
3044 /* get the current bits from the target bit string */
3045 dest = dest_ctx + (ce_info->lsb / 8);
3046
3047 memcpy(&dest_dword, dest, sizeof(dest_dword));
3048
3049 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
3050 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
3051
3052 /* put it all back */
3053 memcpy(dest, &dest_dword, sizeof(dest_dword));
3054}
3055
3056/**
3057 * ice_write_qword - write a qword to a packed context structure
3058 * @src_ctx: the context structure to read from
3059 * @dest_ctx: the context to be written to
3060 * @ce_info: a description of the struct to be filled
3061 */
3062static void
3063ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3064{
3065 u64 src_qword, mask;
3066 __le64 dest_qword;
3067 u8 *from, *dest;
3068 u16 shift_width;
3069
3070 /* copy from the next struct field */
3071 from = src_ctx + ce_info->offset;
3072
3073 /* prepare the bits and mask */
3074 shift_width = ce_info->lsb % 8;
3075
3076 /* if the field width is exactly 64 on an x86 machine, then the shift
3077 * operation will not work because the SHL instructions count is masked
3078 * to 6 bits so the shift will do nothing
3079 */
3080 if (ce_info->width < 64)
3081 mask = BIT_ULL(ce_info->width) - 1;
3082 else
3083 mask = (u64)~0;
3084
3085 /* don't swizzle the bits until after the mask because the mask bits
3086 * will be in a different bit position on big endian machines
3087 */
3088 src_qword = *(u64 *)from;
3089 src_qword &= mask;
3090
3091 /* shift to correct alignment */
3092 mask <<= shift_width;
3093 src_qword <<= shift_width;
3094
3095 /* get the current bits from the target bit string */
3096 dest = dest_ctx + (ce_info->lsb / 8);
3097
3098 memcpy(&dest_qword, dest, sizeof(dest_qword));
3099
3100 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
3101 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
3102
3103 /* put it all back */
3104 memcpy(dest, &dest_qword, sizeof(dest_qword));
3105}
3106
3107/**
3108 * ice_set_ctx - set context bits in packed structure
3109 * @src_ctx: pointer to a generic non-packed context structure
3110 * @dest_ctx: pointer to memory for the packed structure
3111 * @ce_info: a description of the structure to be transformed
3112 */
3113enum ice_status
3114ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3115{
3116 int f;
3117
3118 for (f = 0; ce_info[f].width; f++) {
3119 /* We have to deal with each element of the FW response
3120 * using the correct size so that we are correct regardless
3121 * of the endianness of the machine.
3122 */
3123 switch (ce_info[f].size_of) {
3124 case sizeof(u8):
3125 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3126 break;
3127 case sizeof(u16):
3128 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3129 break;
3130 case sizeof(u32):
3131 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3132 break;
3133 case sizeof(u64):
3134 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3135 break;
3136 default:
3137 return ICE_ERR_INVAL_SIZE;
3138 }
3139 }
3140
3141 return 0;
3142}
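
/* Usage sketch (illustrative only, not part of the driver): pack a
 * hypothetical two-field context into its hardware layout with ice_set_ctx().
 * The example struct, its bit placement and the zero-width terminator entry
 * are assumptions made purely for illustration; real context layouts are
 * described by ice_ctx_ele tables defined elsewhere in the driver.
 */
#if 0	/* illustrative sketch, not compiled */
struct ice_example_ctx {
	u16 base;	/* packed into bits 0..13 of the destination */
	u8 ena;		/* packed into bit 14 of the destination */
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
	{ .offset = offsetof(struct ice_example_ctx, base),
	  .size_of = sizeof(u16), .width = 14, .lsb = 0 },
	{ .offset = offsetof(struct ice_example_ctx, ena),
	  .size_of = sizeof(u8), .width = 1, .lsb = 14 },
	{ .width = 0 }	/* terminator: the loop in ice_set_ctx() stops here */
};

static enum ice_status ice_example_pack_ctx(u8 *packed_buf)
{
	struct ice_example_ctx ctx = { .base = 0x123, .ena = 1 };

	/* writes the two fields into the first two bytes of packed_buf */
	return ice_set_ctx((u8 *)&ctx, packed_buf, ice_example_ctx_info);
}
#endif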
3143
3144/**
3145 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3146 * @hw: pointer to the HW struct
3147 * @vsi_handle: software VSI handle
3148 * @tc: TC number
3149 * @q_handle: software queue handle
3150 */
3151static struct ice_q_ctx *
3152ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3153{
3154 struct ice_vsi_ctx *vsi;
3155 struct ice_q_ctx *q_ctx;
3156
3157 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3158 if (!vsi)
3159 return NULL;
3160 if (q_handle >= vsi->num_lan_q_entries[tc])
3161 return NULL;
3162 if (!vsi->lan_q_ctx[tc])
3163 return NULL;
3164 q_ctx = vsi->lan_q_ctx[tc];
3165 return &q_ctx[q_handle];
3166}
3167
3168/**
3169 * ice_ena_vsi_txq
3170 * @pi: port information structure
3171 * @vsi_handle: software VSI handle
3172 * @tc: TC number
3173 * @q_handle: software queue handle
3174 * @num_qgrps: Number of added queue groups
3175 * @buf: list of queue groups to be added
3176 * @buf_size: size of buffer for indirect command
3177 * @cd: pointer to command details structure or NULL
3178 *
3179 * This function adds one LAN queue
3180 */
3181enum ice_status
3182ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3183 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3184 struct ice_sq_cd *cd)
3185{
3186 struct ice_aqc_txsched_elem_data node = { 0 };
3187 struct ice_sched_node *parent;
3188 struct ice_q_ctx *q_ctx;
3189 enum ice_status status;
3190 struct ice_hw *hw;
3191
3192 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3193 return ICE_ERR_CFG;
3194
3195 if (num_qgrps > 1 || buf->num_txqs > 1)
3196 return ICE_ERR_MAX_LIMIT;
3197
3198 hw = pi->hw;
3199
3200 if (!ice_is_vsi_valid(hw, vsi_handle))
3201 return ICE_ERR_PARAM;
3202
3203 mutex_lock(&pi->sched_lock);
3204
3205 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3206 if (!q_ctx) {
3207 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3208 q_handle);
3209 status = ICE_ERR_PARAM;
3210 goto ena_txq_exit;
3211 }
3212
3213 /* find a parent node */
3214 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3215 ICE_SCHED_NODE_OWNER_LAN);
3216 if (!parent) {
3217 status = ICE_ERR_PARAM;
3218 goto ena_txq_exit;
3219 }
3220
3221 buf->parent_teid = parent->info.node_teid;
3222 node.parent_teid = parent->info.node_teid;
3223	/* Mark the values in the "generic" section as valid. The default
3224	 * value in the "generic" section is zero. This means that:
3225 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3226 * - 0 priority among siblings, indicated by Bit 1-3.
3227 * - WFQ, indicated by Bit 4.
3228 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3229 * Bit 5-6.
3230 * - Bit 7 is reserved.
3231 * Without setting the generic section as valid in valid_sections, the
3232 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3233 */
3234 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3235
3236 /* add the LAN queue */
3237 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3238 if (status) {
3239 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3240 le16_to_cpu(buf->txqs[0].txq_id),
3241 hw->adminq.sq_last_status);
3242 goto ena_txq_exit;
3243 }
3244
3245 node.node_teid = buf->txqs[0].q_teid;
3246 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3247 q_ctx->q_handle = q_handle;
3248
3249	/* add a leaf node into scheduler tree queue layer */
3250 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3251
3252ena_txq_exit:
3253 mutex_unlock(&pi->sched_lock);
3254 return status;
3255}
3256
3257/**
3258 * ice_dis_vsi_txq
3259 * @pi: port information structure
3260 * @vsi_handle: software VSI handle
3261 * @tc: TC number
3262 * @num_queues: number of queues
3263 * @q_handles: pointer to software queue handle array
3264 * @q_ids: pointer to the q_id array
3265 * @q_teids: pointer to queue node teids
3266 * @rst_src: if called due to reset, specifies the reset source
3267 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3268 * @cd: pointer to command details structure or NULL
3269 *
3270 * This function removes queues and their corresponding nodes in SW DB
3271 */
3272enum ice_status
3273ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3274 u16 *q_handles, u16 *q_ids, u32 *q_teids,
3275 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3276 struct ice_sq_cd *cd)
3277{
3278 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3279 struct ice_aqc_dis_txq_item qg_list;
3280 struct ice_q_ctx *q_ctx;
3281 u16 i;
3282
3283 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3284 return ICE_ERR_CFG;
3285
3286 if (!num_queues) {
3287		/* if the queue is already disabled but the disable queue
3288		 * command still has to be sent to complete the VF reset, then
3289		 * call ice_aq_dis_lan_txq without any queue information
3290 */
3291 if (rst_src)
3292 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3293 vmvf_num, NULL);
3294 return ICE_ERR_CFG;
3295 }
3296
3297 mutex_lock(&pi->sched_lock);
3298
3299 for (i = 0; i < num_queues; i++) {
3300 struct ice_sched_node *node;
3301
3302 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3303 if (!node)
3304 continue;
3305 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
3306 if (!q_ctx) {
3307			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
3308 q_handles[i]);
3309 continue;
3310 }
3311 if (q_ctx->q_handle != q_handles[i]) {
3312 ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
3313 q_ctx->q_handle, q_handles[i]);
3314 continue;
3315 }
3316 qg_list.parent_teid = node->info.parent_teid;
3317 qg_list.num_qs = 1;
3318 qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
3319 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
3320 sizeof(qg_list), rst_src, vmvf_num,
3321 cd);
3322
3323 if (status)
3324 break;
3325 ice_free_sched_node(pi, node);
3326 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
3327 }
3328 mutex_unlock(&pi->sched_lock);
3329 return status;
3330}
3331
3332/**
3333 * ice_cfg_vsi_qs - configure the new/existing VSI queues
3334 * @pi: port information structure
3335 * @vsi_handle: software VSI handle
3336 * @tc_bitmap: TC bitmap
3337 * @maxqs: max queues array per TC
3338 * @owner: LAN or RDMA
3339 *
3340 * This function adds/updates the VSI queues per TC.
3341 */
3342static enum ice_status
3343ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3344 u16 *maxqs, u8 owner)
3345{
3346 enum ice_status status = 0;
3347 u8 i;
3348
3349 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3350 return ICE_ERR_CFG;
3351
3352 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3353 return ICE_ERR_PARAM;
3354
3355 mutex_lock(&pi->sched_lock);
3356
3357 ice_for_each_traffic_class(i) {
3358 /* configuration is possible only if TC node is present */
3359 if (!ice_sched_get_tc_node(pi, i))
3360 continue;
3361
3362 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3363 ice_is_tc_ena(tc_bitmap, i));
3364 if (status)
3365 break;
3366 }
3367
3368 mutex_unlock(&pi->sched_lock);
3369 return status;
3370}
3371
3372/**
3373 * ice_cfg_vsi_lan - configure VSI LAN queues
3374 * @pi: port information structure
3375 * @vsi_handle: software VSI handle
3376 * @tc_bitmap: TC bitmap
3377 * @max_lanqs: max LAN queues array per TC
3378 *
3379 * This function adds/updates the VSI LAN queues per TC.
3380 */
3381enum ice_status
3382ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3383 u16 *max_lanqs)
3384{
3385 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3386 ICE_SCHED_NODE_OWNER_LAN);
3387}
3388
3389/**
3390 * ice_replay_pre_init - replay pre initialization
3391 * @hw: pointer to the HW struct
3392 *
3393 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
3394 */
3395static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3396{
3397 struct ice_switch_info *sw = hw->switch_info;
3398 u8 i;
3399
3400 /* Delete old entries from replay filter list head if there is any */
3401 ice_rm_all_sw_replay_rule_info(hw);
3402	/* At the start of replay, move entries into the replay_rules list;
3403	 * this allows adding rule entries back to the filt_rules list, which
3404	 * is the operational list.
3405	 */
3406 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3407 list_replace_init(&sw->recp_list[i].filt_rules,
3408 &sw->recp_list[i].filt_replay_rules);
3409
3410 return 0;
3411}
3412
3413/**
3414 * ice_replay_vsi - replay VSI configuration
3415 * @hw: pointer to the HW struct
3416 * @vsi_handle: driver VSI handle
3417 *
3418 * Restore all VSI configuration after reset. It is required to call this
3419 * function with main VSI first.
3420 */
3421enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3422{
3423 enum ice_status status;
3424
3425 if (!ice_is_vsi_valid(hw, vsi_handle))
3426 return ICE_ERR_PARAM;
3427
3428 /* Replay pre-initialization if there is any */
3429 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3430 status = ice_replay_pre_init(hw);
3431 if (status)
3432 return status;
3433 }
3434
3435 /* Replay per VSI all filters */
3436 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3437 return status;
3438}
3439
3440/**
3441 * ice_replay_post - post replay configuration cleanup
3442 * @hw: pointer to the HW struct
3443 *
3444 * Post replay cleanup.
3445 */
3446void ice_replay_post(struct ice_hw *hw)
3447{
3448 /* Delete old entries from replay filter list head */
3449 ice_rm_all_sw_replay_rule_info(hw);
3450}
3451
3452/**
3453 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
3454 * @hw: ptr to the hardware info
3455 * @reg: offset of 64 bit HW register to read from
3456 * @prev_stat_loaded: bool to specify if previous stats are loaded
3457 * @prev_stat: ptr to previous loaded stat value
3458 * @cur_stat: ptr to current stat value
3459 */
3460void
3461ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3462 u64 *prev_stat, u64 *cur_stat)
3463{
3464 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
3465
3466	/* device stats are not reset at PFR, so they likely will not be zeroed
3467 * when the driver starts. Thus, save the value from the first read
3468 * without adding to the statistic value so that we report stats which
3469 * count up from zero.
3470 */
3471 if (!prev_stat_loaded) {
3472 *prev_stat = new_data;
3473 return;
3474 }
3475
3476 /* Calculate the difference between the new and old values, and then
3477 * add it to the software stat value.
3478 */
3479 if (new_data >= *prev_stat)
3480 *cur_stat += new_data - *prev_stat;
3481 else
3482 /* to manage the potential roll-over */
3483 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
3484
3485 /* Update the previously stored value to prepare for next read */
3486 *prev_stat = new_data;
3487}
3488
3489/**
3490 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
3491 * @hw: ptr to the hardware info
3492 * @reg: offset of HW register to read from
3493 * @prev_stat_loaded: bool to specify if previous stats are loaded
3494 * @prev_stat: ptr to previous loaded stat value
3495 * @cur_stat: ptr to current stat value
3496 */
3497void
3498ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3499 u64 *prev_stat, u64 *cur_stat)
3500{
3501 u32 new_data;
3502
3503 new_data = rd32(hw, reg);
3504
3505 /* device stats are not reset at PFR, they likely will not be zeroed
3506	/* device stats are not reset at PFR, so they likely will not be zeroed
3507 * without adding to the statistic value so that we report stats which
3508 * count up from zero.
3509 */
3510 if (!prev_stat_loaded) {
3511 *prev_stat = new_data;
3512 return;
3513 }
3514
3515 /* Calculate the difference between the new and old values, and then
3516 * add it to the software stat value.
3517 */
3518 if (new_data >= *prev_stat)
3519 *cur_stat += new_data - *prev_stat;
3520 else
3521 /* to manage the potential roll-over */
3522 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
3523
3524 /* Update the previously stored value to prepare for next read */
3525 *prev_stat = new_data;
3526}
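
/* Minimal usage sketch (illustrative only; "stat_reg", the stat fields and
 * the prev-loaded flag shown here are hypothetical caller-side state, not
 * definitions from this file): a caller keeps the previous raw reading next
 * to the accumulated value and clears its "loaded" flag after a reset so the
 * next read re-baselines instead of counting a stale delta.
 *
 *	ice_stat_update32(hw, stat_reg, pf->stat_prev_loaded,
 *			  &prev->rx_undersize, &cur->rx_undersize);
 *	pf->stat_prev_loaded = true;
 */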
3527
3528/**
3529 * ice_sched_query_elem - query element information from HW
3530 * @hw: pointer to the HW struct
3531 * @node_teid: node TEID to be queried
3532 * @buf: buffer to element information
3533 *
3534 * This function queries HW element information
3535 */
3536enum ice_status
3537ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3538 struct ice_aqc_get_elem *buf)
3539{
3540 u16 buf_size, num_elem_ret = 0;
3541 enum ice_status status;
3542
3543 buf_size = sizeof(*buf);
3544 memset(buf, 0, buf_size);
3545 buf->generic[0].node_teid = cpu_to_le32(node_teid);
3546 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3547 NULL);
3548 if (status || num_elem_ret != 1)
3549 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3550 return status;
3551}
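
/* Usage sketch (illustrative; the TEID value is hypothetical): querying a
 * single scheduler element by its node TEID. On success, the first generic
 * entry of the buffer describes the element, as filled in by the AQ command.
 *
 *	struct ice_aqc_get_elem elem;
 *	u32 teid = 0x10;	(hypothetical node TEID)
 *
 *	if (!ice_sched_query_elem(hw, teid, &elem))
 *		the elem.generic[0] entry now describes the element
 */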
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_common.h"
5#include "ice_sched.h"
6#include "ice_adminq_cmd.h"
7#include "ice_flow.h"
8
9#define ICE_PF_RESET_WAIT_COUNT 300
10
11/**
12 * ice_set_mac_type - Sets MAC type
13 * @hw: pointer to the HW structure
14 *
15 * This function sets the MAC type of the adapter based on the
16 * vendor ID and device ID stored in the HW structure.
17 */
18static enum ice_status ice_set_mac_type(struct ice_hw *hw)
19{
20 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
21 return ICE_ERR_DEVICE_NOT_SUPPORTED;
22
23 switch (hw->device_id) {
24 case ICE_DEV_ID_E810C_BACKPLANE:
25 case ICE_DEV_ID_E810C_QSFP:
26 case ICE_DEV_ID_E810C_SFP:
27 case ICE_DEV_ID_E810_XXV_SFP:
28 hw->mac_type = ICE_MAC_E810;
29 break;
30 case ICE_DEV_ID_E823C_10G_BASE_T:
31 case ICE_DEV_ID_E823C_BACKPLANE:
32 case ICE_DEV_ID_E823C_QSFP:
33 case ICE_DEV_ID_E823C_SFP:
34 case ICE_DEV_ID_E823C_SGMII:
35 case ICE_DEV_ID_E822C_10G_BASE_T:
36 case ICE_DEV_ID_E822C_BACKPLANE:
37 case ICE_DEV_ID_E822C_QSFP:
38 case ICE_DEV_ID_E822C_SFP:
39 case ICE_DEV_ID_E822C_SGMII:
40 case ICE_DEV_ID_E822L_10G_BASE_T:
41 case ICE_DEV_ID_E822L_BACKPLANE:
42 case ICE_DEV_ID_E822L_SFP:
43 case ICE_DEV_ID_E822L_SGMII:
44 case ICE_DEV_ID_E823L_10G_BASE_T:
45 case ICE_DEV_ID_E823L_1GBE:
46 case ICE_DEV_ID_E823L_BACKPLANE:
47 case ICE_DEV_ID_E823L_QSFP:
48 case ICE_DEV_ID_E823L_SFP:
49 hw->mac_type = ICE_MAC_GENERIC;
50 break;
51 default:
52 hw->mac_type = ICE_MAC_UNKNOWN;
53 break;
54 }
55
56 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
57 return 0;
58}
59
60/**
61 * ice_clear_pf_cfg - Clear PF configuration
62 * @hw: pointer to the hardware structure
63 *
64 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
65 * configuration, flow director filters, etc.).
66 */
67enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
68{
69 struct ice_aq_desc desc;
70
71 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
72
73 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
74}
75
76/**
77 * ice_aq_manage_mac_read - manage MAC address read command
78 * @hw: pointer to the HW struct
79 * @buf: a virtual buffer to hold the manage MAC read response
80 * @buf_size: Size of the virtual buffer
81 * @cd: pointer to command details structure or NULL
82 *
83 * This function is used to return per PF station MAC address (0x0107).
84 * NOTE: Upon successful completion of this command, MAC address information
85 * is returned in the user-specified buffer. Please interpret the
86 * user-specified buffer as a "manage_mac_read" response.
87 * Responses such as various MAC addresses are stored in the HW struct (port.mac).
88 * ice_discover_dev_caps is expected to be called before this function is
89 * called.
90 */
91static enum ice_status
92ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
93 struct ice_sq_cd *cd)
94{
95 struct ice_aqc_manage_mac_read_resp *resp;
96 struct ice_aqc_manage_mac_read *cmd;
97 struct ice_aq_desc desc;
98 enum ice_status status;
99 u16 flags;
100 u8 i;
101
102 cmd = &desc.params.mac_read;
103
104 if (buf_size < sizeof(*resp))
105 return ICE_ERR_BUF_TOO_SHORT;
106
107 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
108
109 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
110 if (status)
111 return status;
112
113 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
114 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
115
116 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
117 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
118 return ICE_ERR_CFG;
119 }
120
121 /* A single port can report up to two (LAN and WoL) addresses */
122 for (i = 0; i < cmd->num_addr; i++)
123 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
124 ether_addr_copy(hw->port_info->mac.lan_addr,
125 resp[i].mac_addr);
126 ether_addr_copy(hw->port_info->mac.perm_addr,
127 resp[i].mac_addr);
128 break;
129 }
130
131 return 0;
132}
133
134/**
135 * ice_aq_get_phy_caps - returns PHY capabilities
136 * @pi: port information structure
137 * @qual_mods: report qualified modules
138 * @report_mode: report mode capabilities
139 * @pcaps: structure for PHY capabilities to be filled
140 * @cd: pointer to command details structure or NULL
141 *
142 * Returns the various PHY capabilities supported on the Port (0x0600)
143 */
144enum ice_status
145ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
146 struct ice_aqc_get_phy_caps_data *pcaps,
147 struct ice_sq_cd *cd)
148{
149 struct ice_aqc_get_phy_caps *cmd;
150 u16 pcaps_size = sizeof(*pcaps);
151 struct ice_aq_desc desc;
152 enum ice_status status;
153 struct ice_hw *hw;
154
155 cmd = &desc.params.get_phy;
156
157 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
158 return ICE_ERR_PARAM;
159 hw = pi->hw;
160
161 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
162
163 if (qual_mods)
164 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
165
166 cmd->param0 |= cpu_to_le16(report_mode);
167 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
168
169 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
170 report_mode);
171 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
172 (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
173 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
174 (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
175 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
176 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
177 pcaps->low_power_ctrl_an);
178 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
179 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
180 pcaps->eeer_value);
181 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
182 pcaps->link_fec_options);
183 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
184 pcaps->module_compliance_enforcement);
185 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
186 pcaps->extended_compliance_code);
187 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
188 pcaps->module_type[0]);
189 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
190 pcaps->module_type[1]);
191 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
192 pcaps->module_type[2]);
193
194 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
195 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
196 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
197 memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
198 sizeof(pi->phy.link_info.module_type));
199 }
200
201 return status;
202}
203
204/**
205 * ice_aq_get_link_topo_handle - get link topology node return status
206 * @pi: port information structure
207 * @node_type: requested node type
208 * @cd: pointer to command details structure or NULL
209 *
210 * Get link topology node return status for specified node type (0x06E0)
211 *
212 * The cage node type can be used to determine whether a cage is present. If
213 * the AQC returns an error (ENOENT), no cage is present; the connection type
214 * is then backplane or BASE-T.
215 */
216static enum ice_status
217ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
218 struct ice_sq_cd *cd)
219{
220 struct ice_aqc_get_link_topo *cmd;
221 struct ice_aq_desc desc;
222
223 cmd = &desc.params.get_link_topo;
224
225 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
226
227 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
228 ICE_AQC_LINK_TOPO_NODE_CTX_S);
229
230 /* set node type */
231 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
232
233 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
234}
235
236/**
237 * ice_is_media_cage_present
238 * @pi: port information structure
239 *
240 * Returns true if media cage is present, else false. If no cage, then
241 * media type is backplane or BASE-T.
242 */
243static bool ice_is_media_cage_present(struct ice_port_info *pi)
244{
245	/* The cage node type can be used to determine whether a cage is
246	 * present. If the AQC returns an error (ENOENT), no cage is present;
247	 * the connection type is then backplane or BASE-T.
248 */
249 return !ice_aq_get_link_topo_handle(pi,
250 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
251 NULL);
252}
253
254/**
255 * ice_get_media_type - Gets media type
256 * @pi: port information structure
257 */
258static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
259{
260 struct ice_link_status *hw_link_info;
261
262 if (!pi)
263 return ICE_MEDIA_UNKNOWN;
264
265 hw_link_info = &pi->phy.link_info;
266 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
267 /* If more than one media type is selected, report unknown */
268 return ICE_MEDIA_UNKNOWN;
269
270 if (hw_link_info->phy_type_low) {
271 /* 1G SGMII is a special case where some DA cable PHYs
272 * may show this as an option when it really shouldn't
273		 * be, since SGMII is meant to be between a MAC and a PHY
274		 * in a backplane. Try to detect this case and handle it.
275 */
276 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
277 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
278 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
279 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
280 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
281 return ICE_MEDIA_DA;
282
283 switch (hw_link_info->phy_type_low) {
284 case ICE_PHY_TYPE_LOW_1000BASE_SX:
285 case ICE_PHY_TYPE_LOW_1000BASE_LX:
286 case ICE_PHY_TYPE_LOW_10GBASE_SR:
287 case ICE_PHY_TYPE_LOW_10GBASE_LR:
288 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
289 case ICE_PHY_TYPE_LOW_25GBASE_SR:
290 case ICE_PHY_TYPE_LOW_25GBASE_LR:
291 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
292 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
293 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
294 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
295 case ICE_PHY_TYPE_LOW_50GBASE_SR:
296 case ICE_PHY_TYPE_LOW_50GBASE_FR:
297 case ICE_PHY_TYPE_LOW_50GBASE_LR:
298 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
299 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
300 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
301 case ICE_PHY_TYPE_LOW_100GBASE_DR:
302 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
303 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
304 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
305 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
306 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
307 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
308 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
309 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
310 return ICE_MEDIA_FIBER;
311 case ICE_PHY_TYPE_LOW_100BASE_TX:
312 case ICE_PHY_TYPE_LOW_1000BASE_T:
313 case ICE_PHY_TYPE_LOW_2500BASE_T:
314 case ICE_PHY_TYPE_LOW_5GBASE_T:
315 case ICE_PHY_TYPE_LOW_10GBASE_T:
316 case ICE_PHY_TYPE_LOW_25GBASE_T:
317 return ICE_MEDIA_BASET;
318 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
319 case ICE_PHY_TYPE_LOW_25GBASE_CR:
320 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
321 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
322 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
323 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
324 case ICE_PHY_TYPE_LOW_50GBASE_CP:
325 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
326 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
327 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
328 return ICE_MEDIA_DA;
329 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
330 case ICE_PHY_TYPE_LOW_40G_XLAUI:
331 case ICE_PHY_TYPE_LOW_50G_LAUI2:
332 case ICE_PHY_TYPE_LOW_50G_AUI2:
333 case ICE_PHY_TYPE_LOW_50G_AUI1:
334 case ICE_PHY_TYPE_LOW_100G_AUI4:
335 case ICE_PHY_TYPE_LOW_100G_CAUI4:
336 if (ice_is_media_cage_present(pi))
337 return ICE_MEDIA_DA;
338 fallthrough;
339 case ICE_PHY_TYPE_LOW_1000BASE_KX:
340 case ICE_PHY_TYPE_LOW_2500BASE_KX:
341 case ICE_PHY_TYPE_LOW_2500BASE_X:
342 case ICE_PHY_TYPE_LOW_5GBASE_KR:
343 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
344 case ICE_PHY_TYPE_LOW_25GBASE_KR:
345 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
346 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
347 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
348 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
349 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
350 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
351 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
352 return ICE_MEDIA_BACKPLANE;
353 }
354 } else {
355 switch (hw_link_info->phy_type_high) {
356 case ICE_PHY_TYPE_HIGH_100G_AUI2:
357 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
358 if (ice_is_media_cage_present(pi))
359 return ICE_MEDIA_DA;
360 fallthrough;
361 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
362 return ICE_MEDIA_BACKPLANE;
363 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
364 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
365 return ICE_MEDIA_FIBER;
366 }
367 }
368 return ICE_MEDIA_UNKNOWN;
369}
370
371/**
372 * ice_aq_get_link_info
373 * @pi: port information structure
374 * @ena_lse: enable/disable LinkStatusEvent reporting
375 * @link: pointer to link status structure - optional
376 * @cd: pointer to command details structure or NULL
377 *
378 * Get Link Status (0x0607). Returns the link status of the adapter.
379 */
380enum ice_status
381ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
382 struct ice_link_status *link, struct ice_sq_cd *cd)
383{
384 struct ice_aqc_get_link_status_data link_data = { 0 };
385 struct ice_aqc_get_link_status *resp;
386 struct ice_link_status *li_old, *li;
387 enum ice_media_type *hw_media_type;
388 struct ice_fc_info *hw_fc_info;
389 bool tx_pause, rx_pause;
390 struct ice_aq_desc desc;
391 enum ice_status status;
392 struct ice_hw *hw;
393 u16 cmd_flags;
394
395 if (!pi)
396 return ICE_ERR_PARAM;
397 hw = pi->hw;
398 li_old = &pi->phy.link_info_old;
399 hw_media_type = &pi->phy.media_type;
400 li = &pi->phy.link_info;
401 hw_fc_info = &pi->fc;
402
403 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
404 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
405 resp = &desc.params.get_link_status;
406 resp->cmd_flags = cpu_to_le16(cmd_flags);
407 resp->lport_num = pi->lport;
408
409 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
410
411 if (status)
412 return status;
413
414 /* save off old link status information */
415 *li_old = *li;
416
417 /* update current link status information */
418 li->link_speed = le16_to_cpu(link_data.link_speed);
419 li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
420 li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
421 *hw_media_type = ice_get_media_type(pi);
422 li->link_info = link_data.link_info;
423 li->an_info = link_data.an_info;
424 li->ext_info = link_data.ext_info;
425 li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
426 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
427 li->topo_media_conflict = link_data.topo_media_conflict;
428 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
429 ICE_AQ_CFG_PACING_TYPE_M);
430
431 /* update fc info */
432 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
433 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
434 if (tx_pause && rx_pause)
435 hw_fc_info->current_mode = ICE_FC_FULL;
436 else if (tx_pause)
437 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
438 else if (rx_pause)
439 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
440 else
441 hw_fc_info->current_mode = ICE_FC_NONE;
442
443 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
444
445 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
446 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
447 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
448 (unsigned long long)li->phy_type_low);
449 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
450 (unsigned long long)li->phy_type_high);
451 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
452 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
453 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
454 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
455 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
456 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
457 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
458 li->max_frame_size);
459 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
460
461 /* save link status information */
462 if (link)
463 *link = *li;
464
465 /* flag cleared so calling functions don't call AQ again */
466 pi->phy.get_link_info = false;
467
468 return 0;
469}
470
471/**
472 * ice_fill_tx_timer_and_fc_thresh
473 * @hw: pointer to the HW struct
474 * @cmd: pointer to MAC cfg structure
475 *
476 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
477 * descriptor
478 */
479static void
480ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
481 struct ice_aqc_set_mac_cfg *cmd)
482{
483 u16 fc_thres_val, tx_timer_val;
484 u32 val;
485
486 /* We read back the transmit timer and FC threshold value of
487 * LFC. Thus, we will use index =
488 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
489 *
490	 * Also, because we are operating on the transmit timer and FC
491	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority.
492 */
493#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
494
495 /* Retrieve the transmit timer */
496 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
497 tx_timer_val = val &
498 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
499 cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
500
501 /* Retrieve the FC threshold */
502 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
503 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
504
505 cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
506}
507
508/**
509 * ice_aq_set_mac_cfg
510 * @hw: pointer to the HW struct
511 * @max_frame_size: Maximum Frame Size to be supported
512 * @cd: pointer to command details structure or NULL
513 *
514 * Set MAC configuration (0x0603)
515 */
516enum ice_status
517ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
518{
519 struct ice_aqc_set_mac_cfg *cmd;
520 struct ice_aq_desc desc;
521
522 cmd = &desc.params.set_mac_cfg;
523
524 if (max_frame_size == 0)
525 return ICE_ERR_PARAM;
526
527 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
528
529 cmd->max_frame_size = cpu_to_le16(max_frame_size);
530
531 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
532
533 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
534}
535
536/**
537 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
538 * @hw: pointer to the HW struct
539 */
540static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
541{
542 struct ice_switch_info *sw;
543 enum ice_status status;
544
545 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
546 sizeof(*hw->switch_info), GFP_KERNEL);
547 sw = hw->switch_info;
548
549 if (!sw)
550 return ICE_ERR_NO_MEMORY;
551
552 INIT_LIST_HEAD(&sw->vsi_list_map_head);
553
554 status = ice_init_def_sw_recp(hw);
555 if (status) {
556 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
557 return status;
558 }
559 return 0;
560}
561
562/**
563 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
564 * @hw: pointer to the HW struct
565 */
566static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
567{
568 struct ice_switch_info *sw = hw->switch_info;
569 struct ice_vsi_list_map_info *v_pos_map;
570 struct ice_vsi_list_map_info *v_tmp_map;
571 struct ice_sw_recipe *recps;
572 u8 i;
573
574 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
575 list_entry) {
576 list_del(&v_pos_map->list_entry);
577 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
578 }
579 recps = hw->switch_info->recp_list;
580 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
581 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
582
583 recps[i].root_rid = i;
584 mutex_destroy(&recps[i].filt_rule_lock);
585 list_for_each_entry_safe(lst_itr, tmp_entry,
586 &recps[i].filt_rules, list_entry) {
587 list_del(&lst_itr->list_entry);
588 devm_kfree(ice_hw_to_dev(hw), lst_itr);
589 }
590 }
591 ice_rm_all_sw_replay_rule_info(hw);
592 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
593 devm_kfree(ice_hw_to_dev(hw), sw);
594}
595
596/**
597 * ice_get_fw_log_cfg - get FW logging configuration
598 * @hw: pointer to the HW struct
599 */
600static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
601{
602 struct ice_aq_desc desc;
603 enum ice_status status;
604 __le16 *config;
605 u16 size;
606
607 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
608 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
609 if (!config)
610 return ICE_ERR_NO_MEMORY;
611
612 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
613
614 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
615 if (!status) {
616 u16 i;
617
618 /* Save FW logging information into the HW structure */
619 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
620 u16 v, m, flgs;
621
622 v = le16_to_cpu(config[i]);
623 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
624 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
625
626 if (m < ICE_AQC_FW_LOG_ID_MAX)
627 hw->fw_log.evnts[m].cur = flgs;
628 }
629 }
630
631 devm_kfree(ice_hw_to_dev(hw), config);
632
633 return status;
634}
635
636/**
637 * ice_cfg_fw_log - configure FW logging
638 * @hw: pointer to the HW struct
639 * @enable: enable certain FW logging events if true, disable all if false
640 *
641 * This function enables/disables the FW logging via Rx CQ events and a UART
642 * port based on predetermined configurations. FW logging via the Rx CQ can be
643 * enabled/disabled for individual PF's. However, FW logging via the UART can
644 * only be enabled/disabled for all PFs on the same device.
645 *
646 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
647 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
648 * before initializing the device.
649 *
650 * When re/configuring FW logging, callers need to update the "cfg" elements of
651 * the hw->fw_log.evnts array with the desired logging event configurations for
652 * modules of interest. When disabling FW logging completely, the callers can
653 * just pass false in the "enable" parameter. On completion, the function will
654 * update the "cur" element of the hw->fw_log.evnts array with the resulting
655 * logging event configurations of the modules that are being re/configured. FW
656 * logging modules that are not part of a reconfiguration operation retain their
657 * previous states.
658 *
659 * Before resetting the device, it is recommended that the driver disables FW
660 * logging before shutting down the control queue. When disabling FW logging
661 * ("enable" = false), the latest configurations of FW logging events stored in
662 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
663 * a device reset.
664 *
665 * When enabling FW logging to emit log messages via the Rx CQ during the
666 * device's initialization phase, a mechanism other than interrupt handlers
667 * must be used to extract FW log messages from the Rx CQ periodically and
668 * to prevent the Rx CQ from filling up and stalling other types of control
669 * messages from FW to SW. Interrupts are typically disabled during the device's
670 * initialization phase.
671 */
672static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
673{
674 struct ice_aqc_fw_logging *cmd;
675 enum ice_status status = 0;
676 u16 i, chgs = 0, len = 0;
677 struct ice_aq_desc desc;
678 __le16 *data = NULL;
679 u8 actv_evnts = 0;
680 void *buf = NULL;
681
682 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
683 return 0;
684
685 /* Disable FW logging only when the control queue is still responsive */
686 if (!enable &&
687 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
688 return 0;
689
690 /* Get current FW log settings */
691 status = ice_get_fw_log_cfg(hw);
692 if (status)
693 return status;
694
695 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
696 cmd = &desc.params.fw_logging;
697
698 /* Indicate which controls are valid */
699 if (hw->fw_log.cq_en)
700 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
701
702 if (hw->fw_log.uart_en)
703 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
704
705 if (enable) {
706 /* Fill in an array of entries with FW logging modules and
707 * logging events being reconfigured.
708 */
709 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
710 u16 val;
711
712 /* Keep track of enabled event types */
713 actv_evnts |= hw->fw_log.evnts[i].cfg;
714
715 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
716 continue;
717
718 if (!data) {
719 data = devm_kcalloc(ice_hw_to_dev(hw),
720 sizeof(*data),
721 ICE_AQC_FW_LOG_ID_MAX,
722 GFP_KERNEL);
723 if (!data)
724 return ICE_ERR_NO_MEMORY;
725 }
726
727 val = i << ICE_AQC_FW_LOG_ID_S;
728 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
729 data[chgs++] = cpu_to_le16(val);
730 }
731
732 /* Only enable FW logging if at least one module is specified.
733		 * If FW logging is currently enabled but no modules are
734		 * enabled to emit log messages, disable FW logging altogether.
735 */
736 if (actv_evnts) {
737 /* Leave if there is effectively no change */
738 if (!chgs)
739 goto out;
740
741 if (hw->fw_log.cq_en)
742 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
743
744 if (hw->fw_log.uart_en)
745 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
746
747 buf = data;
748 len = sizeof(*data) * chgs;
749 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
750 }
751 }
752
753 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
754 if (!status) {
755 /* Update the current configuration to reflect events enabled.
756 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
757 * logging mode is enabled for the device. They do not reflect
758 * actual modules being enabled to emit log messages. So, their
759 * values remain unchanged even when all modules are disabled.
760 */
761 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
762
763 hw->fw_log.actv_evnts = actv_evnts;
764 for (i = 0; i < cnt; i++) {
765 u16 v, m;
766
767 if (!enable) {
768 /* When disabling all FW logging events as part
769 * of device's de-initialization, the original
770 * configurations are retained, and can be used
771 * to reconfigure FW logging later if the device
772 * is re-initialized.
773 */
774 hw->fw_log.evnts[i].cur = 0;
775 continue;
776 }
777
778 v = le16_to_cpu(data[i]);
779 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
780 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
781 }
782 }
783
784out:
785 if (data)
786 devm_kfree(ice_hw_to_dev(hw), data);
787
788 return status;
789}
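
/* Usage sketch (illustrative): enabling FW logging over the Rx CQ for one
 * module before device init, following the contract described in the
 * kernel-doc above. The specific module index and event flag names below are
 * assumptions about ice_adminq_cmd.h, not taken from this file.
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_GENERAL].cfg = ICE_AQC_FW_LOG_INFO_EN;
 *	if (ice_cfg_fw_log(hw, true))
 *		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
 */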
790
791/**
792 * ice_output_fw_log
793 * @hw: pointer to the HW struct
794 * @desc: pointer to the AQ message descriptor
795 * @buf: pointer to the buffer accompanying the AQ message
796 *
797 * Formats a FW Log message and outputs it via the standard driver logs.
798 */
799void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
800{
801 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
802 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
803 le16_to_cpu(desc->datalen));
804 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
805}
806
807/**
808 * ice_get_itr_intrl_gran
809 * @hw: pointer to the HW struct
810 *
811 * Determines the ITR/INTRL granularities based on the maximum aggregate
812 * bandwidth according to the device's configuration during power-on.
813 */
814static void ice_get_itr_intrl_gran(struct ice_hw *hw)
815{
816 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
817 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
818 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
819
820 switch (max_agg_bw) {
821 case ICE_MAX_AGG_BW_200G:
822 case ICE_MAX_AGG_BW_100G:
823 case ICE_MAX_AGG_BW_50G:
824 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
825 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
826 break;
827 case ICE_MAX_AGG_BW_25G:
828 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
829 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
830 break;
831 }
832}
833
834/**
835 * ice_init_hw - main hardware initialization routine
836 * @hw: pointer to the hardware structure
837 */
838enum ice_status ice_init_hw(struct ice_hw *hw)
839{
840 struct ice_aqc_get_phy_caps_data *pcaps;
841 enum ice_status status;
842 u16 mac_buf_len;
843 void *mac_buf;
844
845 /* Set MAC type based on DeviceID */
846 status = ice_set_mac_type(hw);
847 if (status)
848 return status;
849
850 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
851 PF_FUNC_RID_FUNC_NUM_M) >>
852 PF_FUNC_RID_FUNC_NUM_S;
853
854 status = ice_reset(hw, ICE_RESET_PFR);
855 if (status)
856 return status;
857
858 ice_get_itr_intrl_gran(hw);
859
860 status = ice_create_all_ctrlq(hw);
861 if (status)
862 goto err_unroll_cqinit;
863
864 /* Enable FW logging. Not fatal if this fails. */
865 status = ice_cfg_fw_log(hw, true);
866 if (status)
867 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
868
869 status = ice_clear_pf_cfg(hw);
870 if (status)
871 goto err_unroll_cqinit;
872
873 /* Set bit to enable Flow Director filters */
874 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
875 INIT_LIST_HEAD(&hw->fdir_list_head);
876
877 ice_clear_pxe_mode(hw);
878
879 status = ice_init_nvm(hw);
880 if (status)
881 goto err_unroll_cqinit;
882
883 status = ice_get_caps(hw);
884 if (status)
885 goto err_unroll_cqinit;
886
887 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
888 sizeof(*hw->port_info), GFP_KERNEL);
889 if (!hw->port_info) {
890 status = ICE_ERR_NO_MEMORY;
891 goto err_unroll_cqinit;
892 }
893
894 /* set the back pointer to HW */
895 hw->port_info->hw = hw;
896
897 /* Initialize port_info struct with switch configuration data */
898 status = ice_get_initial_sw_cfg(hw);
899 if (status)
900 goto err_unroll_alloc;
901
902 hw->evb_veb = true;
903
904 /* Query the allocated resources for Tx scheduler */
905 status = ice_sched_query_res_alloc(hw);
906 if (status) {
907 ice_debug(hw, ICE_DBG_SCHED,
908 "Failed to get scheduler allocated resources\n");
909 goto err_unroll_alloc;
910 }
911
912 /* Initialize port_info struct with scheduler data */
913 status = ice_sched_init_port(hw->port_info);
914 if (status)
915 goto err_unroll_sched;
916
917 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
918 if (!pcaps) {
919 status = ICE_ERR_NO_MEMORY;
920 goto err_unroll_sched;
921 }
922
923 /* Initialize port_info struct with PHY capabilities */
924 status = ice_aq_get_phy_caps(hw->port_info, false,
925 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
926 devm_kfree(ice_hw_to_dev(hw), pcaps);
927 if (status)
928 goto err_unroll_sched;
929
930 /* Initialize port_info struct with link information */
931 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
932 if (status)
933 goto err_unroll_sched;
934
935 /* need a valid SW entry point to build a Tx tree */
936 if (!hw->sw_entry_point_layer) {
937 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
938 status = ICE_ERR_CFG;
939 goto err_unroll_sched;
940 }
941 INIT_LIST_HEAD(&hw->agg_list);
942 /* Initialize max burst size */
943 if (!hw->max_burst_size)
944 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
945
946 status = ice_init_fltr_mgmt_struct(hw);
947 if (status)
948 goto err_unroll_sched;
949
950 /* Get MAC information */
951 /* A single port can report up to two (LAN and WoL) addresses */
952 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
953 sizeof(struct ice_aqc_manage_mac_read_resp),
954 GFP_KERNEL);
955 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
956
957 if (!mac_buf) {
958 status = ICE_ERR_NO_MEMORY;
959 goto err_unroll_fltr_mgmt_struct;
960 }
961
962 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
963 devm_kfree(ice_hw_to_dev(hw), mac_buf);
964
965 if (status)
966 goto err_unroll_fltr_mgmt_struct;
967 /* enable jumbo frame support at MAC level */
968 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
969 if (status)
970 goto err_unroll_fltr_mgmt_struct;
971 /* Obtain counter base index which would be used by flow director */
972 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
973 if (status)
974 goto err_unroll_fltr_mgmt_struct;
975 status = ice_init_hw_tbls(hw);
976 if (status)
977 goto err_unroll_fltr_mgmt_struct;
978 mutex_init(&hw->tnl_lock);
979 return 0;
980
981err_unroll_fltr_mgmt_struct:
982 ice_cleanup_fltr_mgmt_struct(hw);
983err_unroll_sched:
984 ice_sched_cleanup_all(hw);
985err_unroll_alloc:
986 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
987err_unroll_cqinit:
988 ice_destroy_all_ctrlq(hw);
989 return status;
990}
991
992/**
993 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
994 * @hw: pointer to the hardware structure
995 *
996 * This should be called only during nominal operation, not as a result of
997 * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
998 * applicable initializations if it fails for any reason.
999 */
1000void ice_deinit_hw(struct ice_hw *hw)
1001{
1002 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1003 ice_cleanup_fltr_mgmt_struct(hw);
1004
1005 ice_sched_cleanup_all(hw);
1006 ice_sched_clear_agg(hw);
1007 ice_free_seg(hw);
1008 ice_free_hw_tbls(hw);
1009 mutex_destroy(&hw->tnl_lock);
1010
1011 if (hw->port_info) {
1012 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1013 hw->port_info = NULL;
1014 }
1015
1016 /* Attempt to disable FW logging before shutting down control queues */
1017 ice_cfg_fw_log(hw, false);
1018 ice_destroy_all_ctrlq(hw);
1019
1020 /* Clear VSI contexts if not already cleared */
1021 ice_clear_all_vsi_ctx(hw);
1022}
1023
1024/**
1025 * ice_check_reset - Check to see if a global reset is complete
1026 * @hw: pointer to the hardware structure
1027 */
1028enum ice_status ice_check_reset(struct ice_hw *hw)
1029{
1030 u32 cnt, reg = 0, grst_timeout, uld_mask;
1031
1032 /* Poll for Device Active state in case a recent CORER, GLOBR,
1033 * or EMPR has occurred. The grst delay value is in 100ms units.
1034 * Add 1sec for outstanding AQ commands that can take a long time.
1035 */
1036 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1037 GLGEN_RSTCTL_GRSTDEL_S) + 10;
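	/* Example: a GRSTDEL field value of 5 corresponds to a 500 ms delay,
	 * so grst_timeout becomes 5 + 10 = 15 iterations of the 100 ms poll
	 * below, i.e. up to 1.5 s of waiting. (Value chosen for illustration.)
	 */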
1038
1039 for (cnt = 0; cnt < grst_timeout; cnt++) {
1040 mdelay(100);
1041 reg = rd32(hw, GLGEN_RSTAT);
1042 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1043 break;
1044 }
1045
1046 if (cnt == grst_timeout) {
1047 ice_debug(hw, ICE_DBG_INIT,
1048 "Global reset polling failed to complete.\n");
1049 return ICE_ERR_RESET_FAILED;
1050 }
1051
1052#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1053 GLNVM_ULD_PCIER_DONE_1_M |\
1054 GLNVM_ULD_CORER_DONE_M |\
1055 GLNVM_ULD_GLOBR_DONE_M |\
1056 GLNVM_ULD_POR_DONE_M |\
1057 GLNVM_ULD_POR_DONE_1_M |\
1058 GLNVM_ULD_PCIER_DONE_2_M)
1059
1060 uld_mask = ICE_RESET_DONE_MASK;
1061
1062 /* Device is Active; check Global Reset processes are done */
1063 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1064 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1065 if (reg == uld_mask) {
1066 ice_debug(hw, ICE_DBG_INIT,
1067 "Global reset processes done. %d\n", cnt);
1068 break;
1069 }
1070 mdelay(10);
1071 }
1072
1073 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1074 ice_debug(hw, ICE_DBG_INIT,
1075 "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1076 reg);
1077 return ICE_ERR_RESET_FAILED;
1078 }
1079
1080 return 0;
1081}
1082
1083/**
1084 * ice_pf_reset - Reset the PF
1085 * @hw: pointer to the hardware structure
1086 *
1087 * If a global reset has been triggered, this function checks
1088 * for its completion and then issues the PF reset
1089 */
1090static enum ice_status ice_pf_reset(struct ice_hw *hw)
1091{
1092 u32 cnt, reg;
1093
1094 /* If at function entry a global reset was already in progress, i.e.
1095 * state is not 'device active' or any of the reset done bits are not
1096 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1097 * global reset is done.
1098 */
1099 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1100 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1101 /* poll on global reset currently in progress until done */
1102 if (ice_check_reset(hw))
1103 return ICE_ERR_RESET_FAILED;
1104
1105 return 0;
1106 }
1107
1108 /* Reset the PF */
1109 reg = rd32(hw, PFGEN_CTRL);
1110
1111 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1112
1113 /* Wait for the PFR to complete. The wait time is the global config lock
1114 * timeout plus the PFR timeout which will account for a possible reset
1115 * that is occurring during a download package operation.
1116 */
1117 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1118 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1119 reg = rd32(hw, PFGEN_CTRL);
1120 if (!(reg & PFGEN_CTRL_PFSWR_M))
1121 break;
1122
1123 mdelay(1);
1124 }
1125
1126 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1127 ice_debug(hw, ICE_DBG_INIT,
1128 "PF reset polling failed to complete.\n");
1129 return ICE_ERR_RESET_FAILED;
1130 }
1131
1132 return 0;
1133}
1134
1135/**
1136 * ice_reset - Perform different types of reset
1137 * @hw: pointer to the hardware structure
1138 * @req: reset request
1139 *
1140 * This function triggers a reset as specified by the req parameter.
1141 *
1142 * Note:
1143 * If anything other than a PF reset is triggered, PXE mode is restored.
1144 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1145 * interface has been restored in the rebuild flow.
1146 */
1147enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1148{
1149 u32 val = 0;
1150
1151 switch (req) {
1152 case ICE_RESET_PFR:
1153 return ice_pf_reset(hw);
1154 case ICE_RESET_CORER:
1155 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1156 val = GLGEN_RTRIG_CORER_M;
1157 break;
1158 case ICE_RESET_GLOBR:
1159 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1160 val = GLGEN_RTRIG_GLOBR_M;
1161 break;
1162 default:
1163 return ICE_ERR_PARAM;
1164 }
1165
1166 val |= rd32(hw, GLGEN_RTRIG);
1167 wr32(hw, GLGEN_RTRIG, val);
1168 ice_flush(hw);
1169
1170 /* wait for the FW to be ready */
1171 return ice_check_reset(hw);
1172}
1173
1174/**
1175 * ice_copy_rxq_ctx_to_hw
1176 * @hw: pointer to the hardware structure
1177 * @ice_rxq_ctx: pointer to the rxq context
1178 * @rxq_index: the index of the Rx queue
1179 *
1180 * Copies rxq context from dense structure to HW register space
1181 */
1182static enum ice_status
1183ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1184{
1185 u8 i;
1186
1187 if (!ice_rxq_ctx)
1188 return ICE_ERR_BAD_PTR;
1189
1190 if (rxq_index > QRX_CTRL_MAX_INDEX)
1191 return ICE_ERR_PARAM;
1192
1193 /* Copy each dword separately to HW */
1194 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1195 wr32(hw, QRX_CONTEXT(i, rxq_index),
1196 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1197
1198 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1199 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1200 }
1201
1202 return 0;
1203}
1204
1205/* LAN Rx Queue Context */
1206static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1207 /* Field Width LSB */
1208 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1209 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1210 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1211 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1212 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1213 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1214 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1215 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1216 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1217 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1218 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1219 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1220 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1221 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1222 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1223 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1224 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1225 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1226 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1227 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1228 { 0 }
1229};
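
/* How to read the ICE_CTX_STORE table above: each entry gives a field's width
 * in bits and the LSB position of that field within the dense Rx queue
 * context image. For example, "head" (width 13, LSB 0) occupies bits 0-12 of
 * the image, "cpuid" (width 8, LSB 13) occupies bits 13-20, and "base"
 * (width 57, LSB 32) spans bits 32-88, crossing dword boundaries. The packing
 * itself is performed by ice_set_ctx() when ice_write_rxq_ctx() is called.
 */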
1230
1231/**
1232 * ice_write_rxq_ctx
1233 * @hw: pointer to the hardware structure
1234 * @rlan_ctx: pointer to the rxq context
1235 * @rxq_index: the index of the Rx queue
1236 *
1237 * Converts rxq context from sparse to dense structure, writes it to HW
1238 * register space, and enables the hardware to prefetch descriptors
1239 * instead of only fetching them on demand.
1240 */
1241enum ice_status
1242ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1243 u32 rxq_index)
1244{
1245 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1246
1247 if (!rlan_ctx)
1248 return ICE_ERR_BAD_PTR;
1249
1250 rlan_ctx->prefena = 1;
1251
1252 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1253 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1254}
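
/* Usage sketch (illustrative; the field values, shift amounts and queue index
 * are hypothetical and should be checked against the hardware documentation):
 * filling a minimal Rx queue context and writing it to a queue.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	(base address in 128-byte units, assumed)
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	(buffer size granularity, assumed)
 *	rlan_ctx.rxmax = max_frame_size;
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index))
 *		goto err;
 */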
1255
1256/* LAN Tx Queue Context */
1257const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1258 /* Field Width LSB */
1259 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1260 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1261 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1262 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1263 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1264 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1265 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1266 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1267 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1268 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1269 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1270 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1271 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1272 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1273 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1274 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1275 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1276 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1277 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1278 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1279 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1280 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1281 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1282 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1283 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1284 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1285 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1286 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1287 { 0 }
1288};
1289
1290/* FW Admin Queue command wrappers */
1291
1292/* Software lock/mutex that is meant to be held while the Global Config Lock
1293 * in firmware is acquired by the software to prevent most (but not all) types
1294 * of AQ commands from being sent to FW
1295 */
1296DEFINE_MUTEX(ice_global_cfg_lock_sw);
1297
1298/**
1299 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1300 * @hw: pointer to the HW struct
1301 * @desc: descriptor describing the command
1302 * @buf: buffer to use for indirect commands (NULL for direct commands)
1303 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1304 * @cd: pointer to command details structure
1305 *
1306 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1307 */
1308enum ice_status
1309ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1310 u16 buf_size, struct ice_sq_cd *cd)
1311{
1312 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1313 bool lock_acquired = false;
1314 enum ice_status status;
1315
1316 /* When a package download is in process (i.e. when the firmware's
1317 * Global Configuration Lock resource is held), only the Download
1318 * Package, Get Version, Get Package Info List and Release Resource
1319 * (with resource ID set to Global Config Lock) AdminQ commands are
1320 * allowed; all others must block until the package download completes
1321 * and the Global Config Lock is released. See also
1322 * ice_acquire_global_cfg_lock().
1323 */
1324 switch (le16_to_cpu(desc->opcode)) {
1325 case ice_aqc_opc_download_pkg:
1326 case ice_aqc_opc_get_pkg_info_list:
1327 case ice_aqc_opc_get_ver:
1328 break;
1329 case ice_aqc_opc_release_res:
1330 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1331 break;
1332 fallthrough;
1333 default:
1334 mutex_lock(&ice_global_cfg_lock_sw);
1335 lock_acquired = true;
1336 break;
1337 }
1338
1339 status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1340 if (lock_acquired)
1341 mutex_unlock(&ice_global_cfg_lock_sw);
1342
1343 return status;
1344}
1345
1346/**
1347 * ice_aq_get_fw_ver
1348 * @hw: pointer to the HW struct
1349 * @cd: pointer to command details structure or NULL
1350 *
1351 * Get the firmware version (0x0001) from the admin queue commands
1352 */
1353enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1354{
1355 struct ice_aqc_get_ver *resp;
1356 struct ice_aq_desc desc;
1357 enum ice_status status;
1358
1359 resp = &desc.params.get_ver;
1360
1361 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1362
1363 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1364
1365 if (!status) {
1366 hw->fw_branch = resp->fw_branch;
1367 hw->fw_maj_ver = resp->fw_major;
1368 hw->fw_min_ver = resp->fw_minor;
1369 hw->fw_patch = resp->fw_patch;
1370 hw->fw_build = le32_to_cpu(resp->fw_build);
1371 hw->api_branch = resp->api_branch;
1372 hw->api_maj_ver = resp->api_major;
1373 hw->api_min_ver = resp->api_minor;
1374 hw->api_patch = resp->api_patch;
1375 }
1376
1377 return status;
1378}
1379
1380/**
1381 * ice_aq_send_driver_ver
1382 * @hw: pointer to the HW struct
1383 * @dv: driver's major, minor version
1384 * @cd: pointer to command details structure or NULL
1385 *
1386 * Send the driver version (0x0002) to the firmware
1387 */
1388enum ice_status
1389ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1390 struct ice_sq_cd *cd)
1391{
1392 struct ice_aqc_driver_ver *cmd;
1393 struct ice_aq_desc desc;
1394 u16 len;
1395
1396 cmd = &desc.params.driver_ver;
1397
1398 if (!dv)
1399 return ICE_ERR_PARAM;
1400
1401 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1402
1403 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1404 cmd->major_ver = dv->major_ver;
1405 cmd->minor_ver = dv->minor_ver;
1406 cmd->build_ver = dv->build_ver;
1407 cmd->subbuild_ver = dv->subbuild_ver;
1408
1409 len = 0;
1410 while (len < sizeof(dv->driver_string) &&
1411 isascii(dv->driver_string[len]) && dv->driver_string[len])
1412 len++;
1413
1414 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1415}
1416
1417/**
1418 * ice_aq_q_shutdown
1419 * @hw: pointer to the HW struct
1420 * @unloading: is the driver unloading itself
1421 *
1422 * Tell the Firmware that we're shutting down the AdminQ and whether
1423 * or not the driver is unloading as well (0x0003).
1424 */
1425enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1426{
1427 struct ice_aqc_q_shutdown *cmd;
1428 struct ice_aq_desc desc;
1429
1430 cmd = &desc.params.q_shutdown;
1431
1432 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1433
1434 if (unloading)
1435 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1436
1437 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1438}
1439
1440/**
1441 * ice_aq_req_res
1442 * @hw: pointer to the HW struct
1443 * @res: resource ID
1444 * @access: access type
1445 * @sdp_number: resource number
1446 * @timeout: the maximum time in ms that the driver may hold the resource
1447 * @cd: pointer to command details structure or NULL
1448 *
1449 * Requests common resource using the admin queue commands (0x0008).
1450 * When attempting to acquire the Global Config Lock, the driver can
1451 * learn of three states:
1452 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1453 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1454 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1455 * successfully downloaded the package; the driver does
1456 * not have to download the package and can continue
1457 * loading
1458 *
1459 * Note that if the caller is in an acquire-lock, perform-action, release-lock
1460 * phase of operation, the FW may detect a timeout and issue
1461 * a CORER. In this case, the driver will receive a CORER interrupt and will
1462 * have to determine its cause. The calling thread that is handling this flow
1463 * will likely get an error propagated back to it indicating the Download
1464 * Package, Update Package or the Release Resource AQ commands timed out.
1465 */
1466static enum ice_status
1467ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1468 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1469 struct ice_sq_cd *cd)
1470{
1471 struct ice_aqc_req_res *cmd_resp;
1472 struct ice_aq_desc desc;
1473 enum ice_status status;
1474
1475 cmd_resp = &desc.params.res_owner;
1476
1477 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1478
1479 cmd_resp->res_id = cpu_to_le16(res);
1480 cmd_resp->access_type = cpu_to_le16(access);
1481 cmd_resp->res_number = cpu_to_le32(sdp_number);
1482 cmd_resp->timeout = cpu_to_le32(*timeout);
1483 *timeout = 0;
1484
1485 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1486
1487	/* The completion's Timeout field specifies the maximum time in ms
1488	 * that the driver may hold the resource.
1489 */
1490
1491 /* Global config lock response utilizes an additional status field.
1492 *
1493 * If the Global config lock resource is held by some other driver, the
1494 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1495 * and the timeout field indicates the maximum time the current owner
1496 * of the resource has to free it.
1497 */
1498 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1499 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1500 *timeout = le32_to_cpu(cmd_resp->timeout);
1501 return 0;
1502 } else if (le16_to_cpu(cmd_resp->status) ==
1503 ICE_AQ_RES_GLBL_IN_PROG) {
1504 *timeout = le32_to_cpu(cmd_resp->timeout);
1505 return ICE_ERR_AQ_ERROR;
1506 } else if (le16_to_cpu(cmd_resp->status) ==
1507 ICE_AQ_RES_GLBL_DONE) {
1508 return ICE_ERR_AQ_NO_WORK;
1509 }
1510
1511 /* invalid FW response, force a timeout immediately */
1512 *timeout = 0;
1513 return ICE_ERR_AQ_ERROR;
1514 }
1515
1516 /* If the resource is held by some other driver, the command completes
1517 * with a busy return value and the timeout field indicates the maximum
1518 * time the current owner of the resource has to free it.
1519 */
1520 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1521 *timeout = le32_to_cpu(cmd_resp->timeout);
1522
1523 return status;
1524}
1525
1526/**
1527 * ice_aq_release_res
1528 * @hw: pointer to the HW struct
1529 * @res: resource ID
1530 * @sdp_number: resource number
1531 * @cd: pointer to command details structure or NULL
1532 *
1533 * release common resource using the admin queue commands (0x0009)
1534 */
1535static enum ice_status
1536ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1537 struct ice_sq_cd *cd)
1538{
1539 struct ice_aqc_req_res *cmd;
1540 struct ice_aq_desc desc;
1541
1542 cmd = &desc.params.res_owner;
1543
1544 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1545
1546 cmd->res_id = cpu_to_le16(res);
1547 cmd->res_number = cpu_to_le32(sdp_number);
1548
1549 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1550}
1551
1552/**
1553 * ice_acquire_res
1554 * @hw: pointer to the HW structure
1555 * @res: resource ID
1556 * @access: access type (read or write)
1557 * @timeout: timeout in milliseconds
1558 *
1559 * This function will attempt to acquire the ownership of a resource.
1560 */
1561enum ice_status
1562ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1563 enum ice_aq_res_access_type access, u32 timeout)
1564{
1565#define ICE_RES_POLLING_DELAY_MS 10
1566 u32 delay = ICE_RES_POLLING_DELAY_MS;
1567 u32 time_left = timeout;
1568 enum ice_status status;
1569
1570 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1571
1572 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1573 * previously acquired the resource and performed any necessary updates;
1574 * in this case the caller does not obtain the resource and has no
1575 * further work to do.
1576 */
1577 if (status == ICE_ERR_AQ_NO_WORK)
1578 goto ice_acquire_res_exit;
1579
1580 if (status)
1581 ice_debug(hw, ICE_DBG_RES,
1582 "resource %d acquire type %d failed.\n", res, access);
1583
1584	/* If necessary, poll until the current lock owner times out */
1585 timeout = time_left;
1586 while (status && timeout && time_left) {
1587 mdelay(delay);
1588 timeout = (timeout > delay) ? timeout - delay : 0;
1589 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1590
1591 if (status == ICE_ERR_AQ_NO_WORK)
1592 /* lock free, but no work to do */
1593 break;
1594
1595 if (!status)
1596 /* lock acquired */
1597 break;
1598 }
1599 if (status && status != ICE_ERR_AQ_NO_WORK)
1600 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1601
1602ice_acquire_res_exit:
1603 if (status == ICE_ERR_AQ_NO_WORK) {
1604 if (access == ICE_RES_WRITE)
1605 ice_debug(hw, ICE_DBG_RES,
1606 "resource indicates no work to do.\n");
1607 else
1608 ice_debug(hw, ICE_DBG_RES,
1609 "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1610 }
1611 return status;
1612}
1613
1614/**
1615 * ice_release_res
1616 * @hw: pointer to the HW structure
1617 * @res: resource ID
1618 *
1619 * This function will release a resource using the proper Admin Command.
1620 */
1621void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1622{
1623 enum ice_status status;
1624 u32 total_delay = 0;
1625
1626 status = ice_aq_release_res(hw, res, 0, NULL);
1627
1628	/* There are some rare cases in which trying to release the resource
1629	 * results in an admin queue timeout, so handle them correctly.
1630 */
1631 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1632 (total_delay < hw->adminq.sq_cmd_timeout)) {
1633 mdelay(1);
1634 status = ice_aq_release_res(hw, res, 0, NULL);
1635 total_delay++;
1636 }
1637}
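
/* Usage sketch (illustrative): the acquire/perform/release pattern that
 * ice_acquire_res() and ice_release_res() implement, shown here for the NVM
 * resource. ICE_NVM_RES_ID and ICE_RES_READ are assumed to be the resource
 * and access identifiers defined elsewhere in the driver; the 3000 ms timeout
 * is chosen only for illustration.
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (status)
 *		return status;
 *	(read from the NVM via the appropriate AQ commands)
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */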
1638
1639/**
1640 * ice_aq_alloc_free_res - command to allocate/free resources
1641 * @hw: pointer to the HW struct
1642 * @num_entries: number of resource entries in buffer
1643 * @buf: Indirect buffer to hold data parameters and response
1644 * @buf_size: size of buffer for indirect commands
1645 * @opc: pass in the command opcode
1646 * @cd: pointer to command details structure or NULL
1647 *
1648 * Helper function to allocate/free resources using the admin queue commands
1649 */
1650enum ice_status
1651ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1652 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1653 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1654{
1655 struct ice_aqc_alloc_free_res_cmd *cmd;
1656 struct ice_aq_desc desc;
1657
1658 cmd = &desc.params.sw_res_ctrl;
1659
1660 if (!buf)
1661 return ICE_ERR_PARAM;
1662
1663 if (buf_size < (num_entries * sizeof(buf->elem[0])))
1664 return ICE_ERR_PARAM;
1665
1666 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1667
1668 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1669
1670 cmd->num_entries = cpu_to_le16(num_entries);
1671
1672 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1673}
1674
1675/**
1676 * ice_alloc_hw_res - allocate resource
1677 * @hw: pointer to the HW struct
1678 * @type: type of resource
1679 * @num: number of resources to allocate
1680 * @btm: allocate from bottom
1681 * @res: pointer to array that will receive the resources
1682 */
1683enum ice_status
1684ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1685{
1686 struct ice_aqc_alloc_free_res_elem *buf;
1687 enum ice_status status;
1688 u16 buf_len;
1689
1690 buf_len = struct_size(buf, elem, num);
1691 buf = kzalloc(buf_len, GFP_KERNEL);
1692 if (!buf)
1693 return ICE_ERR_NO_MEMORY;
1694
1695 /* Prepare buffer to allocate resource. */
1696 buf->num_elems = cpu_to_le16(num);
1697 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1698 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1699 if (btm)
1700 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1701
1702 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1703 ice_aqc_opc_alloc_res, NULL);
1704 if (status)
1705 goto ice_alloc_res_exit;
1706
1707 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1708
1709ice_alloc_res_exit:
1710 kfree(buf);
1711 return status;
1712}
1713
1714/**
1715 * ice_free_hw_res - free allocated HW resource
1716 * @hw: pointer to the HW struct
1717 * @type: type of resource to free
1718 * @num: number of resources
1719 * @res: pointer to array that contains the resources to free
1720 */
1721enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1722{
1723 struct ice_aqc_alloc_free_res_elem *buf;
1724 enum ice_status status;
1725 u16 buf_len;
1726
1727 buf_len = struct_size(buf, elem, num);
1728 buf = kzalloc(buf_len, GFP_KERNEL);
1729 if (!buf)
1730 return ICE_ERR_NO_MEMORY;
1731
1732 /* Prepare buffer to free resource. */
1733 buf->num_elems = cpu_to_le16(num);
1734 buf->res_type = cpu_to_le16(type);
1735 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1736
1737 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1738 ice_aqc_opc_free_res, NULL);
1739 if (status)
1740 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1741
1742 kfree(buf);
1743 return status;
1744}
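
/* Usage sketch (illustrative only): a caller that needs a single dedicated
 * HW resource entry would typically pair the two helpers above:
 *
 *	u16 res_id;
 *
 *	if (!ice_alloc_hw_res(hw, type, 1, false, &res_id)) {
 *		... use res_id ...
 *		ice_free_hw_res(hw, type, 1, &res_id);
 *	}
 *
 * where type is one of the ICE_AQC_RES_TYPE_* values defined in
 * ice_adminq_cmd.h.
 */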
1745
1746/**
1747 * ice_get_num_per_func - determine number of resources per PF
1748 * @hw: pointer to the HW structure
 * @max: value to be evenly split among all PFs
1750 *
1751 * Determine the number of valid functions by going through the bitmap returned
1752 * from parsing capabilities and use this to calculate the number of resources
1753 * per PF based on the max value passed in.
1754 */
1755static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1756{
1757 u8 funcs;
1758
1759#define ICE_CAPS_VALID_FUNCS_M 0xFF
1760 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1761 ICE_CAPS_VALID_FUNCS_M);
1762
1763 if (!funcs)
1764 return 0;
1765
1766 return max / funcs;
1767}
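
/* Worked example (illustrative numbers): with a valid_functions bitmap of
 * 0x0F (four PFs enabled) and max = 768, ice_get_num_per_func() returns
 * 768 / 4 = 192 resources per PF.
 */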
1768
1769/**
1770 * ice_parse_common_caps - parse common device/function capabilities
1771 * @hw: pointer to the HW struct
1772 * @caps: pointer to common capabilities structure
1773 * @elem: the capability element to parse
1774 * @prefix: message prefix for tracing capabilities
1775 *
1776 * Given a capability element, extract relevant details into the common
1777 * capability structure.
1778 *
1779 * Returns: true if the capability matches one of the common capability ids,
1780 * false otherwise.
1781 */
1782static bool
1783ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1784 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1785{
1786 u32 logical_id = le32_to_cpu(elem->logical_id);
1787 u32 phys_id = le32_to_cpu(elem->phys_id);
1788 u32 number = le32_to_cpu(elem->number);
1789 u16 cap = le16_to_cpu(elem->cap);
1790 bool found = true;
1791
1792 switch (cap) {
1793 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1794 caps->valid_functions = number;
1795 ice_debug(hw, ICE_DBG_INIT,
1796 "%s: valid_functions (bitmap) = %d\n", prefix,
1797 caps->valid_functions);
1798 break;
1799 case ICE_AQC_CAPS_SRIOV:
1800 caps->sr_iov_1_1 = (number == 1);
1801 ice_debug(hw, ICE_DBG_INIT,
1802 "%s: sr_iov_1_1 = %d\n", prefix,
1803 caps->sr_iov_1_1);
1804 break;
1805 case ICE_AQC_CAPS_DCB:
1806 caps->dcb = (number == 1);
1807 caps->active_tc_bitmap = logical_id;
1808 caps->maxtc = phys_id;
1809 ice_debug(hw, ICE_DBG_INIT,
1810 "%s: dcb = %d\n", prefix, caps->dcb);
1811 ice_debug(hw, ICE_DBG_INIT,
1812 "%s: active_tc_bitmap = %d\n", prefix,
1813 caps->active_tc_bitmap);
1814 ice_debug(hw, ICE_DBG_INIT,
1815 "%s: maxtc = %d\n", prefix, caps->maxtc);
1816 break;
1817 case ICE_AQC_CAPS_RSS:
1818 caps->rss_table_size = number;
1819 caps->rss_table_entry_width = logical_id;
1820 ice_debug(hw, ICE_DBG_INIT,
1821 "%s: rss_table_size = %d\n", prefix,
1822 caps->rss_table_size);
1823 ice_debug(hw, ICE_DBG_INIT,
1824 "%s: rss_table_entry_width = %d\n", prefix,
1825 caps->rss_table_entry_width);
1826 break;
1827 case ICE_AQC_CAPS_RXQS:
1828 caps->num_rxq = number;
1829 caps->rxq_first_id = phys_id;
1830 ice_debug(hw, ICE_DBG_INIT,
1831 "%s: num_rxq = %d\n", prefix,
1832 caps->num_rxq);
1833 ice_debug(hw, ICE_DBG_INIT,
1834 "%s: rxq_first_id = %d\n", prefix,
1835 caps->rxq_first_id);
1836 break;
1837 case ICE_AQC_CAPS_TXQS:
1838 caps->num_txq = number;
1839 caps->txq_first_id = phys_id;
1840 ice_debug(hw, ICE_DBG_INIT,
1841 "%s: num_txq = %d\n", prefix,
1842 caps->num_txq);
1843 ice_debug(hw, ICE_DBG_INIT,
1844 "%s: txq_first_id = %d\n", prefix,
1845 caps->txq_first_id);
1846 break;
1847 case ICE_AQC_CAPS_MSIX:
1848 caps->num_msix_vectors = number;
1849 caps->msix_vector_first_id = phys_id;
1850 ice_debug(hw, ICE_DBG_INIT,
1851 "%s: num_msix_vectors = %d\n", prefix,
1852 caps->num_msix_vectors);
1853 ice_debug(hw, ICE_DBG_INIT,
1854 "%s: msix_vector_first_id = %d\n", prefix,
1855 caps->msix_vector_first_id);
1856 break;
1857 case ICE_AQC_CAPS_PENDING_NVM_VER:
1858 caps->nvm_update_pending_nvm = true;
1859 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
1860 break;
1861 case ICE_AQC_CAPS_PENDING_OROM_VER:
1862 caps->nvm_update_pending_orom = true;
1863 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
1864 break;
1865 case ICE_AQC_CAPS_PENDING_NET_VER:
1866 caps->nvm_update_pending_netlist = true;
1867 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
1868 break;
1869 case ICE_AQC_CAPS_NVM_MGMT:
1870 caps->nvm_unified_update =
1871 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1872 true : false;
1873 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1874 caps->nvm_unified_update);
1875 break;
1876 case ICE_AQC_CAPS_MAX_MTU:
1877 caps->max_mtu = number;
1878 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1879 prefix, caps->max_mtu);
1880 break;
1881 default:
1882 /* Not one of the recognized common capabilities */
1883 found = false;
1884 }
1885
1886 return found;
1887}
1888
1889/**
1890 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
1891 * @hw: pointer to the HW structure
1892 * @caps: pointer to capabilities structure to fix
1893 *
1894 * Re-calculate the capabilities that are dependent on the number of physical
1895 * ports; i.e. some features are not supported or function differently on
1896 * devices with more than 4 ports.
1897 */
1898static void
1899ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
1900{
1901 /* This assumes device capabilities are always scanned before function
1902 * capabilities during the initialization flow.
1903 */
1904 if (hw->dev_caps.num_funcs > 4) {
1905 /* Max 4 TCs per port */
1906 caps->maxtc = 4;
1907 ice_debug(hw, ICE_DBG_INIT,
1908 "reducing maxtc to %d (based on #ports)\n",
1909 caps->maxtc);
1910 }
1911}
1912
1913/**
1914 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
1915 * @hw: pointer to the HW struct
1916 * @func_p: pointer to function capabilities structure
1917 * @cap: pointer to the capability element to parse
1918 *
1919 * Extract function capabilities for ICE_AQC_CAPS_VF.
1920 */
1921static void
1922ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1923 struct ice_aqc_list_caps_elem *cap)
1924{
1925 u32 logical_id = le32_to_cpu(cap->logical_id);
1926 u32 number = le32_to_cpu(cap->number);
1927
1928 func_p->num_allocd_vfs = number;
1929 func_p->vf_base_id = logical_id;
1930 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
1931 func_p->num_allocd_vfs);
1932 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
1933 func_p->vf_base_id);
1934}
1935
1936/**
1937 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
1938 * @hw: pointer to the HW struct
1939 * @func_p: pointer to function capabilities structure
1940 * @cap: pointer to the capability element to parse
1941 *
1942 * Extract function capabilities for ICE_AQC_CAPS_VSI.
1943 */
1944static void
1945ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1946 struct ice_aqc_list_caps_elem *cap)
1947{
1948 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
1949 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
1950 le32_to_cpu(cap->number));
1951 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
1952 func_p->guar_num_vsi);
1953}
1954
1955/**
1956 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
1957 * @hw: pointer to the HW struct
1958 * @func_p: pointer to function capabilities structure
1959 *
1960 * Extract function capabilities for ICE_AQC_CAPS_FD.
1961 */
1962static void
1963ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
1964{
1965 u32 reg_val, val;
1966
1967 reg_val = rd32(hw, GLQF_FD_SIZE);
1968 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1969 GLQF_FD_SIZE_FD_GSIZE_S;
1970 func_p->fd_fltr_guar =
1971 ice_get_num_per_func(hw, val);
1972 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1973 GLQF_FD_SIZE_FD_BSIZE_S;
1974 func_p->fd_fltr_best_effort = val;
1975
1976 ice_debug(hw, ICE_DBG_INIT,
1977 "func caps: fd_fltr_guar = %d\n",
1978 func_p->fd_fltr_guar);
1979 ice_debug(hw, ICE_DBG_INIT,
1980 "func caps: fd_fltr_best_effort = %d\n",
1981 func_p->fd_fltr_best_effort);
1982}
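
/* Worked example (illustrative numbers): if the FD_GSIZE field of
 * GLQF_FD_SIZE reports 2048 guaranteed filters and the device exposes four
 * PFs, fd_fltr_guar becomes 2048 / 4 = 512 per PF, while
 * fd_fltr_best_effort is taken directly from the FD_BSIZE field of the
 * same register.
 */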
1983
1984/**
1985 * ice_parse_func_caps - Parse function capabilities
1986 * @hw: pointer to the HW struct
1987 * @func_p: pointer to function capabilities structure
1988 * @buf: buffer containing the function capability records
1989 * @cap_count: the number of capabilities
1990 *
1991 * Helper function to parse function (0x000A) capabilities list. For
1992 * capabilities shared between device and function, this relies on
1993 * ice_parse_common_caps.
1994 *
1995 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
1997 */
1998static void
1999ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2000 void *buf, u32 cap_count)
2001{
2002 struct ice_aqc_list_caps_elem *cap_resp;
2003 u32 i;
2004
2005 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2006
2007 memset(func_p, 0, sizeof(*func_p));
2008
2009 for (i = 0; i < cap_count; i++) {
2010 u16 cap = le16_to_cpu(cap_resp[i].cap);
2011 bool found;
2012
2013 found = ice_parse_common_caps(hw, &func_p->common_cap,
2014 &cap_resp[i], "func caps");
2015
2016 switch (cap) {
2017 case ICE_AQC_CAPS_VF:
2018 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2019 break;
2020 case ICE_AQC_CAPS_VSI:
2021 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2022 break;
2023 case ICE_AQC_CAPS_FD:
2024 ice_parse_fdir_func_caps(hw, func_p);
2025 break;
2026 default:
2027 /* Don't list common capabilities as unknown */
2028 if (!found)
2029 ice_debug(hw, ICE_DBG_INIT,
2030 "func caps: unknown capability[%d]: 0x%x\n",
2031 i, cap);
2032 break;
2033 }
2034 }
2035
2036 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2037}
2038
2039/**
2040 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2041 * @hw: pointer to the HW struct
2042 * @dev_p: pointer to device capabilities structure
2043 * @cap: capability element to parse
2044 *
2045 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2046 */
2047static void
2048ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2049 struct ice_aqc_list_caps_elem *cap)
2050{
2051 u32 number = le32_to_cpu(cap->number);
2052
2053 dev_p->num_funcs = hweight32(number);
2054 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2055 dev_p->num_funcs);
2056}
2057
2058/**
2059 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2060 * @hw: pointer to the HW struct
2061 * @dev_p: pointer to device capabilities structure
2062 * @cap: capability element to parse
2063 *
2064 * Parse ICE_AQC_CAPS_VF for device capabilities.
2065 */
2066static void
2067ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2068 struct ice_aqc_list_caps_elem *cap)
2069{
2070 u32 number = le32_to_cpu(cap->number);
2071
2072 dev_p->num_vfs_exposed = number;
2073 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2074 dev_p->num_vfs_exposed);
2075}
2076
2077/**
2078 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2079 * @hw: pointer to the HW struct
2080 * @dev_p: pointer to device capabilities structure
2081 * @cap: capability element to parse
2082 *
2083 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2084 */
2085static void
2086ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2087 struct ice_aqc_list_caps_elem *cap)
2088{
2089 u32 number = le32_to_cpu(cap->number);
2090
2091 dev_p->num_vsi_allocd_to_host = number;
2092 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2093 dev_p->num_vsi_allocd_to_host);
2094}
2095
2096/**
2097 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2098 * @hw: pointer to the HW struct
2099 * @dev_p: pointer to device capabilities structure
2100 * @cap: capability element to parse
2101 *
2102 * Parse ICE_AQC_CAPS_FD for device capabilities.
2103 */
2104static void
2105ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2106 struct ice_aqc_list_caps_elem *cap)
2107{
2108 u32 number = le32_to_cpu(cap->number);
2109
2110 dev_p->num_flow_director_fltr = number;
2111 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2112 dev_p->num_flow_director_fltr);
2113}
2114
2115/**
2116 * ice_parse_dev_caps - Parse device capabilities
2117 * @hw: pointer to the HW struct
2118 * @dev_p: pointer to device capabilities structure
2119 * @buf: buffer containing the device capability records
2120 * @cap_count: the number of capabilities
2121 *
 * Helper function to parse the device (0x000B) capabilities list. For
2123 * capabilities shared between device and function, this relies on
2124 * ice_parse_common_caps.
2125 *
2126 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
2128 */
2129static void
2130ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2131 void *buf, u32 cap_count)
2132{
2133 struct ice_aqc_list_caps_elem *cap_resp;
2134 u32 i;
2135
2136 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2137
2138 memset(dev_p, 0, sizeof(*dev_p));
2139
2140 for (i = 0; i < cap_count; i++) {
2141 u16 cap = le16_to_cpu(cap_resp[i].cap);
2142 bool found;
2143
2144 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2145 &cap_resp[i], "dev caps");
2146
2147 switch (cap) {
2148 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2149 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2150 break;
2151 case ICE_AQC_CAPS_VF:
2152 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2153 break;
2154 case ICE_AQC_CAPS_VSI:
2155 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2156 break;
2157 case ICE_AQC_CAPS_FD:
2158 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2159 break;
2160 default:
2161 /* Don't list common capabilities as unknown */
2162 if (!found)
2163 ice_debug(hw, ICE_DBG_INIT,
2164 "dev caps: unknown capability[%d]: 0x%x\n",
2165 i, cap);
2166 break;
2167 }
2168 }
2169
2170 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2171}
2172
2173/**
2174 * ice_aq_list_caps - query function/device capabilities
2175 * @hw: pointer to the HW struct
2176 * @buf: a buffer to hold the capabilities
2177 * @buf_size: size of the buffer
2178 * @cap_count: if not NULL, set to the number of capabilities reported
2179 * @opc: capabilities type to discover, device or function
2180 * @cd: pointer to command details structure or NULL
2181 *
2182 * Get the function (0x000A) or device (0x000B) capabilities description from
2183 * firmware and store it in the buffer.
2184 *
2185 * If the cap_count pointer is not NULL, then it is set to the number of
2186 * capabilities firmware will report. Note that if the buffer size is too
2187 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2188 * cap_count will still be updated in this case. It is recommended that the
2189 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2190 * firmware could return) to avoid this.
2191 */
2192enum ice_status
2193ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2194 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2195{
2196 struct ice_aqc_list_caps *cmd;
2197 struct ice_aq_desc desc;
2198 enum ice_status status;
2199
2200 cmd = &desc.params.get_cap;
2201
2202 if (opc != ice_aqc_opc_list_func_caps &&
2203 opc != ice_aqc_opc_list_dev_caps)
2204 return ICE_ERR_PARAM;
2205
2206 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2207 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2208
2209 if (cap_count)
2210 *cap_count = le32_to_cpu(cmd->count);
2211
2212 return status;
2213}
2214
2215/**
2216 * ice_discover_dev_caps - Read and extract device capabilities
2217 * @hw: pointer to the hardware structure
2218 * @dev_caps: pointer to device capabilities structure
2219 *
2220 * Read the device capabilities and extract them into the dev_caps structure
2221 * for later use.
2222 */
2223enum ice_status
2224ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2225{
2226 enum ice_status status;
2227 u32 cap_count = 0;
2228 void *cbuf;
2229
2230 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2231 if (!cbuf)
2232 return ICE_ERR_NO_MEMORY;
2233
2234 /* Although the driver doesn't know the number of capabilities the
2235 * device will return, we can simply send a 4KB buffer, the maximum
2236 * possible size that firmware can return.
2237 */
2238 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2239
2240 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2241 ice_aqc_opc_list_dev_caps, NULL);
2242 if (!status)
2243 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2244 kfree(cbuf);
2245
2246 return status;
2247}
2248
2249/**
2250 * ice_discover_func_caps - Read and extract function capabilities
2251 * @hw: pointer to the hardware structure
2252 * @func_caps: pointer to function capabilities structure
2253 *
2254 * Read the function capabilities and extract them into the func_caps structure
2255 * for later use.
2256 */
2257static enum ice_status
2258ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2259{
2260 enum ice_status status;
2261 u32 cap_count = 0;
2262 void *cbuf;
2263
2264 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2265 if (!cbuf)
2266 return ICE_ERR_NO_MEMORY;
2267
2268 /* Although the driver doesn't know the number of capabilities the
2269 * device will return, we can simply send a 4KB buffer, the maximum
2270 * possible size that firmware can return.
2271 */
2272 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2273
2274 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2275 ice_aqc_opc_list_func_caps, NULL);
2276 if (!status)
2277 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2278 kfree(cbuf);
2279
2280 return status;
2281}
2282
2283/**
2284 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2285 * @hw: pointer to the hardware structure
2286 */
2287void ice_set_safe_mode_caps(struct ice_hw *hw)
2288{
2289 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2290 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2291 struct ice_hw_common_caps cached_caps;
2292 u32 num_funcs;
2293
2294 /* cache some func_caps values that should be restored after memset */
2295 cached_caps = func_caps->common_cap;
2296
2297 /* unset func capabilities */
2298 memset(func_caps, 0, sizeof(*func_caps));
2299
2300#define ICE_RESTORE_FUNC_CAP(name) \
2301 func_caps->common_cap.name = cached_caps.name
2302
2303 /* restore cached values */
2304 ICE_RESTORE_FUNC_CAP(valid_functions);
2305 ICE_RESTORE_FUNC_CAP(txq_first_id);
2306 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2307 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2308 ICE_RESTORE_FUNC_CAP(max_mtu);
2309 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2310 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2311 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2312 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2313
2314 /* one Tx and one Rx queue in safe mode */
2315 func_caps->common_cap.num_rxq = 1;
2316 func_caps->common_cap.num_txq = 1;
2317
2318 /* two MSIX vectors, one for traffic and one for misc causes */
2319 func_caps->common_cap.num_msix_vectors = 2;
2320 func_caps->guar_num_vsi = 1;
2321
2322 /* cache some dev_caps values that should be restored after memset */
2323 cached_caps = dev_caps->common_cap;
2324 num_funcs = dev_caps->num_funcs;
2325
2326 /* unset dev capabilities */
2327 memset(dev_caps, 0, sizeof(*dev_caps));
2328
2329#define ICE_RESTORE_DEV_CAP(name) \
2330 dev_caps->common_cap.name = cached_caps.name
2331
2332 /* restore cached values */
2333 ICE_RESTORE_DEV_CAP(valid_functions);
2334 ICE_RESTORE_DEV_CAP(txq_first_id);
2335 ICE_RESTORE_DEV_CAP(rxq_first_id);
2336 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2337 ICE_RESTORE_DEV_CAP(max_mtu);
2338 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2339 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2340 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2341 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2342 dev_caps->num_funcs = num_funcs;
2343
2344 /* one Tx and one Rx queue per function in safe mode */
2345 dev_caps->common_cap.num_rxq = num_funcs;
2346 dev_caps->common_cap.num_txq = num_funcs;
2347
2348 /* two MSIX vectors per function */
2349 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2350}
2351
2352/**
2353 * ice_get_caps - get info about the HW
2354 * @hw: pointer to the hardware structure
2355 */
2356enum ice_status ice_get_caps(struct ice_hw *hw)
2357{
2358 enum ice_status status;
2359
2360 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2361 if (status)
2362 return status;
2363
2364 return ice_discover_func_caps(hw, &hw->func_caps);
2365}
2366
2367/**
2368 * ice_aq_manage_mac_write - manage MAC address write command
2369 * @hw: pointer to the HW struct
2370 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2371 * @flags: flags to control write behavior
2372 * @cd: pointer to command details structure or NULL
2373 *
2374 * This function is used to write MAC address to the NVM (0x0108).
2375 */
2376enum ice_status
2377ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2378 struct ice_sq_cd *cd)
2379{
2380 struct ice_aqc_manage_mac_write *cmd;
2381 struct ice_aq_desc desc;
2382
2383 cmd = &desc.params.mac_write;
2384 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2385
2386 cmd->flags = flags;
2387 ether_addr_copy(cmd->mac_addr, mac_addr);
2388
2389 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2390}
2391
2392/**
2393 * ice_aq_clear_pxe_mode
2394 * @hw: pointer to the HW struct
2395 *
2396 * Tell the firmware that the driver is taking over from PXE (0x0110).
2397 */
2398static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2399{
2400 struct ice_aq_desc desc;
2401
2402 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2403 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2404
2405 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2406}
2407
2408/**
 * ice_clear_pxe_mode - clear PXE operations mode
2410 * @hw: pointer to the HW struct
2411 *
2412 * Make sure all PXE mode settings are cleared, including things
2413 * like descriptor fetch/write-back mode.
2414 */
2415void ice_clear_pxe_mode(struct ice_hw *hw)
2416{
2417 if (ice_check_sq_alive(hw, &hw->adminq))
2418 ice_aq_clear_pxe_mode(hw);
2419}
2420
2421/**
2422 * ice_get_link_speed_based_on_phy_type - returns link speed
2423 * @phy_type_low: lower part of phy_type
2424 * @phy_type_high: higher part of phy_type
2425 *
2426 * This helper function will convert an entry in PHY type structure
2427 * [phy_type_low, phy_type_high] to its corresponding link speed.
2428 * Note: In the structure of [phy_type_low, phy_type_high], there should
2429 * be one bit set, as this function will convert one PHY type to its
2430 * speed.
 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2433 */
2434static u16
2435ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2436{
2437 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2438 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2439
2440 switch (phy_type_low) {
2441 case ICE_PHY_TYPE_LOW_100BASE_TX:
2442 case ICE_PHY_TYPE_LOW_100M_SGMII:
2443 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2444 break;
2445 case ICE_PHY_TYPE_LOW_1000BASE_T:
2446 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2447 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2448 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2449 case ICE_PHY_TYPE_LOW_1G_SGMII:
2450 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2451 break;
2452 case ICE_PHY_TYPE_LOW_2500BASE_T:
2453 case ICE_PHY_TYPE_LOW_2500BASE_X:
2454 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2455 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2456 break;
2457 case ICE_PHY_TYPE_LOW_5GBASE_T:
2458 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2459 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2460 break;
2461 case ICE_PHY_TYPE_LOW_10GBASE_T:
2462 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2463 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2464 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2465 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2466 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2467 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2468 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2469 break;
2470 case ICE_PHY_TYPE_LOW_25GBASE_T:
2471 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2472 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2473 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2474 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2475 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2476 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2477 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2478 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2479 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2480 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2481 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2482 break;
2483 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2484 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2485 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2486 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2487 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2488 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2489 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2490 break;
2491 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2492 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2493 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2494 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2495 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2496 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2497 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2498 case ICE_PHY_TYPE_LOW_50G_AUI2:
2499 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2500 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2501 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2502 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2503 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2504 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2505 case ICE_PHY_TYPE_LOW_50G_AUI1:
2506 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2507 break;
2508 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2509 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2510 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2511 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2512 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2513 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2514 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2515 case ICE_PHY_TYPE_LOW_100G_AUI4:
2516 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2517 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2518 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2519 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2520 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2521 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2522 break;
2523 default:
2524 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2525 break;
2526 }
2527
2528 switch (phy_type_high) {
2529 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2530 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2531 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2532 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2533 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2534 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2535 break;
2536 default:
2537 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2538 break;
2539 }
2540
2541 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2542 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2543 return ICE_AQ_LINK_SPEED_UNKNOWN;
2544 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2545 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2546 return ICE_AQ_LINK_SPEED_UNKNOWN;
2547 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2548 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2549 return speed_phy_type_low;
2550 else
2551 return speed_phy_type_high;
2552}
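
/* Example: phy_type_low = ICE_PHY_TYPE_LOW_25GBASE_SR (a single bit set)
 * with phy_type_high = 0 yields ICE_AQ_LINK_SPEED_25GB, while setting a
 * known-speed bit in both words yields ICE_AQ_LINK_SPEED_UNKNOWN.
 */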
2553
2554/**
2555 * ice_update_phy_type
2556 * @phy_type_low: pointer to the lower part of phy_type
2557 * @phy_type_high: pointer to the higher part of phy_type
2558 * @link_speeds_bitmap: targeted link speeds bitmap
2559 *
2560 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
2563 *
 * Each entry in this [phy_type_low, phy_type_high] structure represents a
 * certain link speed. This helper function will turn on bits
2566 * in [phy_type_low, phy_type_high] structure based on the value of
2567 * link_speeds_bitmap input parameter.
2568 */
2569void
2570ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2571 u16 link_speeds_bitmap)
2572{
2573 u64 pt_high;
2574 u64 pt_low;
2575 int index;
2576 u16 speed;
2577
2578 /* We first check with low part of phy_type */
2579 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2580 pt_low = BIT_ULL(index);
2581 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2582
2583 if (link_speeds_bitmap & speed)
2584 *phy_type_low |= BIT_ULL(index);
2585 }
2586
2587 /* We then check with high part of phy_type */
2588 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2589 pt_high = BIT_ULL(index);
2590 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2591
2592 if (link_speeds_bitmap & speed)
2593 *phy_type_high |= BIT_ULL(index);
2594 }
2595}
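
/* Example (illustrative): to advertise only 10G and 25G speeds, a caller
 * could start from phy_type_low = phy_type_high = 0 and call
 *
 *	ice_update_phy_type(&phy_type_low, &phy_type_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * which sets every 10G and 25G PHY type bit in phy_type_low and leaves
 * phy_type_high untouched, since the high word only holds 100G types.
 */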
2596
2597/**
2598 * ice_aq_set_phy_cfg
2599 * @hw: pointer to the HW struct
2600 * @pi: port info structure of the interested logical port
2601 * @cfg: structure with PHY configuration data to be set
2602 * @cd: pointer to command details structure or NULL
2603 *
2604 * Set the various PHY configuration parameters supported on the Port.
2605 * One or more of the Set PHY config parameters may be ignored in an MFP
2606 * mode as the PF may not have the privilege to set some of the PHY Config
2607 * parameters. This status will be indicated by the command response (0x0601).
2608 */
2609enum ice_status
2610ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2611 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2612{
2613 struct ice_aq_desc desc;
2614 enum ice_status status;
2615
2616 if (!cfg)
2617 return ICE_ERR_PARAM;
2618
2619 /* Ensure that only valid bits of cfg->caps can be turned on. */
2620 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2621 ice_debug(hw, ICE_DBG_PHY,
2622 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2623 cfg->caps);
2624
2625 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2626 }
2627
2628 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2629 desc.params.set_phy.lport_num = pi->lport;
2630 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2631
2632 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2633 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2634 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2635 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2636 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2637 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2638 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2639 cfg->low_power_ctrl_an);
2640 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2641 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2642 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2643 cfg->link_fec_opt);
2644
2645 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2646 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2647 status = 0;
2648
2649 if (!status)
2650 pi->phy.curr_user_phy_cfg = *cfg;
2651
2652 return status;
2653}
2654
2655/**
2656 * ice_update_link_info - update status of the HW network link
2657 * @pi: port info structure of the interested logical port
2658 */
2659enum ice_status ice_update_link_info(struct ice_port_info *pi)
2660{
2661 struct ice_link_status *li;
2662 enum ice_status status;
2663
2664 if (!pi)
2665 return ICE_ERR_PARAM;
2666
2667 li = &pi->phy.link_info;
2668
2669 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2670 if (status)
2671 return status;
2672
2673 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2674 struct ice_aqc_get_phy_caps_data *pcaps;
2675 struct ice_hw *hw;
2676
2677 hw = pi->hw;
2678 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2679 GFP_KERNEL);
2680 if (!pcaps)
2681 return ICE_ERR_NO_MEMORY;
2682
2683 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2684 pcaps, NULL);
2685
2686 devm_kfree(ice_hw_to_dev(hw), pcaps);
2687 }
2688
2689 return status;
2690}
2691
2692/**
2693 * ice_cache_phy_user_req
2694 * @pi: port information structure
2695 * @cache_data: PHY logging data
2696 * @cache_mode: PHY logging mode
2697 *
2698 * Log the user request on (FC, FEC, SPEED) for later use.
2699 */
2700static void
2701ice_cache_phy_user_req(struct ice_port_info *pi,
2702 struct ice_phy_cache_mode_data cache_data,
2703 enum ice_phy_cache_mode cache_mode)
2704{
2705 if (!pi)
2706 return;
2707
2708 switch (cache_mode) {
2709 case ICE_FC_MODE:
2710 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2711 break;
2712 case ICE_SPEED_MODE:
2713 pi->phy.curr_user_speed_req =
2714 cache_data.data.curr_user_speed_req;
2715 break;
2716 case ICE_FEC_MODE:
2717 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2718 break;
2719 default:
2720 break;
2721 }
2722}
2723
2724/**
2725 * ice_caps_to_fc_mode
2726 * @caps: PHY capabilities
2727 *
2728 * Convert PHY FC capabilities to ice FC mode
2729 */
2730enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2731{
2732 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2733 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2734 return ICE_FC_FULL;
2735
2736 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2737 return ICE_FC_TX_PAUSE;
2738
2739 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2740 return ICE_FC_RX_PAUSE;
2741
2742 return ICE_FC_NONE;
2743}
2744
2745/**
2746 * ice_caps_to_fec_mode
2747 * @caps: PHY capabilities
2748 * @fec_options: Link FEC options
2749 *
2750 * Convert PHY FEC capabilities to ice FEC mode
2751 */
2752enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2753{
2754 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2755 return ICE_FEC_AUTO;
2756
2757 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2758 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2759 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2760 ICE_AQC_PHY_FEC_25G_KR_REQ))
2761 return ICE_FEC_BASER;
2762
2763 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2764 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2765 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2766 return ICE_FEC_RS;
2767
2768 return ICE_FEC_NONE;
2769}
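
/* Example: caps without ICE_AQC_PHY_EN_AUTO_FEC and fec_options containing
 * ICE_AQC_PHY_FEC_25G_RS_528_REQ map to ICE_FEC_RS; if no FEC enable or
 * request bits are set at all, the result is ICE_FEC_NONE.
 */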
2770
2771/**
2772 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2773 * @pi: port information structure
2774 * @cfg: PHY configuration data to set FC mode
2775 * @req_mode: FC mode to configure
2776 */
2777enum ice_status
2778ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2779 enum ice_fc_mode req_mode)
2780{
2781 struct ice_phy_cache_mode_data cache_data;
2782 u8 pause_mask = 0x0;
2783
2784 if (!pi || !cfg)
2785 return ICE_ERR_BAD_PTR;
2786
2787 switch (req_mode) {
2788 case ICE_FC_FULL:
2789 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2790 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2791 break;
2792 case ICE_FC_RX_PAUSE:
2793 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2794 break;
2795 case ICE_FC_TX_PAUSE:
2796 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2797 break;
2798 default:
2799 break;
2800 }
2801
2802 /* clear the old pause settings */
2803 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2804 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2805
2806 /* set the new capabilities */
2807 cfg->caps |= pause_mask;
2808
2809 /* Cache user FC request */
2810 cache_data.data.curr_user_fc_req = req_mode;
2811 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2812
2813 return 0;
2814}
2815
2816/**
2817 * ice_set_fc
2818 * @pi: port information structure
2819 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2820 * @ena_auto_link_update: enable automatic link update
2821 *
2822 * Set the requested flow control mode.
2823 */
2824enum ice_status
2825ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2826{
2827 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2828 struct ice_aqc_get_phy_caps_data *pcaps;
2829 enum ice_status status;
2830 struct ice_hw *hw;
2831
2832 if (!pi || !aq_failures)
2833 return ICE_ERR_BAD_PTR;
2834
2835 *aq_failures = 0;
2836 hw = pi->hw;
2837
2838 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2839 if (!pcaps)
2840 return ICE_ERR_NO_MEMORY;
2841
2842 /* Get the current PHY config */
2843 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2844 NULL);
2845 if (status) {
2846 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2847 goto out;
2848 }
2849
2850 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2851
2852 /* Configure the set PHY data */
2853 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2854 if (status)
2855 goto out;
2856
2857 /* If the capabilities have changed, then set the new config */
2858 if (cfg.caps != pcaps->caps) {
2859 int retry_count, retry_max = 10;
2860
2861 /* Auto restart link so settings take effect */
2862 if (ena_auto_link_update)
2863 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2864
2865 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2866 if (status) {
2867 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2868 goto out;
2869 }
2870
2871 /* Update the link info
2872 * It sometimes takes a really long time for link to
2873 * come back from the atomic reset. Thus, we wait a
2874 * little bit.
2875 */
2876 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2877 status = ice_update_link_info(pi);
2878
2879 if (!status)
2880 break;
2881
2882 mdelay(100);
2883 }
2884
2885 if (status)
2886 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2887 }
2888
2889out:
2890 devm_kfree(ice_hw_to_dev(hw), pcaps);
2891 return status;
2892}
2893
2894/**
2895 * ice_phy_caps_equals_cfg
2896 * @phy_caps: PHY capabilities
2897 * @phy_cfg: PHY configuration
2898 *
 * Helper function to determine if the PHY capabilities match the PHY
 * configuration.
2901 */
2902bool
2903ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2904 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2905{
2906 u8 caps_mask, cfg_mask;
2907
2908 if (!phy_caps || !phy_cfg)
2909 return false;
2910
2911 /* These bits are not common between capabilities and configuration.
2912 * Do not use them to determine equality.
2913 */
2914 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2915 ICE_AQC_GET_PHY_EN_MOD_QUAL);
2916 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2917
2918 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2919 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2920 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2921 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2922 phy_caps->eee_cap != phy_cfg->eee_cap ||
2923 phy_caps->eeer_value != phy_cfg->eeer_value ||
2924 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2925 return false;
2926
2927 return true;
2928}
2929
2930/**
2931 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2932 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
2934 * @cfg: PHY configuration structure to copy data to
2935 *
2936 * Helper function to copy AQC PHY get ability data to PHY set configuration
2937 * data structure
2938 */
2939void
2940ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2941 struct ice_aqc_get_phy_caps_data *caps,
2942 struct ice_aqc_set_phy_cfg_data *cfg)
2943{
2944 if (!pi || !caps || !cfg)
2945 return;
2946
2947 memset(cfg, 0, sizeof(*cfg));
2948 cfg->phy_type_low = caps->phy_type_low;
2949 cfg->phy_type_high = caps->phy_type_high;
2950 cfg->caps = caps->caps;
2951 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2952 cfg->eee_cap = caps->eee_cap;
2953 cfg->eeer_value = caps->eeer_value;
2954 cfg->link_fec_opt = caps->link_fec_options;
2955 cfg->module_compliance_enforcement =
2956 caps->module_compliance_enforcement;
2957
2958 if (ice_fw_supports_link_override(pi->hw)) {
2959 struct ice_link_default_override_tlv tlv;
2960
2961 if (ice_get_link_default_override(&tlv, pi))
2962 return;
2963
2964 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2965 cfg->module_compliance_enforcement |=
2966 ICE_LINK_OVERRIDE_STRICT_MODE;
2967 }
2968}
2969
2970/**
2971 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2972 * @pi: port information structure
2973 * @cfg: PHY configuration data to set FEC mode
2974 * @fec: FEC mode to configure
2975 */
2976enum ice_status
2977ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2978 enum ice_fec_mode fec)
2979{
2980 struct ice_aqc_get_phy_caps_data *pcaps;
2981 enum ice_status status;
2982
2983 if (!pi || !cfg)
2984 return ICE_ERR_BAD_PTR;
2985
2986 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2987 if (!pcaps)
2988 return ICE_ERR_NO_MEMORY;
2989
2990 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
2991 NULL);
2992 if (status)
2993 goto out;
2994
2995 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2996 cfg->link_fec_opt = pcaps->link_fec_options;
2997
2998 switch (fec) {
2999 case ICE_FEC_BASER:
3000 /* Clear RS bits, and AND BASE-R ability
3001 * bits and OR request bits.
3002 */
3003 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3004 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3005 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3006 ICE_AQC_PHY_FEC_25G_KR_REQ;
3007 break;
3008 case ICE_FEC_RS:
3009 /* Clear BASE-R bits, and AND RS ability
3010 * bits and OR request bits.
3011 */
3012 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3013 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3014 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3015 break;
3016 case ICE_FEC_NONE:
3017 /* Clear all FEC option bits. */
3018 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3019 break;
3020 case ICE_FEC_AUTO:
3021 /* AND auto FEC bit, and all caps bits. */
3022 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3023 cfg->link_fec_opt |= pcaps->link_fec_options;
3024 break;
3025 default:
3026 status = ICE_ERR_PARAM;
3027 break;
3028 }
3029
3030 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3031 struct ice_link_default_override_tlv tlv;
3032
3033 if (ice_get_link_default_override(&tlv, pi))
3034 goto out;
3035
3036 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3037 (tlv.options & ICE_LINK_OVERRIDE_EN))
3038 cfg->link_fec_opt = tlv.fec_options;
3039 }
3040
3041out:
3042 kfree(pcaps);
3043
3044 return status;
3045}
3046
3047/**
3048 * ice_get_link_status - get status of the HW network link
3049 * @pi: port information structure
3050 * @link_up: pointer to bool (true/false = linkup/linkdown)
3051 *
3052 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if the returned status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
3055 */
3056enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3057{
3058 struct ice_phy_info *phy_info;
3059 enum ice_status status = 0;
3060
3061 if (!pi || !link_up)
3062 return ICE_ERR_PARAM;
3063
3064 phy_info = &pi->phy;
3065
3066 if (phy_info->get_link_info) {
3067 status = ice_update_link_info(pi);
3068
3069 if (status)
3070 ice_debug(pi->hw, ICE_DBG_LINK,
3071 "get link status error, status = %d\n",
3072 status);
3073 }
3074
3075 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3076
3077 return status;
3078}
3079
3080/**
3081 * ice_aq_set_link_restart_an
3082 * @pi: pointer to the port information structure
3083 * @ena_link: if true: enable link, if false: disable link
3084 * @cd: pointer to command details structure or NULL
3085 *
3086 * Sets up the link and restarts the Auto-Negotiation over the link.
3087 */
3088enum ice_status
3089ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3090 struct ice_sq_cd *cd)
3091{
3092 struct ice_aqc_restart_an *cmd;
3093 struct ice_aq_desc desc;
3094
3095 cmd = &desc.params.restart_an;
3096
3097 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3098
3099 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3100 cmd->lport_num = pi->lport;
3101 if (ena_link)
3102 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3103 else
3104 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3105
3106 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3107}
3108
3109/**
3110 * ice_aq_set_event_mask
3111 * @hw: pointer to the HW struct
3112 * @port_num: port number of the physical function
3113 * @mask: event mask to be set
3114 * @cd: pointer to command details structure or NULL
3115 *
3116 * Set event mask (0x0613)
3117 */
3118enum ice_status
3119ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3120 struct ice_sq_cd *cd)
3121{
3122 struct ice_aqc_set_event_mask *cmd;
3123 struct ice_aq_desc desc;
3124
3125 cmd = &desc.params.set_event_mask;
3126
3127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3128
3129 cmd->lport_num = port_num;
3130
3131 cmd->event_mask = cpu_to_le16(mask);
3132 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3133}
3134
3135/**
3136 * ice_aq_set_mac_loopback
3137 * @hw: pointer to the HW struct
3138 * @ena_lpbk: Enable or Disable loopback
3139 * @cd: pointer to command details structure or NULL
3140 *
3141 * Enable/disable loopback on a given port
3142 */
3143enum ice_status
3144ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3145{
3146 struct ice_aqc_set_mac_lb *cmd;
3147 struct ice_aq_desc desc;
3148
3149 cmd = &desc.params.set_mac_lb;
3150
3151 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3152 if (ena_lpbk)
3153 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3154
3155 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3156}
3157
3158/**
3159 * ice_aq_set_port_id_led
3160 * @pi: pointer to the port information
3161 * @is_orig_mode: is this LED set to original mode (by the net-list)
3162 * @cd: pointer to command details structure or NULL
3163 *
3164 * Set LED value for the given port (0x06e9)
3165 */
3166enum ice_status
3167ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3168 struct ice_sq_cd *cd)
3169{
3170 struct ice_aqc_set_port_id_led *cmd;
3171 struct ice_hw *hw = pi->hw;
3172 struct ice_aq_desc desc;
3173
3174 cmd = &desc.params.set_port_id_led;
3175
3176 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3177
3178 if (is_orig_mode)
3179 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3180 else
3181 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3182
3183 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3184}
3185
3186/**
3187 * ice_aq_sff_eeprom
3188 * @hw: pointer to the HW struct
3189 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3190 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. Lower 8 bits for address, upper 8 bits must be zero.
3192 * @page: QSFP page
3193 * @set_page: set or ignore the page
3194 * @data: pointer to data buffer to be read/written to the I2C device.
3195 * @length: 1-16 for read, 1 for write.
 * @write: 0 for read, 1 for write.
3197 * @cd: pointer to command details structure or NULL
3198 *
3199 * Read/Write SFF EEPROM (0x06EE)
3200 */
3201enum ice_status
3202ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3203 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3204 bool write, struct ice_sq_cd *cd)
3205{
3206 struct ice_aqc_sff_eeprom *cmd;
3207 struct ice_aq_desc desc;
3208 enum ice_status status;
3209
3210 if (!data || (mem_addr & 0xff00))
3211 return ICE_ERR_PARAM;
3212
3213 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3214 cmd = &desc.params.read_write_sff_param;
3215 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3216 cmd->lport_num = (u8)(lport & 0xff);
3217 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3218 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3219 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3220 ((set_page <<
3221 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3222 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3223 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3224 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3225 if (write)
3226 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3227
3228 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3229 return status;
3230}
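
/* Usage sketch (illustrative only): reading the identifier byte at offset 0
 * of the default page from a module at the conventional 0xA0 address might
 * look like:
 *
 *	u8 id;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, &id, 1,
 *				   false, NULL);
 *
 * A lport of 0 with bit 8 clear relies on the topology default port; set
 * bit 8 of lport to address a specific logical port explicitly.
 */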
3231
3232/**
3233 * __ice_aq_get_set_rss_lut
3234 * @hw: pointer to the hardware structure
3235 * @vsi_id: VSI FW index
3236 * @lut_type: LUT table type
3237 * @lut: pointer to the LUT buffer provided by the caller
3238 * @lut_size: size of the LUT buffer
3239 * @glob_lut_idx: global LUT index
3240 * @set: set true to set the table, false to get the table
3241 *
 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3243 */
3244static enum ice_status
3245__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3246 u16 lut_size, u8 glob_lut_idx, bool set)
3247{
3248 struct ice_aqc_get_set_rss_lut *cmd_resp;
3249 struct ice_aq_desc desc;
3250 enum ice_status status;
3251 u16 flags = 0;
3252
3253 cmd_resp = &desc.params.get_set_rss_lut;
3254
3255 if (set) {
3256 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3257 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3258 } else {
3259 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3260 }
3261
3262 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3263 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3264 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3265 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3266
3267 switch (lut_type) {
3268 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3269 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3270 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3271 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3272 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3273 break;
3274 default:
3275 status = ICE_ERR_PARAM;
3276 goto ice_aq_get_set_rss_lut_exit;
3277 }
3278
3279 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3280 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3281 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3282
3283 if (!set)
3284 goto ice_aq_get_set_rss_lut_send;
3285 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3286 if (!set)
3287 goto ice_aq_get_set_rss_lut_send;
3288 } else {
3289 goto ice_aq_get_set_rss_lut_send;
3290 }
3291
3292 /* LUT size is only valid for Global and PF table types */
3293 switch (lut_size) {
3294 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3295 break;
3296 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3297 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3298 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3299 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3300 break;
3301 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3302 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3303 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3304 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3305 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3306 break;
3307 }
3308 fallthrough;
3309 default:
3310 status = ICE_ERR_PARAM;
3311 goto ice_aq_get_set_rss_lut_exit;
3312 }
3313
3314ice_aq_get_set_rss_lut_send:
3315 cmd_resp->flags = cpu_to_le16(flags);
3316 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3317
3318ice_aq_get_set_rss_lut_exit:
3319 return status;
3320}
3321
3322/**
3323 * ice_aq_get_rss_lut
3324 * @hw: pointer to the hardware structure
3325 * @vsi_handle: software VSI handle
3326 * @lut_type: LUT table type
3327 * @lut: pointer to the LUT buffer provided by the caller
3328 * @lut_size: size of the LUT buffer
3329 *
3330 * get the RSS lookup table, PF or VSI type
3331 */
3332enum ice_status
3333ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3334 u8 *lut, u16 lut_size)
3335{
3336 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3337 return ICE_ERR_PARAM;
3338
3339 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3340 lut_type, lut, lut_size, 0, false);
3341}
3342
3343/**
3344 * ice_aq_set_rss_lut
3345 * @hw: pointer to the hardware structure
3346 * @vsi_handle: software VSI handle
3347 * @lut_type: LUT table type
3348 * @lut: pointer to the LUT buffer provided by the caller
3349 * @lut_size: size of the LUT buffer
3350 *
3351 * set the RSS lookup table, PF or VSI type
3352 */
3353enum ice_status
3354ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3355 u8 *lut, u16 lut_size)
3356{
3357 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3358 return ICE_ERR_PARAM;
3359
3360 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3361 lut_type, lut, lut_size, 0, true);
3362}
3363
3364/**
3365 * __ice_aq_get_set_rss_key
3366 * @hw: pointer to the HW struct
3367 * @vsi_id: VSI FW index
3368 * @key: pointer to key info struct
3369 * @set: set true to set the key, false to get the key
3370 *
3371 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3372 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3375 struct ice_aqc_get_set_rss_keys *key,
3376 bool set)
3377{
3378 struct ice_aqc_get_set_rss_key *cmd_resp;
3379 u16 key_size = sizeof(*key);
3380 struct ice_aq_desc desc;
3381
3382 cmd_resp = &desc.params.get_set_rss_key;
3383
3384 if (set) {
3385 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3386 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3387 } else {
3388 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3389 }
3390
3391 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3392 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3393 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3394 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3395
3396 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3397}
3398
3399/**
3400 * ice_aq_get_rss_key
3401 * @hw: pointer to the HW struct
3402 * @vsi_handle: software VSI handle
3403 * @key: pointer to key info struct
3404 *
3405 * get the RSS key per VSI
3406 */
3407enum ice_status
3408ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3409 struct ice_aqc_get_set_rss_keys *key)
3410{
3411 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3412 return ICE_ERR_PARAM;
3413
3414 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3415 key, false);
3416}
3417
3418/**
3419 * ice_aq_set_rss_key
3420 * @hw: pointer to the HW struct
3421 * @vsi_handle: software VSI handle
3422 * @keys: pointer to key info struct
3423 *
3424 * set the RSS key per VSI
3425 */
3426enum ice_status
3427ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3428 struct ice_aqc_get_set_rss_keys *keys)
3429{
3430 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3431 return ICE_ERR_PARAM;
3432
3433 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3434 keys, true);
3435}
3436
3437/**
3438 * ice_aq_add_lan_txq
3439 * @hw: pointer to the hardware structure
3440 * @num_qgrps: Number of added queue groups
3441 * @qg_list: list of queue groups to be added
3442 * @buf_size: size of buffer for indirect command
3443 * @cd: pointer to command details structure or NULL
3444 *
3445 * Add Tx LAN queue (0x0C30)
3446 *
3447 * NOTE:
3448 * Prior to calling add Tx LAN queue:
3449 * Initialize the following as part of the Tx queue context:
3450 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3451 * Cache profile and Packet shaper profile.
3452 *
3453 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues;
 * association of a Tx queue with a Doorbell queue is not part of the
 * Add LAN Tx queue flow.
3457 */
3458static enum ice_status
3459ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3460 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3461 struct ice_sq_cd *cd)
3462{
3463 struct ice_aqc_add_tx_qgrp *list;
3464 struct ice_aqc_add_txqs *cmd;
3465 struct ice_aq_desc desc;
3466 u16 i, sum_size = 0;
3467
3468 cmd = &desc.params.add_txqs;
3469
3470 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3471
3472 if (!qg_list)
3473 return ICE_ERR_PARAM;
3474
3475 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3476 return ICE_ERR_PARAM;
3477
3478 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3479 sum_size += struct_size(list, txqs, list->num_txqs);
3480 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3481 list->num_txqs);
3482 }
3483
3484 if (buf_size != sum_size)
3485 return ICE_ERR_PARAM;
3486
3487 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3488
3489 cmd->num_qgrps = num_qgrps;
3490
3491 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3492}
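
/* Buffer-sizing sketch for the Add Tx LAN queue command (illustrative
 * only, not part of the driver): for one queue group carrying a single Tx
 * queue, buf_size must equal struct_size(buf, txqs, 1) so that it matches
 * the walk-and-sum check above. The "1" counts here are example values.
 *
 *	struct ice_aqc_add_tx_qgrp *buf;
 *	enum ice_status status;
 *	u16 buf_size;
 *
 *	buf_size = struct_size(buf, txqs, 1);
 *	buf = kzalloc(buf_size, GFP_KERNEL);
 *	if (!buf)
 *		return ICE_ERR_NO_MEMORY;
 *	buf->num_txqs = 1;
 *	status = ice_aq_add_lan_txq(hw, 1, buf, buf_size, NULL);
 */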
3493
3494/**
3495 * ice_aq_dis_lan_txq
3496 * @hw: pointer to the hardware structure
3497 * @num_qgrps: number of groups in the list
3498 * @qg_list: the list of groups to disable
3499 * @buf_size: the total size of the qg_list buffer in bytes
3500 * @rst_src: if called due to reset, specifies the reset source
3501 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3502 * @cd: pointer to command details structure or NULL
3503 *
3504 * Disable LAN Tx queue (0x0C31)
3505 */
3506static enum ice_status
3507ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3508 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3509 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3510 struct ice_sq_cd *cd)
3511{
3512 struct ice_aqc_dis_txq_item *item;
3513 struct ice_aqc_dis_txqs *cmd;
3514 struct ice_aq_desc desc;
3515 enum ice_status status;
3516 u16 i, sz = 0;
3517
3518 cmd = &desc.params.dis_txqs;
3519 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3520
3521 /* qg_list can be NULL only in VM/VF reset flow */
3522 if (!qg_list && !rst_src)
3523 return ICE_ERR_PARAM;
3524
3525 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3526 return ICE_ERR_PARAM;
3527
3528 cmd->num_entries = num_qgrps;
3529
3530 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3531 ICE_AQC_Q_DIS_TIMEOUT_M);
3532
3533 switch (rst_src) {
3534 case ICE_VM_RESET:
3535 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3536 cmd->vmvf_and_timeout |=
3537 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3538 break;
3539 case ICE_VF_RESET:
3540 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3541 /* In this case, FW expects vmvf_num to be absolute VF ID */
3542 cmd->vmvf_and_timeout |=
3543 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3544 ICE_AQC_Q_DIS_VMVF_NUM_M);
3545 break;
3546 case ICE_NO_RESET:
3547 default:
3548 break;
3549 }
3550
3551 /* flush pipe on timeout */
3552 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3553 /* If no queue group info, we are in a reset flow. Issue the AQ */
3554 if (!qg_list)
3555 goto do_aq;
3556
3557 /* set RD bit to indicate that command buffer is provided by the driver
3558 * and it needs to be read by the firmware
3559 */
3560 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3561
3562 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3563 u16 item_size = struct_size(item, q_id, item->num_qs);
3564
3565 /* If the num of queues is even, add 2 bytes of padding */
3566 if ((item->num_qs % 2) == 0)
3567 item_size += 2;
3568
3569 sz += item_size;
3570
3571 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3572 }
3573
3574 if (buf_size != sz)
3575 return ICE_ERR_PARAM;
3576
3577do_aq:
3578 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3579 if (status) {
3580 if (!qg_list)
3581 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3582 vmvf_num, hw->adminq.sq_last_status);
3583 else
3584 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3585 le16_to_cpu(qg_list[0].q_id[0]),
3586 hw->adminq.sq_last_status);
3587 }
3588 return status;
3589}
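
/* Buffer-layout sketch for the Disable Tx LAN queue command (illustrative
 * only): one group disabling a single queue. With an odd num_qs no padding
 * is needed; with an even num_qs the size check above expects 2 extra
 * bytes per group. parent_teid and q_id are placeholder values.
 *
 *	struct ice_aqc_dis_txq_item *item;
 *	enum ice_status status;
 *	u16 buf_size;
 *
 *	buf_size = struct_size(item, q_id, 1);
 *	item = kzalloc(buf_size, GFP_KERNEL);
 *	if (!item)
 *		return ICE_ERR_NO_MEMORY;
 *	item->parent_teid = parent_teid;
 *	item->num_qs = 1;
 *	item->q_id[0] = cpu_to_le16(q_id);
 *	status = ice_aq_dis_lan_txq(hw, 1, item, buf_size, ICE_NO_RESET,
 *				    0, NULL);
 */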
3590
3591/* End of FW Admin Queue command wrappers */
3592
3593/**
3594 * ice_write_byte - write a byte to a packed context structure
3595 * @src_ctx: the context structure to read from
3596 * @dest_ctx: the context to be written to
3597 * @ce_info: a description of the struct to be filled
3598 */
3599static void
3600ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3601{
3602 u8 src_byte, dest_byte, mask;
3603 u8 *from, *dest;
3604 u16 shift_width;
3605
3606 /* copy from the next struct field */
3607 from = src_ctx + ce_info->offset;
3608
3609 /* prepare the bits and mask */
3610 shift_width = ce_info->lsb % 8;
3611 mask = (u8)(BIT(ce_info->width) - 1);
3612
3613 src_byte = *from;
3614 src_byte &= mask;
3615
3616 /* shift to correct alignment */
3617 mask <<= shift_width;
3618 src_byte <<= shift_width;
3619
3620 /* get the current bits from the target bit string */
3621 dest = dest_ctx + (ce_info->lsb / 8);
3622
3623 memcpy(&dest_byte, dest, sizeof(dest_byte));
3624
3625 dest_byte &= ~mask; /* get the bits not changing */
3626 dest_byte |= src_byte; /* add in the new bits */
3627
3628 /* put it all back */
3629 memcpy(dest, &dest_byte, sizeof(dest_byte));
3630}
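
/* Worked example for the byte-packing logic above (commentary only):
 * a 3-bit field with lsb = 5 and source value 0b101. mask starts as
 * 0b0000_0111 and the source is masked to 0b0000_0101; both are then
 * shifted left by shift_width = 5, giving mask 0b1110_0000 and src_byte
 * 0b1010_0000. The destination byte keeps its low five bits and takes
 * the new value in its top three bits.
 */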
3631
3632/**
3633 * ice_write_word - write a word to a packed context structure
3634 * @src_ctx: the context structure to read from
3635 * @dest_ctx: the context to be written to
3636 * @ce_info: a description of the struct to be filled
3637 */
3638static void
3639ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3640{
3641 u16 src_word, mask;
3642 __le16 dest_word;
3643 u8 *from, *dest;
3644 u16 shift_width;
3645
3646 /* copy from the next struct field */
3647 from = src_ctx + ce_info->offset;
3648
3649 /* prepare the bits and mask */
3650 shift_width = ce_info->lsb % 8;
3651 mask = BIT(ce_info->width) - 1;
3652
3653 /* don't swizzle the bits until after the mask because the mask bits
3654 * will be in a different bit position on big endian machines
3655 */
3656 src_word = *(u16 *)from;
3657 src_word &= mask;
3658
3659 /* shift to correct alignment */
3660 mask <<= shift_width;
3661 src_word <<= shift_width;
3662
3663 /* get the current bits from the target bit string */
3664 dest = dest_ctx + (ce_info->lsb / 8);
3665
3666 memcpy(&dest_word, dest, sizeof(dest_word));
3667
3668 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
3669 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3670
3671 /* put it all back */
3672 memcpy(dest, &dest_word, sizeof(dest_word));
3673}
3674
3675/**
3676 * ice_write_dword - write a dword to a packed context structure
3677 * @src_ctx: the context structure to read from
3678 * @dest_ctx: the context to be written to
3679 * @ce_info: a description of the struct to be filled
3680 */
3681static void
3682ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3683{
3684 u32 src_dword, mask;
3685 __le32 dest_dword;
3686 u8 *from, *dest;
3687 u16 shift_width;
3688
3689 /* copy from the next struct field */
3690 from = src_ctx + ce_info->offset;
3691
3692 /* prepare the bits and mask */
3693 shift_width = ce_info->lsb % 8;
3694
3695 /* if the field width is exactly 32 on an x86 machine, then the shift
3696 * operation will not work because the SHL instruction's count is masked
3697 * to 5 bits, so the shift will do nothing
3698 */
3699 if (ce_info->width < 32)
3700 mask = BIT(ce_info->width) - 1;
3701 else
3702 mask = (u32)~0;
3703
3704 /* don't swizzle the bits until after the mask because the mask bits
3705 * will be in a different bit position on big endian machines
3706 */
3707 src_dword = *(u32 *)from;
3708 src_dword &= mask;
3709
3710 /* shift to correct alignment */
3711 mask <<= shift_width;
3712 src_dword <<= shift_width;
3713
3714 /* get the current bits from the target bit string */
3715 dest = dest_ctx + (ce_info->lsb / 8);
3716
3717 memcpy(&dest_dword, dest, sizeof(dest_dword));
3718
3719 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
3720 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
3721
3722 /* put it all back */
3723 memcpy(dest, &dest_dword, sizeof(dest_dword));
3724}
3725
3726/**
3727 * ice_write_qword - write a qword to a packed context structure
3728 * @src_ctx: the context structure to read from
3729 * @dest_ctx: the context to be written to
3730 * @ce_info: a description of the struct to be filled
3731 */
3732static void
3733ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3734{
3735 u64 src_qword, mask;
3736 __le64 dest_qword;
3737 u8 *from, *dest;
3738 u16 shift_width;
3739
3740 /* copy from the next struct field */
3741 from = src_ctx + ce_info->offset;
3742
3743 /* prepare the bits and mask */
3744 shift_width = ce_info->lsb % 8;
3745
3746 /* if the field width is exactly 64 on an x86 machine, then the shift
3747 * operation will not work because the SHL instruction's count is masked
3748 * to 6 bits, so the shift will do nothing
3749 */
3750 if (ce_info->width < 64)
3751 mask = BIT_ULL(ce_info->width) - 1;
3752 else
3753 mask = (u64)~0;
3754
3755 /* don't swizzle the bits until after the mask because the mask bits
3756 * will be in a different bit position on big endian machines
3757 */
3758 src_qword = *(u64 *)from;
3759 src_qword &= mask;
3760
3761 /* shift to correct alignment */
3762 mask <<= shift_width;
3763 src_qword <<= shift_width;
3764
3765 /* get the current bits from the target bit string */
3766 dest = dest_ctx + (ce_info->lsb / 8);
3767
3768 memcpy(&dest_qword, dest, sizeof(dest_qword));
3769
3770 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
3771 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
3772
3773 /* put it all back */
3774 memcpy(dest, &dest_qword, sizeof(dest_qword));
3775}
3776
3777/**
3778 * ice_set_ctx - set context bits in packed structure
3779 * @hw: pointer to the hardware structure
3780 * @src_ctx: pointer to a generic non-packed context structure
3781 * @dest_ctx: pointer to memory for the packed structure
3782 * @ce_info: a description of the structure to be transformed
3783 */
3784enum ice_status
3785ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3786 const struct ice_ctx_ele *ce_info)
3787{
3788 int f;
3789
3790 for (f = 0; ce_info[f].width; f++) {
3791 /* We have to deal with each element of the FW response
3792 * using the correct size so that we are correct regardless
3793 * of the endianness of the machine.
3794 */
3795 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3796 ice_debug(hw, ICE_DBG_QCTX,
3797 "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3798 f, ce_info[f].width, ce_info[f].size_of);
3799 continue;
3800 }
3801 switch (ce_info[f].size_of) {
3802 case sizeof(u8):
3803 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3804 break;
3805 case sizeof(u16):
3806 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3807 break;
3808 case sizeof(u32):
3809 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3810 break;
3811 case sizeof(u64):
3812 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3813 break;
3814 default:
3815 return ICE_ERR_INVAL_SIZE;
3816 }
3817 }
3818
3819 return 0;
3820}
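
/* Usage sketch (illustrative, with assumed names): a caller describes each
 * field of a packed HW structure with an ice_ctx_ele table terminated by a
 * zero-width entry, then passes both buffers to ice_set_ctx(). struct
 * demo_ctx and its fields are hypothetical; real tables such as the Tx LAN
 * queue context table live elsewhere in the driver.
 *
 *	static const struct ice_ctx_ele demo_ctx_info[] = {
 *		ICE_CTX_STORE(demo_ctx, base, 57, 0),
 *		ICE_CTX_STORE(demo_ctx, qlen, 13, 57),
 *		{ 0 }
 *	};
 *	struct demo_ctx dctx = { .base = base, .qlen = qlen };
 *
 *	status = ice_set_ctx(hw, (u8 *)&dctx, packed_buf, demo_ctx_info);
 */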
3821
3822/**
3823 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3824 * @hw: pointer to the HW struct
3825 * @vsi_handle: software VSI handle
3826 * @tc: TC number
3827 * @q_handle: software queue handle
3828 */
3829struct ice_q_ctx *
3830ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3831{
3832 struct ice_vsi_ctx *vsi;
3833 struct ice_q_ctx *q_ctx;
3834
3835 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3836 if (!vsi)
3837 return NULL;
3838 if (q_handle >= vsi->num_lan_q_entries[tc])
3839 return NULL;
3840 if (!vsi->lan_q_ctx[tc])
3841 return NULL;
3842 q_ctx = vsi->lan_q_ctx[tc];
3843 return &q_ctx[q_handle];
3844}
3845
3846/**
3847 * ice_ena_vsi_txq
3848 * @pi: port information structure
3849 * @vsi_handle: software VSI handle
3850 * @tc: TC number
3851 * @q_handle: software queue handle
3852 * @num_qgrps: Number of added queue groups
3853 * @buf: list of queue groups to be added
3854 * @buf_size: size of buffer for indirect command
3855 * @cd: pointer to command details structure or NULL
3856 *
3857 * This function adds one LAN queue
3858 */
3859enum ice_status
3860ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3861 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3862 struct ice_sq_cd *cd)
3863{
3864 struct ice_aqc_txsched_elem_data node = { 0 };
3865 struct ice_sched_node *parent;
3866 struct ice_q_ctx *q_ctx;
3867 enum ice_status status;
3868 struct ice_hw *hw;
3869
3870 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3871 return ICE_ERR_CFG;
3872
3873 if (num_qgrps > 1 || buf->num_txqs > 1)
3874 return ICE_ERR_MAX_LIMIT;
3875
3876 hw = pi->hw;
3877
3878 if (!ice_is_vsi_valid(hw, vsi_handle))
3879 return ICE_ERR_PARAM;
3880
3881 mutex_lock(&pi->sched_lock);
3882
3883 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3884 if (!q_ctx) {
3885 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3886 q_handle);
3887 status = ICE_ERR_PARAM;
3888 goto ena_txq_exit;
3889 }
3890
3891 /* find a parent node */
3892 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3893 ICE_SCHED_NODE_OWNER_LAN);
3894 if (!parent) {
3895 status = ICE_ERR_PARAM;
3896 goto ena_txq_exit;
3897 }
3898
3899 buf->parent_teid = parent->info.node_teid;
3900 node.parent_teid = parent->info.node_teid;
3901 /* Mark the values in the "generic" section as valid. The default
3902 * value in the "generic" section is zero. This means that:
3903 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3904 * - 0 priority among siblings, indicated by Bits 1-3.
3905 * - WFQ, indicated by Bit 4.
3906 * - 0 Adjustment value is used in the PSM credit update flow, indicated
3907 * by Bits 5-6.
3908 * - Bit 7 is reserved.
3909 * Without setting the generic section as valid in valid_sections, the
3910 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3911 */
3912 buf->txqs[0].info.valid_sections =
3913 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
3914 ICE_AQC_ELEM_VALID_EIR;
3915 buf->txqs[0].info.generic = 0;
3916 buf->txqs[0].info.cir_bw.bw_profile_idx =
3917 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3918 buf->txqs[0].info.cir_bw.bw_alloc =
3919 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3920 buf->txqs[0].info.eir_bw.bw_profile_idx =
3921 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3922 buf->txqs[0].info.eir_bw.bw_alloc =
3923 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3924
3925 /* add the LAN queue */
3926 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3927 if (status) {
3928 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3929 le16_to_cpu(buf->txqs[0].txq_id),
3930 hw->adminq.sq_last_status);
3931 goto ena_txq_exit;
3932 }
3933
3934 node.node_teid = buf->txqs[0].q_teid;
3935 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3936 q_ctx->q_handle = q_handle;
3937 q_ctx->q_teid = le32_to_cpu(node.node_teid);
3938
3939 /* add a leaf node into scheduler tree queue layer */
3940 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3941 if (!status)
3942 status = ice_sched_replay_q_bw(pi, q_ctx);
3943
3944ena_txq_exit:
3945 mutex_unlock(&pi->sched_lock);
3946 return status;
3947}
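
/* Caller sketch (illustrative): enable a single Tx queue on TC 0. The
 * buffer carries exactly one queue group with one queue, matching the
 * num_qgrps/num_txqs limits enforced above; pf_q, q_handle and the queue
 * context bytes in txqs[0] are assumed to be prepared by the caller.
 *
 *	qg_buf->num_txqs = 1;
 *	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
 *	status = ice_ena_vsi_txq(pi, vsi_handle, 0, q_handle, 1, qg_buf,
 *				 struct_size(qg_buf, txqs, 1), NULL);
 */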
3948
3949/**
3950 * ice_dis_vsi_txq
3951 * @pi: port information structure
3952 * @vsi_handle: software VSI handle
3953 * @tc: TC number
3954 * @num_queues: number of queues
3955 * @q_handles: pointer to software queue handle array
3956 * @q_ids: pointer to the q_id array
3957 * @q_teids: pointer to queue node teids
3958 * @rst_src: if called due to reset, specifies the reset source
3959 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3960 * @cd: pointer to command details structure or NULL
3961 *
3962 * This function removes queues and their corresponding nodes in SW DB
3963 */
3964enum ice_status
3965ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3966 u16 *q_handles, u16 *q_ids, u32 *q_teids,
3967 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3968 struct ice_sq_cd *cd)
3969{
3970 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3971 struct ice_aqc_dis_txq_item *qg_list;
3972 struct ice_q_ctx *q_ctx;
3973 struct ice_hw *hw;
3974 u16 i, buf_size;
3975
3976 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3977 return ICE_ERR_CFG;
3978
3979 hw = pi->hw;
3980
3981 if (!num_queues) {
3982 /* if the queues are already disabled but the disable queue
3983 * command still has to be sent to complete the VF reset, then
3984 * call ice_aq_dis_lan_txq without any queue information
3985 */
3986 if (rst_src)
3987 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
3988 vmvf_num, NULL);
3989 return ICE_ERR_CFG;
3990 }
3991
3992 buf_size = struct_size(qg_list, q_id, 1);
3993 qg_list = kzalloc(buf_size, GFP_KERNEL);
3994 if (!qg_list)
3995 return ICE_ERR_NO_MEMORY;
3996
3997 mutex_lock(&pi->sched_lock);
3998
3999 for (i = 0; i < num_queues; i++) {
4000 struct ice_sched_node *node;
4001
4002 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4003 if (!node)
4004 continue;
4005 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4006 if (!q_ctx) {
4007 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4008 q_handles[i]);
4009 continue;
4010 }
4011 if (q_ctx->q_handle != q_handles[i]) {
4012 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4013 q_ctx->q_handle, q_handles[i]);
4014 continue;
4015 }
4016 qg_list->parent_teid = node->info.parent_teid;
4017 qg_list->num_qs = 1;
4018 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4019 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4020 vmvf_num, cd);
4021
4022 if (status)
4023 break;
4024 ice_free_sched_node(pi, node);
4025 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4026 }
4027 mutex_unlock(&pi->sched_lock);
4028 kfree(qg_list);
4029 return status;
4030}
4031
4032/**
4033 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4034 * @pi: port information structure
4035 * @vsi_handle: software VSI handle
4036 * @tc_bitmap: TC bitmap
4037 * @maxqs: max queues array per TC
4038 * @owner: LAN or RDMA
4039 *
4040 * This function adds/updates the VSI queues per TC.
4041 */
4042static enum ice_status
4043ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4044 u16 *maxqs, u8 owner)
4045{
4046 enum ice_status status = 0;
4047 u8 i;
4048
4049 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4050 return ICE_ERR_CFG;
4051
4052 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4053 return ICE_ERR_PARAM;
4054
4055 mutex_lock(&pi->sched_lock);
4056
4057 ice_for_each_traffic_class(i) {
4058 /* configuration is possible only if TC node is present */
4059 if (!ice_sched_get_tc_node(pi, i))
4060 continue;
4061
4062 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4063 ice_is_tc_ena(tc_bitmap, i));
4064 if (status)
4065 break;
4066 }
4067
4068 mutex_unlock(&pi->sched_lock);
4069 return status;
4070}
4071
4072/**
4073 * ice_cfg_vsi_lan - configure VSI LAN queues
4074 * @pi: port information structure
4075 * @vsi_handle: software VSI handle
4076 * @tc_bitmap: TC bitmap
4077 * @max_lanqs: max LAN queues array per TC
4078 *
4079 * This function adds/updates the VSI LAN queues per TC.
4080 */
4081enum ice_status
4082ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4083 u16 *max_lanqs)
4084{
4085 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4086 ICE_SCHED_NODE_OWNER_LAN);
4087}
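
/* Usage sketch (illustrative): reserve LAN queue space on TC 0 only. The
 * per-TC array gives the maximum number of LAN queues for each enabled TC;
 * num_txq is an assumed caller-side value.
 *
 *	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *
 *	max_txqs[0] = num_txq;
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_txqs);
 */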
4088
4089/**
4090 * ice_replay_pre_init - replay pre initialization
4091 * @hw: pointer to the HW struct
4092 *
4093 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4094 */
4095static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4096{
4097 struct ice_switch_info *sw = hw->switch_info;
4098 u8 i;
4099
4100 /* Delete old entries from the replay filter list head if there are any */
4101 ice_rm_all_sw_replay_rule_info(hw);
4102 /* At the start of replay, move entries into the replay_rules list;
4103 * this allows rule entries to be added back to the filt_rules list,
4104 * which is the operational list.
4105 */
4106 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4107 list_replace_init(&sw->recp_list[i].filt_rules,
4108 &sw->recp_list[i].filt_replay_rules);
4109
4110 return 0;
4111}
4112
4113/**
4114 * ice_replay_vsi - replay VSI configuration
4115 * @hw: pointer to the HW struct
4116 * @vsi_handle: driver VSI handle
4117 *
4118 * Restore all VSI configuration after a reset. This function must be
4119 * called for the main VSI first.
4120 */
4121enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4122{
4123 enum ice_status status;
4124
4125 if (!ice_is_vsi_valid(hw, vsi_handle))
4126 return ICE_ERR_PARAM;
4127
4128 /* Replay pre-initialization if there is any */
4129 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4130 status = ice_replay_pre_init(hw);
4131 if (status)
4132 return status;
4133 }
4134 /* Replay per VSI all RSS configurations */
4135 status = ice_replay_rss_cfg(hw, vsi_handle);
4136 if (status)
4137 return status;
4138 /* Replay per VSI all filters */
4139 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4140 return status;
4141}
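
/* Ordering sketch (illustrative): after a reset, replay the main VSI
 * before any other VSI so that the pre-init step above runs exactly once
 * before per-VSI filters are restored. The handle iteration is an assumed
 * caller-side loop over valid software VSI handles.
 *
 *	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	for (i = 0; !status && i < num_vsi; i++)
 *		if (i != ICE_MAIN_VSI_HANDLE && ice_is_vsi_valid(hw, i))
 *			status = ice_replay_vsi(hw, i);
 */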
4142
4143/**
4144 * ice_replay_post - post replay configuration cleanup
4145 * @hw: pointer to the HW struct
4146 *
4147 * Post replay cleanup.
4148 */
4149void ice_replay_post(struct ice_hw *hw)
4150{
4151 /* Delete old entries from replay filter list head */
4152 ice_rm_all_sw_replay_rule_info(hw);
4153}
4154
4155/**
4156 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4157 * @hw: ptr to the hardware info
4158 * @reg: offset of 64 bit HW register to read from
4159 * @prev_stat_loaded: bool to specify if previous stats are loaded
4160 * @prev_stat: ptr to previous loaded stat value
4161 * @cur_stat: ptr to current stat value
4162 */
4163void
4164ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4165 u64 *prev_stat, u64 *cur_stat)
4166{
4167 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4168
4169 /* device stats are not reset at PFR; they likely will not be zeroed
4170 * when the driver starts. Thus, save the value from the first read
4171 * without adding to the statistic value so that we report stats which
4172 * count up from zero.
4173 */
4174 if (!prev_stat_loaded) {
4175 *prev_stat = new_data;
4176 return;
4177 }
4178
4179 /* Calculate the difference between the new and old values, and then
4180 * add it to the software stat value.
4181 */
4182 if (new_data >= *prev_stat)
4183 *cur_stat += new_data - *prev_stat;
4184 else
4185 /* to manage the potential roll-over */
4186 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4187
4188 /* Update the previously stored value to prepare for next read */
4189 *prev_stat = new_data;
4190}
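
/* Rollover arithmetic, worked with example values (commentary only): if
 * the previous 40-bit reading was 0xFF_FFFF_FFF0 and the new reading is
 * 0x10, the counter wrapped, so the increment added to *cur_stat is
 * (0x10 + BIT_ULL(40)) - 0xFF_FFFF_FFF0 = 0x20, i.e. 32, rather than a
 * huge value from an unsigned underflow.
 */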
4191
4192/**
4193 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4194 * @hw: ptr to the hardware info
4195 * @reg: offset of HW register to read from
4196 * @prev_stat_loaded: bool to specify if previous stats are loaded
4197 * @prev_stat: ptr to previous loaded stat value
4198 * @cur_stat: ptr to current stat value
4199 */
4200void
4201ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4202 u64 *prev_stat, u64 *cur_stat)
4203{
4204 u32 new_data;
4205
4206 new_data = rd32(hw, reg);
4207
4208 /* device stats are not reset at PFR; they likely will not be zeroed
4209 * when the driver starts. Thus, save the value from the first read
4210 * without adding to the statistic value so that we report stats which
4211 * count up from zero.
4212 */
4213 if (!prev_stat_loaded) {
4214 *prev_stat = new_data;
4215 return;
4216 }
4217
4218 /* Calculate the difference between the new and old values, and then
4219 * add it to the software stat value.
4220 */
4221 if (new_data >= *prev_stat)
4222 *cur_stat += new_data - *prev_stat;
4223 else
4224 /* to manage the potential roll-over */
4225 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4226
4227 /* Update the previously stored value to prepare for next read */
4228 *prev_stat = new_data;
4229}
4230
4231/**
4232 * ice_sched_query_elem - query element information from HW
4233 * @hw: pointer to the HW struct
4234 * @node_teid: node TEID to be queried
4235 * @buf: buffer to element information
4236 *
4237 * This function queries HW element information
4238 */
4239enum ice_status
4240ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4241 struct ice_aqc_txsched_elem_data *buf)
4242{
4243 u16 buf_size, num_elem_ret = 0;
4244 enum ice_status status;
4245
4246 buf_size = sizeof(*buf);
4247 memset(buf, 0, buf_size);
4248 buf->node_teid = cpu_to_le32(node_teid);
4249 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4250 NULL);
4251 if (status || num_elem_ret != 1)
4252 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4253 return status;
4254}
4255
4256/**
4257 * ice_fw_supports_link_override
4258 * @hw: pointer to the hardware structure
4259 *
4260 * Checks if the firmware supports link override
4261 */
4262bool ice_fw_supports_link_override(struct ice_hw *hw)
4263{
4264 /* Currently, only supported for E810 devices */
4265 if (hw->mac_type != ICE_MAC_E810)
4266 return false;
4267
4268 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4269 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4270 return true;
4271 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4272 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4273 return true;
4274 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4275 return true;
4276 }
4277
4278 return false;
4279}
4280
4281/**
4282 * ice_get_link_default_override
4283 * @ldo: pointer to the link default override struct
4284 * @pi: pointer to the port info struct
4285 *
4286 * Gets the link default override for a port
4287 */
4288enum ice_status
4289ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4290 struct ice_port_info *pi)
4291{
4292 u16 i, tlv, tlv_len, tlv_start, buf, offset;
4293 struct ice_hw *hw = pi->hw;
4294 enum ice_status status;
4295
4296 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4297 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4298 if (status) {
4299 ice_debug(hw, ICE_DBG_INIT,
4300 "Failed to read link override TLV.\n");
4301 return status;
4302 }
4303
4304 /* Each port has its own config; calculate for our port */
4305 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4306 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4307
4308 /* link options first */
4309 status = ice_read_sr_word(hw, tlv_start, &buf);
4310 if (status) {
4311 ice_debug(hw, ICE_DBG_INIT,
4312 "Failed to read override link options.\n");
4313 return status;
4314 }
4315 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4316 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4317 ICE_LINK_OVERRIDE_PHY_CFG_S;
4318
4319 /* link PHY config */
4320 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4321 status = ice_read_sr_word(hw, offset, &buf);
4322 if (status) {
4323 ice_debug(hw, ICE_DBG_INIT,
4324 "Failed to read override phy config.\n");
4325 return status;
4326 }
4327 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4328
4329 /* PHY types low */
4330 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4331 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4332 status = ice_read_sr_word(hw, (offset + i), &buf);
4333 if (status) {
4334 ice_debug(hw, ICE_DBG_INIT,
4335 "Failed to read override link options.\n");
4336 return status;
4337 }
4338 /* shift 16 bits at a time to fill 64 bits */
4339 ldo->phy_type_low |= ((u64)buf << (i * 16));
4340 }
4341
4342 /* PHY types high */
4343 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4344 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4345 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4346 status = ice_read_sr_word(hw, (offset + i), &buf);
4347 if (status) {
4348 ice_debug(hw, ICE_DBG_INIT,
4349 "Failed to read override link options.\n");
4350 return status;
4351 }
4352 /* shift 16 bits at a time to fill 64 bits */
4353 ldo->phy_type_high |= ((u64)buf << (i * 16));
4354 }
4355
4356 return status;
4357}
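
/* Word-assembly example for the loops above (commentary only): assuming
 * ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS covers four 16-bit NVM words w0..w3
 * (one 64-bit value), the result is built as
 *	phy_type_low = w0 | (w1 << 16) | (w2 << 32) | (w3 << 48)
 * so word 0 supplies the least significant bits of the PHY type mask.
 */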
4358
4359/**
4360 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4361 * @caps: get PHY capability data
4362 */
4363bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4364{
4365 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4366 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4367 ICE_AQC_PHY_AN_EN_CLAUSE73 |
4368 ICE_AQC_PHY_AN_EN_CLAUSE37))
4369 return true;
4370
4371 return false;
4372}
4373
4374/**
4375 * ice_aq_set_lldp_mib - Set the LLDP MIB
4376 * @hw: pointer to the HW struct
4377 * @mib_type: Local, Remote or both Local and Remote MIBs
4378 * @buf: pointer to the caller-supplied buffer to store the MIB block
4379 * @buf_size: size of the buffer (in bytes)
4380 * @cd: pointer to command details structure or NULL
4381 *
4382 * Set the LLDP MIB. (0x0A08)
4383 */
4384enum ice_status
4385ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4386 struct ice_sq_cd *cd)
4387{
4388 struct ice_aqc_lldp_set_local_mib *cmd;
4389 struct ice_aq_desc desc;
4390
4391 cmd = &desc.params.lldp_set_mib;
4392
4393 if (buf_size == 0 || !buf)
4394 return ICE_ERR_PARAM;
4395
4396 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4397
4398 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
4399 desc.datalen = cpu_to_le16(buf_size);
4400
4401 cmd->type = mib_type;
4402 cmd->length = cpu_to_le16(buf_size);
4403
4404 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4405}
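
/* Caller sketch (illustrative): push a locally built MIB block to FW. The
 * lldpmib buffer and its length are assumed to have been assembled by the
 * caller (the DCB configuration code is the expected user of this AQ);
 * mib_type selects which MIB is written, and the local-MIB value of 0 is
 * assumed here.
 *
 *	u8 mib_type = 0;
 *
 *	status = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen,
 *				     NULL);
 */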