// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
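
/* Note on the pattern above: nearly every AQ wrapper in this file follows
 * the same two steps -- fill a default descriptor with the opcode, then
 * hand it to ice_aq_send_cmd(). Direct commands pass a NULL buffer and a
 * zero length, as here; indirect commands (e.g. ice_aq_get_phy_caps())
 * pass a response buffer and its size instead.
 */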

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer. Interpret the buffer as a
 * "manage_mac_read" response.
 * MAC addresses from the response are stored in the HW struct (port.mac).
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
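
/* Usage sketch (mirrors ice_init_hw() below): since a port can report both
 * a LAN and a WoL address, callers size the buffer for two responses:
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
 *				 sizeof(struct ice_aqc_manage_mac_read_resp),
 *				 GFP_KERNEL);
 *
 *	if (buf)
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 */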

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "	extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
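
/* Usage sketch (mirrors ice_init_hw() below): allocate the response
 * structure and pick a report mode, e.g. the topology/media capabilities:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (pcaps)
 *		status = ice_aq_get_phy_caps(hw->port_info, false,
 *					     ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *					     pcaps, NULL);
 */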

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
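
/* Usage sketch: on success the port info is current, so callers can read
 * link state straight from it, e.g.
 *
 *	if (!ice_aq_get_link_info(pi, false, NULL, NULL) &&
 *	    (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		speed = pi->phy.link_info.link_speed;
 *
 * Passing true for ena_lse additionally arms Link Status Event reporting
 * from firmware.
 */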

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *config;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
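
/* Usage sketch (module_id and desired_event_bits are placeholders): per the
 * kernel-doc above, a caller seeds hw->fw_log before device init, e.g.
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module_id].cfg = desired_event_bits;
 *
 * ice_init_hw() then calls ice_cfg_fw_log(hw, true) and treats a failure as
 * non-fatal; ice_deinit_hw() calls ice_cfg_fw_log(hw, false) before shutting
 * down the control queues.
 */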

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
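
/* Worked example of the timeout math above (illustrative): a GRSTDEL field
 * of 35 means 35 * 100 ms = 3.5 s of reset delay; adding 10 iterations
 * (1 s) for outstanding AQ commands gives grst_timeout = 45 polls of
 * 100 ms each.
 */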

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	/* Compare against the full loop bound so a timeout is actually
	 * detected (comparing against ICE_PF_RESET_WAIT_COUNT alone would
	 * never match after the loop above runs to completion).
	 */
	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
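
/* Usage sketch (ring_dma, ring_count and pf_q are placeholders): callers
 * fill the sparse struct and let ice_write_rxq_ctx() pack it according to
 * ice_rlan_ctx_info and program the queue's QRX_CONTEXT registers:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	(the base field is in 128-byte units)
 *	rlan_ctx.qlen = ring_count;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
 */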

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
						   (struct ice_aq_desc *)desc,
						   buf, buf_size, cd));
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
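
/* Usage sketch (dest_dev and reg_addr are placeholders): per the logic
 * above, a read leaves in.opcode at 0 and picks the result up from in.data
 * on return, while a write sets a non-zero opcode and supplies in.data:
 *
 *	struct ice_sbq_msg_input in = { 0 };
 *
 *	in.dest_dev = phy_dev_id;
 *	in.msg_addr_low = reg_addr & 0xffff;
 *	in.msg_addr_high = reg_addr >> 16;
 *	err = ice_sbq_rw_reg(hw, &in);
 *	if (!err)
 *		reg_val = in.data;
 */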
1366
1367/* FW Admin Queue command wrappers */
1368
1369/* Software lock/mutex that is meant to be held while the Global Config Lock
1370 * in firmware is acquired by the software to prevent most (but not all) types
1371 * of AQ commands from being sent to FW
1372 */
1373DEFINE_MUTEX(ice_global_cfg_lock_sw);
1374
1375/**
1376 * ice_should_retry_sq_send_cmd
1377 * @opcode: AQ opcode
1378 *
1379 * Decide if we should retry the send command routine for the ATQ, depending
1380 * on the opcode.
1381 */
1382static bool ice_should_retry_sq_send_cmd(u16 opcode)
1383{
1384 switch (opcode) {
1385 case ice_aqc_opc_get_link_topo:
1386 case ice_aqc_opc_lldp_stop:
1387 case ice_aqc_opc_lldp_start:
1388 case ice_aqc_opc_lldp_filter_ctrl:
1389 return true;
1390 }
1391
1392 return false;
1393}
1394
1395/**
1396 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1397 * @hw: pointer to the HW struct
1398 * @cq: pointer to the specific Control queue
1399 * @desc: prefilled descriptor describing the command
1400 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1401 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1402 * @cd: pointer to command details structure
1403 *
1404 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1405 * Queue if the EBUSY AQ error is returned.
1406 */
1407static enum ice_status
1408ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1409 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1410 struct ice_sq_cd *cd)
1411{
1412 struct ice_aq_desc desc_cpy;
1413 enum ice_status status;
1414 bool is_cmd_for_retry;
1415 u8 *buf_cpy = NULL;
1416 u8 idx = 0;
1417 u16 opcode;
1418
1419 opcode = le16_to_cpu(desc->opcode);
1420 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1421 memset(&desc_cpy, 0, sizeof(desc_cpy));
1422
1423 if (is_cmd_for_retry) {
1424 if (buf) {
1425 buf_cpy = kzalloc(buf_size, GFP_KERNEL);
1426 if (!buf_cpy)
1427 return ICE_ERR_NO_MEMORY;
1428 }
1429
1430 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1431 }
1432
1433 do {
1434 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1435
1436 if (!is_cmd_for_retry || !status ||
1437 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1438 break;
1439
1440 if (buf_cpy)
1441 memcpy(buf, buf_cpy, buf_size);
1442
1443 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1444
1445 mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
1446
1447 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1448
1449 kfree(buf_cpy);
1450
1451 return status;
1452}
1453
1454/**
1455 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1456 * @hw: pointer to the HW struct
1457 * @desc: descriptor describing the command
1458 * @buf: buffer to use for indirect commands (NULL for direct commands)
1459 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1460 * @cd: pointer to command details structure
1461 *
1462 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1463 */
1464enum ice_status
1465ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1466 u16 buf_size, struct ice_sq_cd *cd)
1467{
1468 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1469 bool lock_acquired = false;
1470 enum ice_status status;
1471
1472 /* When a package download is in process (i.e. when the firmware's
1473 * Global Configuration Lock resource is held), only the Download
1474 * Package, Get Version, Get Package Info List and Release Resource
1475 * (with resource ID set to Global Config Lock) AdminQ commands are
1476 * allowed; all others must block until the package download completes
1477 * and the Global Config Lock is released. See also
1478 * ice_acquire_global_cfg_lock().
1479 */
1480 switch (le16_to_cpu(desc->opcode)) {
1481 case ice_aqc_opc_download_pkg:
1482 case ice_aqc_opc_get_pkg_info_list:
1483 case ice_aqc_opc_get_ver:
1484 break;
1485 case ice_aqc_opc_release_res:
1486 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1487 break;
1488 fallthrough;
1489 default:
1490 mutex_lock(&ice_global_cfg_lock_sw);
1491 lock_acquired = true;
1492 break;
1493 }
1494
1495 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1496 if (lock_acquired)
1497 mutex_unlock(&ice_global_cfg_lock_sw);
1498
1499 return status;
1500}
1501
1502/**
1503 * ice_aq_get_fw_ver
1504 * @hw: pointer to the HW struct
1505 * @cd: pointer to command details structure or NULL
1506 *
1507 * Get the firmware version (0x0001) from the admin queue commands
1508 */
1509enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1510{
1511 struct ice_aqc_get_ver *resp;
1512 struct ice_aq_desc desc;
1513 enum ice_status status;
1514
1515 resp = &desc.params.get_ver;
1516
1517 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1518
1519 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1520
1521 if (!status) {
1522 hw->fw_branch = resp->fw_branch;
1523 hw->fw_maj_ver = resp->fw_major;
1524 hw->fw_min_ver = resp->fw_minor;
1525 hw->fw_patch = resp->fw_patch;
1526 hw->fw_build = le32_to_cpu(resp->fw_build);
1527 hw->api_branch = resp->api_branch;
1528 hw->api_maj_ver = resp->api_major;
1529 hw->api_min_ver = resp->api_minor;
1530 hw->api_patch = resp->api_patch;
1531 }
1532
1533 return status;
1534}
1535
1536/**
1537 * ice_aq_send_driver_ver
1538 * @hw: pointer to the HW struct
1539 * @dv: driver's major, minor version
1540 * @cd: pointer to command details structure or NULL
1541 *
1542 * Send the driver version (0x0002) to the firmware
1543 */
1544enum ice_status
1545ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1546 struct ice_sq_cd *cd)
1547{
1548 struct ice_aqc_driver_ver *cmd;
1549 struct ice_aq_desc desc;
1550 u16 len;
1551
1552 cmd = &desc.params.driver_ver;
1553
1554 if (!dv)
1555 return ICE_ERR_PARAM;
1556
1557 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1558
1559 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1560 cmd->major_ver = dv->major_ver;
1561 cmd->minor_ver = dv->minor_ver;
1562 cmd->build_ver = dv->build_ver;
1563 cmd->subbuild_ver = dv->subbuild_ver;
1564
1565 len = 0;
1566 while (len < sizeof(dv->driver_string) &&
1567 isascii(dv->driver_string[len]) && dv->driver_string[len])
1568 len++;
1569
1570 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1571}
1572
1573/**
1574 * ice_aq_q_shutdown
1575 * @hw: pointer to the HW struct
1576 * @unloading: is the driver unloading itself
1577 *
1578 * Tell the Firmware that we're shutting down the AdminQ and whether
1579 * or not the driver is unloading as well (0x0003).
1580 */
1581enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1582{
1583 struct ice_aqc_q_shutdown *cmd;
1584 struct ice_aq_desc desc;
1585
1586 cmd = &desc.params.q_shutdown;
1587
1588 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1589
1590 if (unloading)
1591 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1592
1593 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1594}
1595
1596/**
1597 * ice_aq_req_res
1598 * @hw: pointer to the HW struct
1599 * @res: resource ID
1600 * @access: access type
1601 * @sdp_number: resource number
1602 * @timeout: the maximum time in ms that the driver may hold the resource
1603 * @cd: pointer to command details structure or NULL
1604 *
1605 * Requests common resource using the admin queue commands (0x0008).
1606 * When attempting to acquire the Global Config Lock, the driver can
1607 * learn of three states:
1608 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1609 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1610 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1611 * successfully downloaded the package; the driver does
1612 * not have to download the package and can continue
1613 * loading
1614 *
1615 * Note that if the caller is in an acquire lock, perform action, release lock
1616 * phase of operation, it is possible that the FW may detect a timeout and issue
1617 * a CORER. In this case, the driver will receive a CORER interrupt and will
1618 * have to determine its cause. The calling thread that is handling this flow
1619 * will likely get an error propagated back to it indicating the Download
1620 * Package, Update Package or the Release Resource AQ commands timed out.
1621 */
1622static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
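
/* Usage sketch (illustrative only, not taken from this file): callers are
 * expected to bracket their critical section with the acquire/release pair
 * above. The resource ID and the 3000 ms timeout are example values chosen
 * for this sketch.
 *
 *	enum ice_status status;
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (status)
 *		return status;
 *	(critical section: access the shared NVM resource here)
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */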

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < flex_array_size(buf, elem, num_entries))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	memcpy(res, buf->elem, sizeof(*buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}
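
/* Usage sketch (illustrative only): allocate one resource of a given type
 * and later free it with ice_free_hw_res(), defined just below.
 * ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK stands in here for any valid
 * resource type.
 *
 *	u16 res_id;
 *
 *	if (!ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, 1,
 *			      true, &res_id)) {
 *		(use res_id)
 *		ice_free_hw_res(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, 1,
 *				&res_id);
 *	}
 */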

/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(*buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M 0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
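
/* Worked example (values assumed for illustration): if valid_functions is
 * 0xFF, hweight8() counts 8 PFs, so ice_get_num_per_func(hw, 768) returns
 * 768 / 8 = 96 resources per PF; with only two PFs active it would return
 * 768 / 2 = 384.
 */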

/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
			  caps->sr_iov_1_1);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_RDMA:
		caps->rdma = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}

/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
 */
static void
ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
{
	/* This assumes device capabilities are always scanned before function
	 * capabilities during the initialization flow.
	 */
	if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
		caps->maxtc = 4;
		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
			  caps->maxtc);
		if (caps->rdma) {
			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
			caps->rdma = 0;
		}

		/* print message only when processing device capabilities
		 * during initialization.
		 */
		if (caps == &hw->dev_caps.common_cap)
			dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
	}
}

/**
 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VF.
 */
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 number = le32_to_cpu(cap->number);

	func_p->num_allocd_vfs = number;
	func_p->vf_base_id = logical_id;
	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
		  func_p->num_allocd_vfs);
	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
		  func_p->vf_base_id);
}

/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  le32_to_cpu(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}

/**
 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_1588.
 */
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			 struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_func_info *info = &func_p->ts_func_info;
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
	func_p->common_cap.ieee_1588 = info->ena;

	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);

	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, val;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
		GLQF_FD_SIZE_FD_GSIZE_S;
	func_p->fd_fltr_guar =
		ice_get_num_per_func(hw, val);
	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
		GLQF_FD_SIZE_FD_BSIZE_S;
	func_p->fd_fltr_best_effort = val;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
2190 */
2191static void
2192ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2193 void *buf, u32 cap_count)
2194{
2195 struct ice_aqc_list_caps_elem *cap_resp;
2196 u32 i;
2197
2198 cap_resp = buf;
2199
2200 memset(func_p, 0, sizeof(*func_p));
2201
2202 for (i = 0; i < cap_count; i++) {
2203 u16 cap = le16_to_cpu(cap_resp[i].cap);
2204 bool found;
2205
2206 found = ice_parse_common_caps(hw, &func_p->common_cap,
2207 &cap_resp[i], "func caps");
2208
2209 switch (cap) {
2210 case ICE_AQC_CAPS_VF:
2211 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2212 break;
2213 case ICE_AQC_CAPS_VSI:
2214 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2215 break;
2216 case ICE_AQC_CAPS_1588:
2217 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2218 break;
2219 case ICE_AQC_CAPS_FD:
2220 ice_parse_fdir_func_caps(hw, func_p);
2221 break;
2222 default:
2223 /* Don't list common capabilities as unknown */
2224 if (!found)
2225 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2226 i, cap);
2227 break;
2228 }
2229 }
2230
2231 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2232}
2233
2234/**
2235 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2236 * @hw: pointer to the HW struct
2237 * @dev_p: pointer to device capabilities structure
2238 * @cap: capability element to parse
2239 *
2240 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2241 */
2242static void
2243ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2244 struct ice_aqc_list_caps_elem *cap)
2245{
2246 u32 number = le32_to_cpu(cap->number);
2247
2248 dev_p->num_funcs = hweight32(number);
2249 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2250 dev_p->num_funcs);
2251}
2252
2253/**
2254 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2255 * @hw: pointer to the HW struct
2256 * @dev_p: pointer to device capabilities structure
2257 * @cap: capability element to parse
2258 *
2259 * Parse ICE_AQC_CAPS_VF for device capabilities.
2260 */
2261static void
2262ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2263 struct ice_aqc_list_caps_elem *cap)
2264{
2265 u32 number = le32_to_cpu(cap->number);
2266
2267 dev_p->num_vfs_exposed = number;
2268 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2269 dev_p->num_vfs_exposed);
2270}
2271
2272/**
2273 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2274 * @hw: pointer to the HW struct
2275 * @dev_p: pointer to device capabilities structure
2276 * @cap: capability element to parse
2277 *
2278 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2279 */
2280static void
2281ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2282 struct ice_aqc_list_caps_elem *cap)
2283{
2284 u32 number = le32_to_cpu(cap->number);
2285
2286 dev_p->num_vsi_allocd_to_host = number;
2287 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2288 dev_p->num_vsi_allocd_to_host);
2289}
2290
2291/**
2292 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2293 * @hw: pointer to the HW struct
2294 * @dev_p: pointer to device capabilities structure
2295 * @cap: capability element to parse
2296 *
2297 * Parse ICE_AQC_CAPS_1588 for device capabilities.
2298 */
2299static void
2300ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2301 struct ice_aqc_list_caps_elem *cap)
2302{
2303 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2304 u32 logical_id = le32_to_cpu(cap->logical_id);
2305 u32 phys_id = le32_to_cpu(cap->phys_id);
2306 u32 number = le32_to_cpu(cap->number);
2307
2308 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2309 dev_p->common_cap.ieee_1588 = info->ena;
2310
2311 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2312 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2313 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2314
2315 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2316 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2317 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2318
2319 info->ena_ports = logical_id;
2320 info->tmr_own_map = phys_id;
2321
2322 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2323 dev_p->common_cap.ieee_1588);
2324 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2325 info->tmr0_owner);
2326 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2327 info->tmr0_owned);
2328 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2329 info->tmr0_ena);
2330 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2331 info->tmr1_owner);
2332 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2333 info->tmr1_owned);
2334 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2335 info->tmr1_ena);
2336 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2337 info->ena_ports);
2338 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2339 info->tmr_own_map);
2340}
2341
2342/**
2343 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2344 * @hw: pointer to the HW struct
2345 * @dev_p: pointer to device capabilities structure
2346 * @cap: capability element to parse
2347 *
2348 * Parse ICE_AQC_CAPS_FD for device capabilities.
2349 */
2350static void
2351ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2352 struct ice_aqc_list_caps_elem *cap)
2353{
2354 u32 number = le32_to_cpu(cap->number);
2355
2356 dev_p->num_flow_director_fltr = number;
2357 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2358 dev_p->num_flow_director_fltr);
2359}
2360
2361/**
2362 * ice_parse_dev_caps - Parse device capabilities
2363 * @hw: pointer to the HW struct
2364 * @dev_p: pointer to device capabilities structure
2365 * @buf: buffer containing the device capability records
2366 * @cap_count: the number of capabilities
2367 *
 * Helper function to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
 */
static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		   void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(dev_p, 0, sizeof(*dev_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_1588:
			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}

/**
 * ice_aq_list_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a buffer to hold the capabilities
 * @buf_size: size of the buffer
 * @cap_count: if not NULL, set to the number of capabilities reported
 * @opc: capabilities type to discover, device or function
 * @cd: pointer to command details structure or NULL
 *
 * Get the function (0x000A) or device (0x000B) capabilities description from
 * firmware and store it in the buffer.
 *
 * If the cap_count pointer is not NULL, then it is set to the number of
 * capabilities firmware will report. Note that if the buffer size is too
 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
 * cap_count will still be updated in this case. It is recommended that the
 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
 * firmware could return) to avoid this.
 */
enum ice_status
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (cap_count)
		*cap_count = le32_to_cpu(cmd->count);

	return status;
}

/**
 * ice_discover_dev_caps - Read and extract device capabilities
 * @hw: pointer to the hardware structure
 * @dev_caps: pointer to device capabilities structure
 *
 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
 */
enum ice_status
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
	enum ice_status status;
	u32 cap_count = 0;
	void *cbuf;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return ICE_ERR_NO_MEMORY;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (!status)
		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps structure
 * for later use.
 */
static enum ice_status
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
	enum ice_status status;
	u32 cap_count = 0;
	void *cbuf;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return ICE_ERR_NO_MEMORY;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_func_caps, NULL);
	if (!status)
		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_dev_caps(hw, &hw->dev_caps);
	if (status)
		return status;

	return ice_discover_func_caps(hw, &hw->func_caps);
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;
	ether_addr_copy(cmd->mac_addr, mac_addr);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
 * should be set, as this function converts a single PHY type to its
 * speed.
 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}
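
/* Example (illustrative only): a phy_type with exactly one bit set maps to
 * its speed, e.g.
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR,
 *						     0);
 *
 * yields ICE_AQ_LINK_SPEED_25GB, while a value with two PHY type bits set
 * matches no case in either switch and yields ICE_AQ_LINK_SPEED_UNKNOWN.
 */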

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap format, see
 * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in the [phy_type_low, phy_type_high] structure represents
 * a certain link speed. This helper function turns on bits in
 * [phy_type_low, phy_type_high] based on the value of the
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}
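
/* Usage sketch (illustrative only): build the PHY type masks for a user
 * request restricted to 10G and 25G before filling a set PHY config
 * request.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * Afterwards every 10G and 25G PHY type bit is set in phy_low/phy_high and
 * the pair can be copied into struct ice_aqc_set_phy_cfg_data.
 */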

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = 0;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
enum ice_status
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_BAD_PTR;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
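
/* Usage sketch (illustrative only): request symmetric flow control and let
 * ice_set_fc() restart the link so the setting takes effect.
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *
 * On failure, aq_failures narrows down which step failed:
 * ICE_SET_FC_AQ_FAIL_GET, ICE_SET_FC_AQ_FAIL_SET or
 * ICE_SET_FC_AQ_FAIL_UPDATE.
 */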

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if the PHY capabilities match the PHY
 * configuration.
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = ICE_ERR_PARAM;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv;

		if (ice_get_link_default_override(&tlv, pi))
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 for read, 1 for write.
3438 * @cd: pointer to command details structure or NULL
3439 *
3440 * Read/Write SFF EEPROM (0x06EE)
3441 */
3442enum ice_status
3443ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3444 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3445 bool write, struct ice_sq_cd *cd)
3446{
3447 struct ice_aqc_sff_eeprom *cmd;
3448 struct ice_aq_desc desc;
3449 enum ice_status status;
3450
3451 if (!data || (mem_addr & 0xff00))
3452 return ICE_ERR_PARAM;
3453
3454 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3455 cmd = &desc.params.read_write_sff_param;
3456 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3457 cmd->lport_num = (u8)(lport & 0xff);
3458 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3459 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3460 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3461 ((set_page <<
3462 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3463 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3464 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3465 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3466 if (write)
3467 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3468
3469	return ice_aq_send_cmd(hw, &desc, data, length, cd);
3471}
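
/* Usage sketch (illustrative only): reading the first 8 bytes of a module's
 * EEPROM at the conventional 0xA0 I2C address, leaving the page untouched.
 * Assumes a valid @hw; bit 8 of @lport marks the logical port as valid.
 *
 *	u8 data[8];
 *	u16 lport = hw->port_info->lport | BIT(8);
 *	enum ice_status status;
 *
 *	status = ice_aq_sff_eeprom(hw, lport, 0xA0, 0, 0, 0, data,
 *				   sizeof(data), false, NULL);
 *
 * On success, data[] holds the first 8 bytes of the module's EEPROM.
 */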
3472
3473/**
3474 * __ice_aq_get_set_rss_lut
3475 * @hw: pointer to the hardware structure
3476 * @params: RSS LUT parameters
3477 * @set: set true to set the table, false to get the table
3478 *
3479 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3480 */
3481static enum ice_status
3482__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3483{
3484 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3485 struct ice_aqc_get_set_rss_lut *cmd_resp;
3486 struct ice_aq_desc desc;
3487 enum ice_status status;
3488 u8 *lut;
3489
3490 if (!params)
3491 return ICE_ERR_PARAM;
3492
3493 vsi_handle = params->vsi_handle;
3494 lut = params->lut;
3495
3496 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3497 return ICE_ERR_PARAM;
3498
3499 lut_size = params->lut_size;
3500 lut_type = params->lut_type;
3501 glob_lut_idx = params->global_lut_id;
3502 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3503
3504 cmd_resp = &desc.params.get_set_rss_lut;
3505
3506 if (set) {
3507 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3508 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3509 } else {
3510 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3511 }
3512
3513 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3514 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3515 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3516 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3517
3518 switch (lut_type) {
3519 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3520 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3521 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3522 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3523 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3524 break;
3525 default:
3526 status = ICE_ERR_PARAM;
3527 goto ice_aq_get_set_rss_lut_exit;
3528 }
3529
3530 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3531 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3532 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3533
3534 if (!set)
3535 goto ice_aq_get_set_rss_lut_send;
3536 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3537 if (!set)
3538 goto ice_aq_get_set_rss_lut_send;
3539 } else {
3540 goto ice_aq_get_set_rss_lut_send;
3541 }
3542
3543 /* LUT size is only valid for Global and PF table types */
3544 switch (lut_size) {
3545 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3546 break;
3547 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3548 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3549 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3550 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3551 break;
3552 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3553 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3554 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3555 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3556 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3557 break;
3558 }
3559 fallthrough;
3560 default:
3561 status = ICE_ERR_PARAM;
3562 goto ice_aq_get_set_rss_lut_exit;
3563 }
3564
3565ice_aq_get_set_rss_lut_send:
3566 cmd_resp->flags = cpu_to_le16(flags);
3567 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3568
3569ice_aq_get_set_rss_lut_exit:
3570 return status;
3571}
3572
3573/**
3574 * ice_aq_get_rss_lut
3575 * @hw: pointer to the hardware structure
3576 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3577 *
3578 * get the RSS lookup table, PF or VSI type
3579 */
3580enum ice_status
3581ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3582{
3583 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3584}
3585
3586/**
3587 * ice_aq_set_rss_lut
3588 * @hw: pointer to the hardware structure
3589 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3590 *
3591 * set the RSS lookup table, PF or VSI type
3592 */
3593enum ice_status
3594ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3595{
3596 return __ice_aq_get_set_rss_lut(hw, set_params, true);
3597}
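
/* Usage sketch (illustrative only): programming a 512-entry PF LUT with a
 * round-robin spread across @num_qs queues. Assumes a valid @hw and
 * @vsi_handle; @num_qs is a hypothetical queue count.
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % num_qs;
 *	params.vsi_handle = vsi_handle;
 *	params.lut = lut;
 *	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 *	params.lut_size = sizeof(lut);
 *	status = ice_aq_set_rss_lut(hw, &params);
 */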
3598
3599/**
3600 * __ice_aq_get_set_rss_key
3601 * @hw: pointer to the HW struct
3602 * @vsi_id: VSI FW index
3603 * @key: pointer to key info struct
3604 * @set: set true to set the key, false to get the key
3605 *
3606 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3607 */
3608static enum ice_status
3609__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3610 struct ice_aqc_get_set_rss_keys *key,
3611 bool set)
3612{
3613 struct ice_aqc_get_set_rss_key *cmd_resp;
3614 u16 key_size = sizeof(*key);
3615 struct ice_aq_desc desc;
3616
3617 cmd_resp = &desc.params.get_set_rss_key;
3618
3619 if (set) {
3620 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3621 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3622 } else {
3623 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3624 }
3625
3626 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3627 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3628 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3629 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3630
3631 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3632}
3633
3634/**
3635 * ice_aq_get_rss_key
3636 * @hw: pointer to the HW struct
3637 * @vsi_handle: software VSI handle
3638 * @key: pointer to key info struct
3639 *
3640 * get the RSS key per VSI
3641 */
3642enum ice_status
3643ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3644 struct ice_aqc_get_set_rss_keys *key)
3645{
3646 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3647 return ICE_ERR_PARAM;
3648
3649 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3650 key, false);
3651}
3652
3653/**
3654 * ice_aq_set_rss_key
3655 * @hw: pointer to the HW struct
3656 * @vsi_handle: software VSI handle
3657 * @keys: pointer to key info struct
3658 *
3659 * set the RSS key per VSI
3660 */
3661enum ice_status
3662ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3663 struct ice_aqc_get_set_rss_keys *keys)
3664{
3665 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3666 return ICE_ERR_PARAM;
3667
3668 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3669 keys, true);
3670}
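
/* Usage sketch (illustrative only): programming a random RSS hash key for a
 * VSI. Assumes a valid @hw and @vsi_handle.
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	netdev_rss_key_fill(keys.standard_rss_key,
 *			    sizeof(keys.standard_rss_key));
 *	if (ice_aq_set_rss_key(hw, vsi_handle, &keys))
 *		ice_debug(hw, ICE_DBG_INIT, "failed to set RSS key\n");
 */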
3671
3672/**
3673 * ice_aq_add_lan_txq
3674 * @hw: pointer to the hardware structure
3675 * @num_qgrps: Number of added queue groups
3676 * @qg_list: list of queue groups to be added
3677 * @buf_size: size of buffer for indirect command
3678 * @cd: pointer to command details structure or NULL
3679 *
3680 * Add Tx LAN queue (0x0C30)
3681 *
3682 * NOTE:
3683 * Prior to calling add Tx LAN queue, initialize the following as part of
3684 * the Tx queue context: completion queue ID (if the queue uses a
3685 * completion queue), quanta profile, cache profile, and packet shaper
3686 * profile.
3687 *
3688 * After the add Tx LAN queue AQ command completes, interrupts should be
3689 * associated with the specific queues. Association of a Tx queue with a
3690 * doorbell queue is not part of the add LAN Tx queue flow.
3692 */
3693static enum ice_status
3694ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3695 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3696 struct ice_sq_cd *cd)
3697{
3698 struct ice_aqc_add_tx_qgrp *list;
3699 struct ice_aqc_add_txqs *cmd;
3700 struct ice_aq_desc desc;
3701 u16 i, sum_size = 0;
3702
3703 cmd = &desc.params.add_txqs;
3704
3705 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3706
3707 if (!qg_list)
3708 return ICE_ERR_PARAM;
3709
3710 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3711 return ICE_ERR_PARAM;
3712
3713 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3714 sum_size += struct_size(list, txqs, list->num_txqs);
3715 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3716 list->num_txqs);
3717 }
3718
3719 if (buf_size != sum_size)
3720 return ICE_ERR_PARAM;
3721
3722 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3723
3724 cmd->num_qgrps = num_qgrps;
3725
3726 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3727}
3728
3729/**
3730 * ice_aq_dis_lan_txq
3731 * @hw: pointer to the hardware structure
3732 * @num_qgrps: number of groups in the list
3733 * @qg_list: the list of groups to disable
3734 * @buf_size: the total size of the qg_list buffer in bytes
3735 * @rst_src: if called due to reset, specifies the reset source
3736 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3737 * @cd: pointer to command details structure or NULL
3738 *
3739 * Disable LAN Tx queue (0x0C31)
3740 */
3741static enum ice_status
3742ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3743 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3744 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3745 struct ice_sq_cd *cd)
3746{
3747 struct ice_aqc_dis_txq_item *item;
3748 struct ice_aqc_dis_txqs *cmd;
3749 struct ice_aq_desc desc;
3750 enum ice_status status;
3751 u16 i, sz = 0;
3752
3753 cmd = &desc.params.dis_txqs;
3754 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3755
3756 /* qg_list can be NULL only in VM/VF reset flow */
3757 if (!qg_list && !rst_src)
3758 return ICE_ERR_PARAM;
3759
3760 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3761 return ICE_ERR_PARAM;
3762
3763 cmd->num_entries = num_qgrps;
3764
3765 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3766 ICE_AQC_Q_DIS_TIMEOUT_M);
3767
3768 switch (rst_src) {
3769 case ICE_VM_RESET:
3770 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3771 cmd->vmvf_and_timeout |=
3772 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3773 break;
3774 case ICE_VF_RESET:
3775 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3776 /* In this case, FW expects vmvf_num to be absolute VF ID */
3777 cmd->vmvf_and_timeout |=
3778 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3779 ICE_AQC_Q_DIS_VMVF_NUM_M);
3780 break;
3781 case ICE_NO_RESET:
3782 default:
3783 break;
3784 }
3785
3786	/* flush the pipe on timeout */
3787 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3788 /* If no queue group info, we are in a reset flow. Issue the AQ */
3789 if (!qg_list)
3790 goto do_aq;
3791
3792 /* set RD bit to indicate that command buffer is provided by the driver
3793 * and it needs to be read by the firmware
3794 */
3795 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3796
3797 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3798 u16 item_size = struct_size(item, q_id, item->num_qs);
3799
3800 /* If the num of queues is even, add 2 bytes of padding */
3801		/* If the number of queues is even, add 2 bytes of padding */
3802 item_size += 2;
3803
3804 sz += item_size;
3805
3806 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3807 }
3808
3809 if (buf_size != sz)
3810 return ICE_ERR_PARAM;
3811
3812do_aq:
3813 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3814 if (status) {
3815 if (!qg_list)
3816 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3817 vmvf_num, hw->adminq.sq_last_status);
3818 else
3819 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3820 le16_to_cpu(qg_list[0].q_id[0]),
3821 hw->adminq.sq_last_status);
3822 }
3823 return status;
3824}
3825
3826/**
3827 * ice_aq_add_rdma_qsets
3828 * @hw: pointer to the hardware structure
3829 * @num_qset_grps: Number of RDMA Qset groups
3830 * @qset_list: list of Qset groups to be added
3831 * @buf_size: size of buffer for indirect command
3832 * @cd: pointer to command details structure or NULL
3833 *
3834 * Add Tx RDMA Qsets (0x0C33)
3835 */
3836static int
3837ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
3838 struct ice_aqc_add_rdma_qset_data *qset_list,
3839 u16 buf_size, struct ice_sq_cd *cd)
3840{
3841 struct ice_aqc_add_rdma_qset_data *list;
3842 struct ice_aqc_add_rdma_qset *cmd;
3843 struct ice_aq_desc desc;
3844 u16 i, sum_size = 0;
3845
3846 cmd = &desc.params.add_rdma_qset;
3847
3848 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
3849
3850 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
3851 return -EINVAL;
3852
3853 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
3854 u16 num_qsets = le16_to_cpu(list->num_qsets);
3855
3856 sum_size += struct_size(list, rdma_qsets, num_qsets);
3857 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
3858 num_qsets);
3859 }
3860
3861 if (buf_size != sum_size)
3862 return -EINVAL;
3863
3864 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3865
3866 cmd->num_qset_grps = num_qset_grps;
3867
3868 return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
3869 buf_size, cd));
3870}
3871
3872/* End of FW Admin Queue command wrappers */
3873
3874/**
3875 * ice_write_byte - write a byte to a packed context structure
3876 * @src_ctx: the context structure to read from
3877 * @dest_ctx: the context to be written to
3878 * @ce_info: a description of the struct to be filled
3879 */
3880static void
3881ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3882{
3883 u8 src_byte, dest_byte, mask;
3884 u8 *from, *dest;
3885 u16 shift_width;
3886
3887 /* copy from the next struct field */
3888 from = src_ctx + ce_info->offset;
3889
3890 /* prepare the bits and mask */
3891 shift_width = ce_info->lsb % 8;
3892 mask = (u8)(BIT(ce_info->width) - 1);
3893
3894 src_byte = *from;
3895 src_byte &= mask;
3896
3897 /* shift to correct alignment */
3898 mask <<= shift_width;
3899 src_byte <<= shift_width;
3900
3901 /* get the current bits from the target bit string */
3902 dest = dest_ctx + (ce_info->lsb / 8);
3903
3904 memcpy(&dest_byte, dest, sizeof(dest_byte));
3905
3906 dest_byte &= ~mask; /* get the bits not changing */
3907 dest_byte |= src_byte; /* add in the new bits */
3908
3909 /* put it all back */
3910 memcpy(dest, &dest_byte, sizeof(dest_byte));
3911}
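
/* Worked example (illustrative only): for a field with lsb = 11 and
 * width = 3, shift_width = 11 % 8 = 3 and mask = 0b00000111. A source
 * value of 5 (0b101) becomes 0b00101000 after the shift, and is merged
 * into byte 11 / 8 = 1 of the packed buffer with the other five bits of
 * that destination byte preserved.
 */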
3912
3913/**
3914 * ice_write_word - write a word to a packed context structure
3915 * @src_ctx: the context structure to read from
3916 * @dest_ctx: the context to be written to
3917 * @ce_info: a description of the struct to be filled
3918 */
3919static void
3920ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3921{
3922 u16 src_word, mask;
3923 __le16 dest_word;
3924 u8 *from, *dest;
3925 u16 shift_width;
3926
3927 /* copy from the next struct field */
3928 from = src_ctx + ce_info->offset;
3929
3930 /* prepare the bits and mask */
3931 shift_width = ce_info->lsb % 8;
3932 mask = BIT(ce_info->width) - 1;
3933
3934 /* don't swizzle the bits until after the mask because the mask bits
3935 * will be in a different bit position on big endian machines
3936 */
3937 src_word = *(u16 *)from;
3938 src_word &= mask;
3939
3940 /* shift to correct alignment */
3941 mask <<= shift_width;
3942 src_word <<= shift_width;
3943
3944 /* get the current bits from the target bit string */
3945 dest = dest_ctx + (ce_info->lsb / 8);
3946
3947 memcpy(&dest_word, dest, sizeof(dest_word));
3948
3949 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
3950 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3951
3952 /* put it all back */
3953 memcpy(dest, &dest_word, sizeof(dest_word));
3954}
3955
3956/**
3957 * ice_write_dword - write a dword to a packed context structure
3958 * @src_ctx: the context structure to read from
3959 * @dest_ctx: the context to be written to
3960 * @ce_info: a description of the struct to be filled
3961 */
3962static void
3963ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3964{
3965 u32 src_dword, mask;
3966 __le32 dest_dword;
3967 u8 *from, *dest;
3968 u16 shift_width;
3969
3970 /* copy from the next struct field */
3971 from = src_ctx + ce_info->offset;
3972
3973 /* prepare the bits and mask */
3974 shift_width = ce_info->lsb % 8;
3975
3976	/* if the field width is exactly 32 on an x86 machine, then the shift
3977	 * operation will not work because the SHL instruction's shift count is
3978	 * masked to 5 bits, so the shift will do nothing
3979	 */
3980 if (ce_info->width < 32)
3981 mask = BIT(ce_info->width) - 1;
3982 else
3983 mask = (u32)~0;
3984
3985 /* don't swizzle the bits until after the mask because the mask bits
3986 * will be in a different bit position on big endian machines
3987 */
3988 src_dword = *(u32 *)from;
3989 src_dword &= mask;
3990
3991 /* shift to correct alignment */
3992 mask <<= shift_width;
3993 src_dword <<= shift_width;
3994
3995 /* get the current bits from the target bit string */
3996 dest = dest_ctx + (ce_info->lsb / 8);
3997
3998 memcpy(&dest_dword, dest, sizeof(dest_dword));
3999
4000 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
4001 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
4002
4003 /* put it all back */
4004 memcpy(dest, &dest_dword, sizeof(dest_dword));
4005}
4006
4007/**
4008 * ice_write_qword - write a qword to a packed context structure
4009 * @src_ctx: the context structure to read from
4010 * @dest_ctx: the context to be written to
4011 * @ce_info: a description of the struct to be filled
4012 */
4013static void
4014ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4015{
4016 u64 src_qword, mask;
4017 __le64 dest_qword;
4018 u8 *from, *dest;
4019 u16 shift_width;
4020
4021 /* copy from the next struct field */
4022 from = src_ctx + ce_info->offset;
4023
4024 /* prepare the bits and mask */
4025 shift_width = ce_info->lsb % 8;
4026
4027	/* if the field width is exactly 64 on an x86 machine, then the shift
4028	 * operation will not work because the SHL instruction's shift count is
4029	 * masked to 6 bits, so the shift will do nothing
4030	 */
4031 if (ce_info->width < 64)
4032 mask = BIT_ULL(ce_info->width) - 1;
4033 else
4034 mask = (u64)~0;
4035
4036 /* don't swizzle the bits until after the mask because the mask bits
4037 * will be in a different bit position on big endian machines
4038 */
4039 src_qword = *(u64 *)from;
4040 src_qword &= mask;
4041
4042 /* shift to correct alignment */
4043 mask <<= shift_width;
4044 src_qword <<= shift_width;
4045
4046 /* get the current bits from the target bit string */
4047 dest = dest_ctx + (ce_info->lsb / 8);
4048
4049 memcpy(&dest_qword, dest, sizeof(dest_qword));
4050
4051 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
4052 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
4053
4054 /* put it all back */
4055 memcpy(dest, &dest_qword, sizeof(dest_qword));
4056}
4057
4058/**
4059 * ice_set_ctx - set context bits in packed structure
4060 * @hw: pointer to the hardware structure
4061 * @src_ctx: pointer to a generic non-packed context structure
4062 * @dest_ctx: pointer to memory for the packed structure
4063 * @ce_info: a description of the structure to be transformed
4064 */
4065enum ice_status
4066ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4067 const struct ice_ctx_ele *ce_info)
4068{
4069 int f;
4070
4071 for (f = 0; ce_info[f].width; f++) {
4072 /* We have to deal with each element of the FW response
4073 * using the correct size so that we are correct regardless
4074 * of the endianness of the machine.
4075 */
4076 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4077 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4078 f, ce_info[f].width, ce_info[f].size_of);
4079 continue;
4080 }
4081 switch (ce_info[f].size_of) {
4082 case sizeof(u8):
4083 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4084 break;
4085 case sizeof(u16):
4086 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4087 break;
4088 case sizeof(u32):
4089 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4090 break;
4091 case sizeof(u64):
4092 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4093 break;
4094 default:
4095 return ICE_ERR_INVAL_SIZE;
4096 }
4097 }
4098
4099 return 0;
4100}
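
/* Usage sketch (illustrative only): packing a software context into the HW
 * layout with ice_set_ctx(). Real tables (e.g. ice_tlan_ctx_info) are built
 * with ICE_CTX_STORE(); the struct and table below are hypothetical, not a
 * real HW context.
 *
 *	struct my_ctx { u16 head; u8 ena; };	// hypothetical
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		// field,		width, lsb
 *		ICE_CTX_STORE(my_ctx,	head,	13, 0),
 *		ICE_CTX_STORE(my_ctx,	ena,	1, 13),
 *		{ 0 }
 *	};
 *	struct my_ctx ctx = { .head = 5, .ena = 1 };
 *	u8 packed[4] = { 0 };
 *
 *	ice_set_ctx(hw, (u8 *)&ctx, packed, my_ctx_info);
 */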
4101
4102/**
4103 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4104 * @hw: pointer to the HW struct
4105 * @vsi_handle: software VSI handle
4106 * @tc: TC number
4107 * @q_handle: software queue handle
4108 */
4109struct ice_q_ctx *
4110ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4111{
4112 struct ice_vsi_ctx *vsi;
4113 struct ice_q_ctx *q_ctx;
4114
4115 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4116 if (!vsi)
4117 return NULL;
4118 if (q_handle >= vsi->num_lan_q_entries[tc])
4119 return NULL;
4120 if (!vsi->lan_q_ctx[tc])
4121 return NULL;
4122 q_ctx = vsi->lan_q_ctx[tc];
4123 return &q_ctx[q_handle];
4124}
4125
4126/**
4127 * ice_ena_vsi_txq
4128 * @pi: port information structure
4129 * @vsi_handle: software VSI handle
4130 * @tc: TC number
4131 * @q_handle: software queue handle
4132 * @num_qgrps: Number of added queue groups
4133 * @buf: list of queue groups to be added
4134 * @buf_size: size of buffer for indirect command
4135 * @cd: pointer to command details structure or NULL
4136 *
4137 * This function adds one LAN queue
4138 */
4139enum ice_status
4140ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4141 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4142 struct ice_sq_cd *cd)
4143{
4144 struct ice_aqc_txsched_elem_data node = { 0 };
4145 struct ice_sched_node *parent;
4146 struct ice_q_ctx *q_ctx;
4147 enum ice_status status;
4148 struct ice_hw *hw;
4149
4150 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4151 return ICE_ERR_CFG;
4152
4153 if (num_qgrps > 1 || buf->num_txqs > 1)
4154 return ICE_ERR_MAX_LIMIT;
4155
4156 hw = pi->hw;
4157
4158 if (!ice_is_vsi_valid(hw, vsi_handle))
4159 return ICE_ERR_PARAM;
4160
4161 mutex_lock(&pi->sched_lock);
4162
4163 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4164 if (!q_ctx) {
4165 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4166 q_handle);
4167 status = ICE_ERR_PARAM;
4168 goto ena_txq_exit;
4169 }
4170
4171 /* find a parent node */
4172 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4173 ICE_SCHED_NODE_OWNER_LAN);
4174 if (!parent) {
4175 status = ICE_ERR_PARAM;
4176 goto ena_txq_exit;
4177 }
4178
4179 buf->parent_teid = parent->info.node_teid;
4180 node.parent_teid = parent->info.node_teid;
4181	/* Mark the values in the "generic" section as valid. The default
4182	 * value in the "generic" section is zero. This means that:
4183	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4184	 * - Priority 0 among siblings, indicated by Bits 1-3.
4185	 * - WFQ, indicated by Bit 4.
4186	 * - An adjustment value of 0 is used in the PSM credit update flow,
4187	 *   indicated by Bits 5-6.
4188	 * - Bit 7 is reserved.
4189	 * Without setting the generic section as valid in valid_sections, the
4190	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4191	 */
4192 buf->txqs[0].info.valid_sections =
4193 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4194 ICE_AQC_ELEM_VALID_EIR;
4195 buf->txqs[0].info.generic = 0;
4196 buf->txqs[0].info.cir_bw.bw_profile_idx =
4197 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4198 buf->txqs[0].info.cir_bw.bw_alloc =
4199 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4200 buf->txqs[0].info.eir_bw.bw_profile_idx =
4201 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4202 buf->txqs[0].info.eir_bw.bw_alloc =
4203 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4204
4205 /* add the LAN queue */
4206 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4207 if (status) {
4208 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4209 le16_to_cpu(buf->txqs[0].txq_id),
4210 hw->adminq.sq_last_status);
4211 goto ena_txq_exit;
4212 }
4213
4214 node.node_teid = buf->txqs[0].q_teid;
4215 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4216 q_ctx->q_handle = q_handle;
4217 q_ctx->q_teid = le32_to_cpu(node.node_teid);
4218
4219 /* add a leaf node into scheduler tree queue layer */
4220 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4221 if (!status)
4222 status = ice_sched_replay_q_bw(pi, q_ctx);
4223
4224ena_txq_exit:
4225 mutex_unlock(&pi->sched_lock);
4226 return status;
4227}
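
/* Usage sketch (illustrative only): enabling a single LAN Tx queue, in the
 * style of the driver's Tx setup path. Assumes @qg_buf was allocated with
 * room for one queue and that its packed queue context has already been
 * written (e.g. via ice_set_ctx); @q_id, @tc and @q_handle are
 * caller-provided.
 *
 *	u16 size = struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf->num_txqs = 1;
 *	qg_buf->txqs[0].txq_id = cpu_to_le16(q_id);
 *	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
 *				 size, NULL);
 */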
4228
4229/**
4230 * ice_dis_vsi_txq
4231 * @pi: port information structure
4232 * @vsi_handle: software VSI handle
4233 * @tc: TC number
4234 * @num_queues: number of queues
4235 * @q_handles: pointer to software queue handle array
4236 * @q_ids: pointer to the q_id array
4237 * @q_teids: pointer to queue node teids
4238 * @rst_src: if called due to reset, specifies the reset source
4239 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4240 * @cd: pointer to command details structure or NULL
4241 *
4242 * This function removes queues and their corresponding nodes in SW DB
4243 */
4244enum ice_status
4245ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4246 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4247 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4248 struct ice_sq_cd *cd)
4249{
4250 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4251 struct ice_aqc_dis_txq_item *qg_list;
4252 struct ice_q_ctx *q_ctx;
4253 struct ice_hw *hw;
4254 u16 i, buf_size;
4255
4256 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4257 return ICE_ERR_CFG;
4258
4259 hw = pi->hw;
4260
4261 if (!num_queues) {
4262		/* if the queues are already disabled but the disable queue
4263		 * command still has to be sent to complete the VF reset, then
4264		 * call ice_aq_dis_lan_txq without any queue information
4265		 */
4266 if (rst_src)
4267 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4268 vmvf_num, NULL);
4269 return ICE_ERR_CFG;
4270 }
4271
4272 buf_size = struct_size(qg_list, q_id, 1);
4273 qg_list = kzalloc(buf_size, GFP_KERNEL);
4274 if (!qg_list)
4275 return ICE_ERR_NO_MEMORY;
4276
4277 mutex_lock(&pi->sched_lock);
4278
4279 for (i = 0; i < num_queues; i++) {
4280 struct ice_sched_node *node;
4281
4282 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4283 if (!node)
4284 continue;
4285 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4286 if (!q_ctx) {
4287			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4288				  q_handles[i]);
4289 continue;
4290 }
4291 if (q_ctx->q_handle != q_handles[i]) {
4292 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4293 q_ctx->q_handle, q_handles[i]);
4294 continue;
4295 }
4296 qg_list->parent_teid = node->info.parent_teid;
4297 qg_list->num_qs = 1;
4298 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4299 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4300 vmvf_num, cd);
4301
4302 if (status)
4303 break;
4304 ice_free_sched_node(pi, node);
4305 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4306 }
4307 mutex_unlock(&pi->sched_lock);
4308 kfree(qg_list);
4309 return status;
4310}
4311
4312/**
4313 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4314 * @pi: port information structure
4315 * @vsi_handle: software VSI handle
4316 * @tc_bitmap: TC bitmap
4317 * @maxqs: max queues array per TC
4318 * @owner: LAN or RDMA
4319 *
4320 * This function adds/updates the VSI queues per TC.
4321 */
4322static enum ice_status
4323ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4324 u16 *maxqs, u8 owner)
4325{
4326 enum ice_status status = 0;
4327 u8 i;
4328
4329 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4330 return ICE_ERR_CFG;
4331
4332 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4333 return ICE_ERR_PARAM;
4334
4335 mutex_lock(&pi->sched_lock);
4336
4337 ice_for_each_traffic_class(i) {
4338 /* configuration is possible only if TC node is present */
4339 if (!ice_sched_get_tc_node(pi, i))
4340 continue;
4341
4342 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4343 ice_is_tc_ena(tc_bitmap, i));
4344 if (status)
4345 break;
4346 }
4347
4348 mutex_unlock(&pi->sched_lock);
4349 return status;
4350}
4351
4352/**
4353 * ice_cfg_vsi_lan - configure VSI LAN queues
4354 * @pi: port information structure
4355 * @vsi_handle: software VSI handle
4356 * @tc_bitmap: TC bitmap
4357 * @max_lanqs: max LAN queues array per TC
4358 *
4359 * This function adds/updates the VSI LAN queues per TC.
4360 */
4361enum ice_status
4362ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4363 u16 *max_lanqs)
4364{
4365 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4366 ICE_SCHED_NODE_OWNER_LAN);
4367}
4368
4369/**
4370 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
4371 * @pi: port information structure
4372 * @vsi_handle: software VSI handle
4373 * @tc_bitmap: TC bitmap
4374 * @max_rdmaqs: max RDMA queues array per TC
4375 *
4376 * This function adds/updates the VSI RDMA queues per TC.
4377 */
4378int
4379ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4380 u16 *max_rdmaqs)
4381{
4382 return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
4383 max_rdmaqs,
4384 ICE_SCHED_NODE_OWNER_RDMA));
4385}
4386
4387/**
4388 * ice_ena_vsi_rdma_qset
4389 * @pi: port information structure
4390 * @vsi_handle: software VSI handle
4391 * @tc: TC number
4392 * @rdma_qset: pointer to RDMA Qset
4393 * @num_qsets: number of RDMA Qsets
4394 * @qset_teid: pointer to Qset node TEIDs
4395 *
4396 * This function adds RDMA Qset
4397 */
4398int
4399ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4400 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
4401{
4402 struct ice_aqc_txsched_elem_data node = { 0 };
4403 struct ice_aqc_add_rdma_qset_data *buf;
4404 struct ice_sched_node *parent;
4405 enum ice_status status;
4406 struct ice_hw *hw;
4407 u16 i, buf_size;
4408 int ret;
4409
4410 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4411 return -EIO;
4412 hw = pi->hw;
4413
4414 if (!ice_is_vsi_valid(hw, vsi_handle))
4415 return -EINVAL;
4416
4417 buf_size = struct_size(buf, rdma_qsets, num_qsets);
4418 buf = kzalloc(buf_size, GFP_KERNEL);
4419 if (!buf)
4420 return -ENOMEM;
4421 mutex_lock(&pi->sched_lock);
4422
4423 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4424 ICE_SCHED_NODE_OWNER_RDMA);
4425 if (!parent) {
4426 ret = -EINVAL;
4427 goto rdma_error_exit;
4428 }
4429 buf->parent_teid = parent->info.node_teid;
4430 node.parent_teid = parent->info.node_teid;
4431
4432 buf->num_qsets = cpu_to_le16(num_qsets);
4433 for (i = 0; i < num_qsets; i++) {
4434 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
4435 buf->rdma_qsets[i].info.valid_sections =
4436 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4437 ICE_AQC_ELEM_VALID_EIR;
4438 buf->rdma_qsets[i].info.generic = 0;
4439 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
4440 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4441 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
4442 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4443 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
4444 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4445 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
4446 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4447 }
4448 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
4449 if (ret) {
4450 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
4451 goto rdma_error_exit;
4452 }
4453 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4454 for (i = 0; i < num_qsets; i++) {
4455 node.node_teid = buf->rdma_qsets[i].qset_teid;
4456 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
4457 &node);
4458 if (status) {
4459 ret = ice_status_to_errno(status);
4460 break;
4461 }
4462 qset_teid[i] = le32_to_cpu(node.node_teid);
4463 }
4464rdma_error_exit:
4465 mutex_unlock(&pi->sched_lock);
4466 kfree(buf);
4467 return ret;
4468}
4469
4470/**
4471 * ice_dis_vsi_rdma_qset - free RDMA resources
4472 * @pi: port_info struct
4473 * @count: number of RDMA Qsets to free
4474 * @qset_teid: TEID of Qset node
4475 * @q_id: list of queue IDs being disabled
4476 */
4477int
4478ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
4479 u16 *q_id)
4480{
4481 struct ice_aqc_dis_txq_item *qg_list;
4482 enum ice_status status = 0;
4483 struct ice_hw *hw;
4484 u16 qg_size;
4485 int i;
4486
4487 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4488 return -EIO;
4489
4490 hw = pi->hw;
4491
4492 qg_size = struct_size(qg_list, q_id, 1);
4493 qg_list = kzalloc(qg_size, GFP_KERNEL);
4494 if (!qg_list)
4495 return -ENOMEM;
4496
4497 mutex_lock(&pi->sched_lock);
4498
4499 for (i = 0; i < count; i++) {
4500 struct ice_sched_node *node;
4501
4502 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
4503 if (!node)
4504 continue;
4505
4506 qg_list->parent_teid = node->info.parent_teid;
4507 qg_list->num_qs = 1;
4508 qg_list->q_id[0] =
4509 cpu_to_le16(q_id[i] |
4510 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
4511
4512 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
4513 ICE_NO_RESET, 0, NULL);
4514 if (status)
4515 break;
4516
4517 ice_free_sched_node(pi, node);
4518 }
4519
4520 mutex_unlock(&pi->sched_lock);
4521 kfree(qg_list);
4522 return ice_status_to_errno(status);
4523}
4524
4525/**
4526 * ice_replay_pre_init - replay pre initialization
4527 * @hw: pointer to the HW struct
4528 *
4529 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4530 */
4531static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4532{
4533 struct ice_switch_info *sw = hw->switch_info;
4534 u8 i;
4535
4536 /* Delete old entries from replay filter list head if there is any */
4537 ice_rm_all_sw_replay_rule_info(hw);
4538	/* At the start of replay, move entries into the replay_rules list;
4539	 * this allows rule entries to be added back to the filt_rules list,
4540	 * which is the operational list.
4541	 */
4542 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4543 list_replace_init(&sw->recp_list[i].filt_rules,
4544 &sw->recp_list[i].filt_replay_rules);
4545 ice_sched_replay_agg_vsi_preinit(hw);
4546
4547 return 0;
4548}
4549
4550/**
4551 * ice_replay_vsi - replay VSI configuration
4552 * @hw: pointer to the HW struct
4553 * @vsi_handle: driver VSI handle
4554 *
4555 * Restore all VSI configuration after reset. It is required to call this
4556 * function with the main VSI first.
4557 */
4558enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4559{
4560 enum ice_status status;
4561
4562 if (!ice_is_vsi_valid(hw, vsi_handle))
4563 return ICE_ERR_PARAM;
4564
4565 /* Replay pre-initialization if there is any */
4566 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4567 status = ice_replay_pre_init(hw);
4568 if (status)
4569 return status;
4570 }
4571 /* Replay per VSI all RSS configurations */
4572 status = ice_replay_rss_cfg(hw, vsi_handle);
4573 if (status)
4574 return status;
4575 /* Replay per VSI all filters */
4576 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4577 if (!status)
4578 status = ice_replay_vsi_agg(hw, vsi_handle);
4579 return status;
4580}
4581
4582/**
4583 * ice_replay_post - post replay configuration cleanup
4584 * @hw: pointer to the HW struct
4585 *
4586 * Post replay cleanup.
4587 */
4588void ice_replay_post(struct ice_hw *hw)
4589{
4590 /* Delete old entries from replay filter list head */
4591 ice_rm_all_sw_replay_rule_info(hw);
4592 ice_sched_replay_agg(hw);
4593}
4594
4595/**
4596 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4597 * @hw: ptr to the hardware info
4598 * @reg: offset of 64 bit HW register to read from
4599 * @prev_stat_loaded: bool to specify if previous stats are loaded
4600 * @prev_stat: ptr to previous loaded stat value
4601 * @cur_stat: ptr to current stat value
4602 */
4603void
4604ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4605 u64 *prev_stat, u64 *cur_stat)
4606{
4607 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4608
4609	/* device stats are not reset at PFR; they likely will not be zeroed
4610	 * when the driver starts. Thus, save the value from the first read
4611	 * without adding it to the statistic value so that we report stats
4612	 * which count up from zero.
4613	 */
4614 if (!prev_stat_loaded) {
4615 *prev_stat = new_data;
4616 return;
4617 }
4618
4619 /* Calculate the difference between the new and old values, and then
4620 * add it to the software stat value.
4621 */
4622 if (new_data >= *prev_stat)
4623 *cur_stat += new_data - *prev_stat;
4624 else
4625 /* to manage the potential roll-over */
4626 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4627
4628 /* Update the previously stored value to prepare for next read */
4629 *prev_stat = new_data;
4630}
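
/* Worked example (illustrative only): if *prev_stat is 0xFFFFFFFFF0 (near
 * the top of the 40-bit range) and the register now reads 0x10, the counter
 * wrapped, so the increment is (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20,
 * i.e. 32 units, rather than a huge bogus delta.
 */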
4631
4632/**
4633 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4634 * @hw: ptr to the hardware info
4635 * @reg: offset of HW register to read from
4636 * @prev_stat_loaded: bool to specify if previous stats are loaded
4637 * @prev_stat: ptr to previous loaded stat value
4638 * @cur_stat: ptr to current stat value
4639 */
4640void
4641ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4642 u64 *prev_stat, u64 *cur_stat)
4643{
4644 u32 new_data;
4645
4646 new_data = rd32(hw, reg);
4647
4648	/* device stats are not reset at PFR; they likely will not be zeroed
4649	 * when the driver starts. Thus, save the value from the first read
4650	 * without adding it to the statistic value so that we report stats
4651	 * which count up from zero.
4652	 */
4653 if (!prev_stat_loaded) {
4654 *prev_stat = new_data;
4655 return;
4656 }
4657
4658 /* Calculate the difference between the new and old values, and then
4659 * add it to the software stat value.
4660 */
4661 if (new_data >= *prev_stat)
4662 *cur_stat += new_data - *prev_stat;
4663 else
4664 /* to manage the potential roll-over */
4665 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4666
4667 /* Update the previously stored value to prepare for next read */
4668 *prev_stat = new_data;
4669}
4670
4671/**
4672 * ice_sched_query_elem - query element information from HW
4673 * @hw: pointer to the HW struct
4674 * @node_teid: node TEID to be queried
4675 * @buf: buffer to element information
4676 *
4677 * This function queries HW element information
4678 */
4679enum ice_status
4680ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4681 struct ice_aqc_txsched_elem_data *buf)
4682{
4683 u16 buf_size, num_elem_ret = 0;
4684 enum ice_status status;
4685
4686 buf_size = sizeof(*buf);
4687 memset(buf, 0, buf_size);
4688 buf->node_teid = cpu_to_le32(node_teid);
4689 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4690 NULL);
4691 if (status || num_elem_ret != 1)
4692 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4693 return status;
4694}
4695
4696/**
4697 * ice_aq_set_driver_param - Set driver parameter to share via firmware
4698 * @hw: pointer to the HW struct
4699 * @idx: parameter index to set
4700 * @value: the value to set the parameter to
4701 * @cd: pointer to command details structure or NULL
4702 *
4703 * Set the value of one of the software defined parameters. All PFs connected
4704 * to this device can read the value using ice_aq_get_driver_param.
4705 *
4706 * Note that firmware provides no synchronization or locking, and will not
4707 * save the parameter value during a device reset. It is expected that
4708 * a single PF will write the parameter value, while all other PFs will only
4709 * read it.
4710 */
4711int
4712ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4713 u32 value, struct ice_sq_cd *cd)
4714{
4715 struct ice_aqc_driver_shared_params *cmd;
4716 struct ice_aq_desc desc;
4717
4718 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4719 return -EIO;
4720
4721 cmd = &desc.params.drv_shared_params;
4722
4723 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4724
4725 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
4726 cmd->param_indx = idx;
4727 cmd->param_val = cpu_to_le32(value);
4728
4729 return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
4730}
4731
4732/**
4733 * ice_aq_get_driver_param - Get driver parameter shared via firmware
4734 * @hw: pointer to the HW struct
4735 * @idx: parameter index to retrieve
4736 * @value: storage to return the shared parameter
4737 * @cd: pointer to command details structure or NULL
4738 *
4739 * Get the value of one of the software defined parameters.
4740 *
4741 * Note that firmware provides no synchronization or locking. It is expected
4742 * that only a single PF will write a given parameter.
4743 */
4744int
4745ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4746 u32 *value, struct ice_sq_cd *cd)
4747{
4748 struct ice_aqc_driver_shared_params *cmd;
4749 struct ice_aq_desc desc;
4750 enum ice_status status;
4751
4752 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4753 return -EIO;
4754
4755 cmd = &desc.params.drv_shared_params;
4756
4757 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4758
4759 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
4760 cmd->param_indx = idx;
4761
4762 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4763 if (status)
4764 return ice_status_to_errno(status);
4765
4766 *value = le32_to_cpu(cmd->param_val);
4767
4768 return 0;
4769}
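
/* Usage sketch (illustrative only): one PF publishing a value that any PF
 * on the same device can read back. PARAM_IDX stands in for whichever
 * enum ice_aqc_driver_params index the PFs agree on (hypothetical name).
 *
 *	u32 val;
 *
 *	ice_aq_set_driver_param(hw, PARAM_IDX, 0x1234, NULL);
 *	// later, possibly on a different PF of the same device:
 *	if (!ice_aq_get_driver_param(hw, PARAM_IDX, &val, NULL))
 *		WARN_ON(val != 0x1234);
 */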
4770
4771/**
4772 * ice_fw_supports_link_override
4773 * @hw: pointer to the hardware structure
4774 *
4775 * Checks if the firmware supports link override
4776 */
4777bool ice_fw_supports_link_override(struct ice_hw *hw)
4778{
4779 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4780 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4781 return true;
4782 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4783 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4784 return true;
4785 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4786 return true;
4787 }
4788
4789 return false;
4790}
4791
4792/**
4793 * ice_get_link_default_override
4794 * @ldo: pointer to the link default override struct
4795 * @pi: pointer to the port info struct
4796 *
4797 * Gets the link default override for a port
4798 */
4799enum ice_status
4800ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4801 struct ice_port_info *pi)
4802{
4803 u16 i, tlv, tlv_len, tlv_start, buf, offset;
4804 struct ice_hw *hw = pi->hw;
4805 enum ice_status status;
4806
4807 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4808 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4809 if (status) {
4810 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
4811 return status;
4812 }
4813
4814 /* Each port has its own config; calculate for our port */
4815 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4816 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4817
4818 /* link options first */
4819 status = ice_read_sr_word(hw, tlv_start, &buf);
4820 if (status) {
4821 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4822 return status;
4823 }
4824 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4825 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4826 ICE_LINK_OVERRIDE_PHY_CFG_S;
4827
4828 /* link PHY config */
4829 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4830 status = ice_read_sr_word(hw, offset, &buf);
4831 if (status) {
4832 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
4833 return status;
4834 }
4835 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4836
4837 /* PHY types low */
4838 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4839 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4840 status = ice_read_sr_word(hw, (offset + i), &buf);
4841 if (status) {
4842			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY types (low).\n");
4843 return status;
4844 }
4845 /* shift 16 bits at a time to fill 64 bits */
4846 ldo->phy_type_low |= ((u64)buf << (i * 16));
4847 }
4848
4849 /* PHY types high */
4850 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4851 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4852 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4853 status = ice_read_sr_word(hw, (offset + i), &buf);
4854 if (status) {
4855			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY types (high).\n");
4856 return status;
4857 }
4858 /* shift 16 bits at a time to fill 64 bits */
4859 ldo->phy_type_high |= ((u64)buf << (i * 16));
4860 }
4861
4862 return status;
4863}
4864
4865/**
4866 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4867 * @caps: get PHY capability data
4868 */
4869bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4870{
4871 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4872 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4873 ICE_AQC_PHY_AN_EN_CLAUSE73 |
4874 ICE_AQC_PHY_AN_EN_CLAUSE37))
4875 return true;
4876
4877 return false;
4878}
4879
4880/**
4881 * ice_aq_set_lldp_mib - Set the LLDP MIB
4882 * @hw: pointer to the HW struct
4883 * @mib_type: Local, Remote or both Local and Remote MIBs
4884 * @buf: pointer to the caller-supplied buffer to store the MIB block
4885 * @buf_size: size of the buffer (in bytes)
4886 * @cd: pointer to command details structure or NULL
4887 *
4888 * Set the LLDP MIB. (0x0A08)
4889 */
4890enum ice_status
4891ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4892 struct ice_sq_cd *cd)
4893{
4894 struct ice_aqc_lldp_set_local_mib *cmd;
4895 struct ice_aq_desc desc;
4896
4897 cmd = &desc.params.lldp_set_mib;
4898
4899 if (buf_size == 0 || !buf)
4900 return ICE_ERR_PARAM;
4901
4902 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4903
4904 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
4905 desc.datalen = cpu_to_le16(buf_size);
4906
4907 cmd->type = mib_type;
4908 cmd->length = cpu_to_le16(buf_size);
4909
4910 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4911}
4912
4913/**
4914 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
4915 * @hw: pointer to HW struct
4916 */
4917bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
4918{
4919 if (hw->mac_type != ICE_MAC_E810)
4920 return false;
4921
4922 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
4923 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
4924 return true;
4925 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
4926 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
4927 return true;
4928 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
4929 return true;
4930 }
4931 return false;
4932}
4933
4934/**
4935 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
4936 * @hw: pointer to HW struct
4937 * @vsi_num: absolute HW index for VSI
4938 * @add: boolean for if adding or removing a filter
4939 */
4940enum ice_status
4941ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
4942{
4943 struct ice_aqc_lldp_filter_ctrl *cmd;
4944 struct ice_aq_desc desc;
4945
4946 cmd = &desc.params.lldp_filter_ctrl;
4947
4948 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
4949
4950 if (add)
4951 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
4952 else
4953 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
4954
4955 cmd->vsi_num = cpu_to_le16(vsi_num);
4956
4957 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4958}
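
/* Usage sketch (illustrative only): redirecting LLDP frames to a VSI, but
 * only when the firmware supports the filter control command. Assumes @vsi
 * is a valid driver VSI with its absolute HW index in vsi_num.
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw) &&
 *	    ice_lldp_fltr_add_remove(hw, vsi->vsi_num, true))
 *		ice_debug(hw, ICE_DBG_INIT, "LLDP filter add failed\n");
 */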
4959
4960/**
4961 * ice_fw_supports_report_dflt_cfg
4962 * @hw: pointer to the hardware structure
4963 *
4964 * Checks if the firmware supports report default configuration
4965 */
4966bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
4967{
4968 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4969 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
4970 return true;
4971 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
4972 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
4973 return true;
4974 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4975 return true;
4976 }
4977 return false;
4978}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_common.h"
5#include "ice_sched.h"
6#include "ice_adminq_cmd.h"
7
8#define ICE_PF_RESET_WAIT_COUNT 200
9
10#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
11 wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
12 ((ICE_RX_OPC_MDID << \
13 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17
18#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
19 wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
20 (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28
29/**
30 * ice_set_mac_type - Sets MAC type
31 * @hw: pointer to the HW structure
32 *
33 * This function sets the MAC type of the adapter based on the
34 * vendor ID and device ID stored in the HW structure.
35 */
36static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37{
38 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 return ICE_ERR_DEVICE_NOT_SUPPORTED;
40
41 hw->mac_type = ICE_MAC_GENERIC;
42 return 0;
43}
44
45/**
46 * ice_dev_onetime_setup - Temporary HW/FW workarounds
47 * @hw: pointer to the HW structure
48 *
49 * This function provides temporary workarounds for certain issues
50 * that are expected to be fixed in the HW/FW.
51 */
52void ice_dev_onetime_setup(struct ice_hw *hw)
53{
54#define MBX_PF_VT_PFALLOC 0x00231E80
55 /* set VFs per PF */
56 wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
57}
58
59/**
60 * ice_clear_pf_cfg - Clear PF configuration
61 * @hw: pointer to the hardware structure
62 *
63 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
64 * configuration, flow director filters, etc.).
65 */
66enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
67{
68 struct ice_aq_desc desc;
69
70 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
71
72 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
73}
74
75/**
76 * ice_aq_manage_mac_read - manage MAC address read command
77 * @hw: pointer to the HW struct
78 * @buf: a virtual buffer to hold the manage MAC read response
79 * @buf_size: Size of the virtual buffer
80 * @cd: pointer to command details structure or NULL
81 *
82 * This function is used to return per PF station MAC address (0x0107).
83 * NOTE: Upon successful completion of this command, MAC address information
84 * is returned in user specified buffer. Please interpret user specified
85 * buffer as "manage_mac_read" response.
86 * Response such as various MAC addresses are stored in HW struct (port.mac)
87 * ice_aq_discover_caps is expected to be called before this function is called.
88 */
89static enum ice_status
90ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
91 struct ice_sq_cd *cd)
92{
93 struct ice_aqc_manage_mac_read_resp *resp;
94 struct ice_aqc_manage_mac_read *cmd;
95 struct ice_aq_desc desc;
96 enum ice_status status;
97 u16 flags;
98 u8 i;
99
100 cmd = &desc.params.mac_read;
101
102 if (buf_size < sizeof(*resp))
103 return ICE_ERR_BUF_TOO_SHORT;
104
105 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
106
107 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
108 if (status)
109 return status;
110
111 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
112 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
113
114 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
115 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
116 return ICE_ERR_CFG;
117 }
118
119 /* A single port can report up to two (LAN and WoL) addresses */
120 for (i = 0; i < cmd->num_addr; i++)
121 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
122 ether_addr_copy(hw->port_info->mac.lan_addr,
123 resp[i].mac_addr);
124 ether_addr_copy(hw->port_info->mac.perm_addr,
125 resp[i].mac_addr);
126 break;
127 }
128
129 return 0;
130}
131
132/**
133 * ice_aq_get_phy_caps - returns PHY capabilities
134 * @pi: port information structure
135 * @qual_mods: report qualified modules
136 * @report_mode: report mode capabilities
137 * @pcaps: structure for PHY capabilities to be filled
138 * @cd: pointer to command details structure or NULL
139 *
140 * Returns the various PHY capabilities supported on the Port (0x0600)
141 */
142enum ice_status
143ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
144 struct ice_aqc_get_phy_caps_data *pcaps,
145 struct ice_sq_cd *cd)
146{
147 struct ice_aqc_get_phy_caps *cmd;
148 u16 pcaps_size = sizeof(*pcaps);
149 struct ice_aq_desc desc;
150 enum ice_status status;
151
152 cmd = &desc.params.get_phy;
153
154 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
155 return ICE_ERR_PARAM;
156
157 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
158
159 if (qual_mods)
160 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
161
162 cmd->param0 |= cpu_to_le16(report_mode);
163 status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
164
165 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
166 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
167 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
168 }
169
170 return status;
171}
172
173/**
174 * ice_get_media_type - Gets media type
175 * @pi: port information structure
176 */
177static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
178{
179 struct ice_link_status *hw_link_info;
180
181 if (!pi)
182 return ICE_MEDIA_UNKNOWN;
183
184 hw_link_info = &pi->phy.link_info;
185 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
186 /* If more than one media type is selected, report unknown */
187 return ICE_MEDIA_UNKNOWN;
188
189 if (hw_link_info->phy_type_low) {
190 switch (hw_link_info->phy_type_low) {
191 case ICE_PHY_TYPE_LOW_1000BASE_SX:
192 case ICE_PHY_TYPE_LOW_1000BASE_LX:
193 case ICE_PHY_TYPE_LOW_10GBASE_SR:
194 case ICE_PHY_TYPE_LOW_10GBASE_LR:
195 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
196 case ICE_PHY_TYPE_LOW_25GBASE_SR:
197 case ICE_PHY_TYPE_LOW_25GBASE_LR:
198 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
199 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
200 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
201 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
202 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
203 case ICE_PHY_TYPE_LOW_50GBASE_SR:
204 case ICE_PHY_TYPE_LOW_50GBASE_FR:
205 case ICE_PHY_TYPE_LOW_50GBASE_LR:
206 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
207 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
208 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
209 case ICE_PHY_TYPE_LOW_100GBASE_DR:
210 return ICE_MEDIA_FIBER;
211 case ICE_PHY_TYPE_LOW_100BASE_TX:
212 case ICE_PHY_TYPE_LOW_1000BASE_T:
213 case ICE_PHY_TYPE_LOW_2500BASE_T:
214 case ICE_PHY_TYPE_LOW_5GBASE_T:
215 case ICE_PHY_TYPE_LOW_10GBASE_T:
216 case ICE_PHY_TYPE_LOW_25GBASE_T:
217 return ICE_MEDIA_BASET;
218 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
219 case ICE_PHY_TYPE_LOW_25GBASE_CR:
220 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
221 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
222 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
223 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
224 case ICE_PHY_TYPE_LOW_50GBASE_CP:
225 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
226 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
227 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
228 return ICE_MEDIA_DA;
229 case ICE_PHY_TYPE_LOW_1000BASE_KX:
230 case ICE_PHY_TYPE_LOW_2500BASE_KX:
231 case ICE_PHY_TYPE_LOW_2500BASE_X:
232 case ICE_PHY_TYPE_LOW_5GBASE_KR:
233 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
234 case ICE_PHY_TYPE_LOW_25GBASE_KR:
235 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
236 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
237 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
238 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
239 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
240 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
241 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
242 return ICE_MEDIA_BACKPLANE;
243 }
244 } else {
245 switch (hw_link_info->phy_type_high) {
246 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
247 return ICE_MEDIA_BACKPLANE;
248 }
249 }
250 return ICE_MEDIA_UNKNOWN;
251}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
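
/* Usage sketch (illustrative only; the caller context and the pr_info()
 * reporting are assumptions, not driver code):
 *
 *	struct ice_link_status link;
 *
 *	if (!ice_aq_get_link_info(pi, true, &link, NULL) &&
 *	    (link.link_info & ICE_AQ_LINK_UP))
 *		pr_info("link up, speed field 0x%x\n", link.link_speed);
 */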

/**
 * ice_init_flex_flags
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize Rx flex flags
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
	 * flexiflags1[3:0] - Not used for flag programming
	 * flexiflags2[7:0] - Tunnel and VLAN types
	 * 2 invalid fields in last index
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
				   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
				   ICE_FLG_FIN, idx++);
		/* flex flag 1 is not used for flexi-flag programming, skipping
		 * these four FLG64 bits.
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
				   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
				   ICE_FLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
				   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
				   ICE_FLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_flex_flds
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	return ice_init_def_sw_recp(hw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
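
/* Example (illustrative): because struct ice_aqc_fw_logging_data already
 * declares one entry, ICE_FW_LOG_DESC_SIZE(1) equals
 * sizeof(struct ice_aqc_fw_logging_data), and each additional module adds
 * the size of one 16-bit entry.
 */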

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aqc_fw_logging_data *config;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 size;

	size = ICE_FW_LOG_DESC_SIZE_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * When resetting the device, the driver should disable FW logging before
 * shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but no modules are
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
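
/* Usage sketch (illustrative; the module ID and event mask below are
 * assumptions for the example, not values the driver configures):
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_GENERAL].cfg = event_mask;
 *	if (ice_cfg_fw_log(hw, true))
 *		ice_debug(hw, ICE_DBG_INIT, "FW log config failed\n");
 */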

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_get_nvm_version - get cached NVM version data
 * @hw: pointer to the hardware structure
 * @oem_ver: 8 bit NVM version
 * @oem_build: 16 bit NVM build number
 * @oem_patch: 8 bit NVM patch number
 * @ver_hi: high portion of the NVM version
 * @ver_lo: low portion of the NVM version
 */
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
		    u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
{
	struct ice_nvm_info *nvm = &hw->nvm;

	*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
	*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
	*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
			   ICE_OEM_VER_BUILD_SHIFT);
	*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
}
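
/* Worked example (illustrative; the 0x6190 input is made up and the exact
 * mask/shift values live in the headers): assuming the conventional x.yz
 * layout where ICE_NVM_VER_HI selects the top nibble and ICE_NVM_VER_LO the
 * next byte, nvm->ver == 0x6190 decodes to ver_hi = 0x6 and ver_lo = 0x19,
 * i.e. NVM version 6.19.
 */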

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)((rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNC_NUM_M) >>
			 PF_FUNC_RID_FUNC_NUM_S);

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	ice_dev_onetime_setup(hw);

	/* Get MAC information: a single port can report up to two (LAN and
	 * WoL) addresses
	 */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing, since ice_init_hw() will unroll the applicable
 * initializations itself if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
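
/* Usage sketch (illustrative): ice_init_hw() and ice_deinit_hw() are paired
 * at device probe/remove time:
 *
 *	if (ice_init_hw(hw))
 *		return -EIO;
 *	...
 *	ice_deinit_hw(hw);
 */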

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
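
/* Worked example (illustrative): a GRSTDEL field value of 5 means 500 ms of
 * reset delay; the +10 above adds 1 s for outstanding AQ commands, so the
 * first loop polls up to 15 iterations of 100 ms each before giving up.
 */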

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset.
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure, writes it to HW
 * register space, and enables the hardware to prefetch descriptors instead
 * of fetching them only on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
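
/* Usage sketch (illustrative; ring_dma, num_desc and buf_len are
 * hypothetical caller state, and the 128-byte granularity of base/dbuf
 * follows the Rx queue setup convention used elsewhere in the driver):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	(ring base address, 128-byte units)
 *	rlan_ctx.qlen = num_desc;	(number of descriptors in the ring)
 *	rlan_ctx.dbuf = buf_len >> 7;	(Rx buffer size, 128-byte units)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */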

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void
ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
	     u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		/* fall-through */
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 *  1) 0 (success) -        acquired lock, and can perform download package
 *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                          successfully downloaded the package; the driver
 *                          does not have to download the package and can
 *                          continue loading
 *
 * Note that if the caller is in an acquire-lock, perform-action, release-lock
 * phase of operation, the FW may detect a timeout and issue a CORER. In this
 * case, the driver will receive a CORER interrupt and will have to determine
 * its cause. The calling thread that is handling this flow will likely get an
 * error propagated back to it indicating the Download Package, Update Package
 * or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
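
/* Usage sketch (illustrative): the common acquire/use/release pattern for an
 * ownable resource such as the NVM (the resource ID is real; the timeout
 * macro name is an assumption for the example):
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *			     ICE_NVM_TIMEOUT)) {
 *		... read NVM contents ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */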

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
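
/* Worked example (illustrative): with a valid_functions bitmap of 0x0F
 * (four active PFs), ice_get_num_per_func(hw, 768) returns 768 / 4 = 192
 * resources per PF.
 */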

/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	char const *prefix;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
		prefix = "dev cap";
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
		prefix = "func cap";
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: valid_functions (bitmap) = %d\n", prefix,
				  caps->valid_functions);
			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: sr_iov_1_1 = %d\n", prefix,
				  caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vfs_exposed = %d\n", prefix,
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_allocd_vfs = %d\n", prefix,
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: vf_base_id = %d\n", prefix,
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vsi_allocd_to_host = %d\n",
					  prefix,
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi (fw) = %d\n",
					  prefix, number);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi = %d\n",
					  prefix, func_p->guar_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_DCB:
			caps->dcb = (number == 1);
			caps->active_tc_bitmap = logical_id;
			caps->maxtc = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: dcb = %d\n", prefix, caps->dcb);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: active_tc_bitmap = %d\n", prefix,
				  caps->active_tc_bitmap);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: maxtc = %d\n", prefix, caps->maxtc);
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_size = %d\n", prefix,
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_entry_width = %d\n", prefix,
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_rxq = %d\n", prefix,
				  caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rxq_first_id = %d\n", prefix,
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_txq = %d\n", prefix,
				  caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: txq_first_id = %d\n", prefix,
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_msix_vectors = %d\n", prefix,
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: msix_vector_first_id = %d\n", prefix,
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
				  prefix, caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: unknown capability[%d]: 0x%x\n", prefix,
				  i, cap);
			break;
		}
	}
}

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @cap_count: on an ICE_AQ_RC_ENOMEM error, set to the capability count
 *             the firmware requires
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count = le32_to_cpu(cmd->count);
	return status;
}

/**
 * ice_discover_caps - get info about the HW
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */
static enum ice_status
ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
	 * The driver then allocates the buffer based on the count and retries
	 * the operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with a bigger buffer */
	} while (--retries);

	return status;
}
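
/* Worked example (illustrative): the first attempt sizes the buffer for
 * ICE_GET_CAP_BUF_COUNT (40) capability records; if the firmware answers
 * ICE_AQ_RC_ENOMEM and reports that it has, say, 60 records, the second
 * (and final) attempt allocates 60 * sizeof(struct ice_aqc_list_caps_elem)
 * bytes and retries.
 */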

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	u32 valid_func, rxq_first_id, txq_first_id;
	u32 msix_vector_first_id, max_mtu;
	u32 num_func = 0;
	u8 i;

	/* cache some func_caps values that should be restored after memset */
	valid_func = func_caps->common_cap.valid_functions;
	txq_first_id = func_caps->common_cap.txq_first_id;
	rxq_first_id = func_caps->common_cap.rxq_first_id;
	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
	max_mtu = func_caps->common_cap.max_mtu;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

	/* restore cached values */
	func_caps->common_cap.valid_functions = valid_func;
	func_caps->common_cap.txq_first_id = txq_first_id;
	func_caps->common_cap.rxq_first_id = rxq_first_id;
	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
	func_caps->common_cap.max_mtu = max_mtu;

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	valid_func = dev_caps->common_cap.valid_functions;
	txq_first_id = dev_caps->common_cap.txq_first_id;
	rxq_first_id = dev_caps->common_cap.rxq_first_id;
	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
	max_mtu = dev_caps->common_cap.max_mtu;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

	/* restore cached values */
	dev_caps->common_cap.valid_functions = valid_func;
	dev_caps->common_cap.txq_first_id = txq_first_id;
	dev_caps->common_cap.rxq_first_id = rxq_first_id;
	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
	dev_caps->common_cap.max_mtu = max_mtu;

	/* valid_func is a bitmap. get number of functions */
#define ICE_MAX_FUNCS 8
	for (i = 0; i < ICE_MAX_FUNCS; i++)
		if (valid_func & BIT(i))
			num_func++;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_func;
	dev_caps->common_cap.num_txq = num_func;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_func;
}
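
/* Worked example (illustrative): with valid_func = 0x05 (PFs 0 and 2
 * active), num_func is 2, so safe mode exposes 2 Rx queues, 2 Tx queues
 * and 4 MSIX vectors device-wide.
 */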

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Pack the address into the sah/sal pair: the first two bytes of the
	 * MAC go into sah and the remaining four into sal, converted to
	 * network order
	 */
	cmd->sah = htons(*((const u16 *)mac_addr));
	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in the PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: exactly one bit should be set across [phy_type_low, phy_type_high],
 * as this function converts a single PHY type to its speed.
 * If no bit is set, ICE_LINK_SPEED_UNKNOWN will be returned
 * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
2113 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2114 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2115 return ICE_AQ_LINK_SPEED_UNKNOWN;
2116 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2117 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2118 return speed_phy_type_low;
2119 else
2120 return speed_phy_type_high;
2121}
2122
/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: the format of link_speeds_bitmap is described by
 * ice_aqc_get_link_status->link_speed. The caller can pass a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in the [phy_type_low, phy_type_high] pair represents a
 * certain link speed. This helper function turns on bits in
 * [phy_type_low, phy_type_high] based on the value of the
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

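/* Usage sketch (illustrative only): enable just the PHY types that map to
 * 10G and 25G before requesting a new PHY configuration.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = cpu_to_le64(phy_low);
 *	cfg.phy_type_high = cpu_to_le64(phy_high);
 */
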
/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY,
			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
		  cfg->low_power_ctrl);
	ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
					     pcaps, NULL);
		if (!status)
			memcpy(li->module_type, &pcaps->module_type,
			       sizeof(li->module_type));

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg.caps |= pause_mask;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		/* Copy over all the old settings */
		cfg.phy_type_high = pcaps->phy_type_high;
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

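/* Usage sketch (illustrative only): request symmetric flow control and let
 * the function restart the link so the change takes effect.
 *
 *	u8 aq_failures;
 *	enum ice_status status;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		dev_err(ice_hw_to_dev(pi->hw), "set fc failed (%u)\n",
 *			aq_failures);
 */
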
/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!caps || !cfg)
		return;

	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl = caps->low_power_ctrl;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 *
 * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
 * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
 * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
 */
void
ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, then AND in the BASE-R ability
		 * bits and OR in the request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
				     ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, then AND in the RS ability
		 * bits and OR in the request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* Keep only valid caps bits, including the auto FEC bit. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		break;
	}
}

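/* Usage sketch (illustrative only): the expected call order is to read the
 * current abilities, copy them into a config structure, adjust the FEC
 * request bits, and then apply the result.
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
 *				     pcaps, NULL);
 *	if (!status) {
 *		ice_copy_phy_caps_to_cfg(pcaps, &cfg);
 *		ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *		status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &cfg, NULL);
 *	}
 */
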
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if the returned status is non-zero.
 * As a result of this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

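/* Usage sketch (illustrative only):
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up))
 *		dev_dbg(ice_hw_to_dev(pi->hw), "link is %s\n",
 *			link_up ? "up" : "down");
 */
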
/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS lookup table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, true);
}

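/* Usage sketch (illustrative only): distribute a VSI's Rx queues across a
 * 128-entry VSI LUT in round-robin order. The 128-entry size is an
 * assumption for the VSI table type here.
 *
 *	u8 lut[128];
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(lut); i++)
 *		lut[i] = i % num_rxq;
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
 *				    lut, sizeof(lut));
 */
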
/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

2724
2725/**
2726 * ice_aq_get_rss_key
2727 * @hw: pointer to the HW struct
2728 * @vsi_handle: software VSI handle
2729 * @key: pointer to key info struct
2730 *
2731 * get the RSS key per VSI
2732 */
2733enum ice_status
2734ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2735 struct ice_aqc_get_set_rss_keys *key)
2736{
2737 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2738 return ICE_ERR_PARAM;
2739
2740 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2741 key, false);
2742}
2743
2744/**
2745 * ice_aq_set_rss_key
2746 * @hw: pointer to the HW struct
2747 * @vsi_handle: software VSI handle
2748 * @keys: pointer to key info struct
2749 *
2750 * set the RSS key per VSI
2751 */
2752enum ice_status
2753ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2754 struct ice_aqc_get_set_rss_keys *keys)
2755{
2756 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2757 return ICE_ERR_PARAM;
2758
2759 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2760 keys, true);
2761}
2762
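/* Usage sketch (illustrative only): seed the standard RSS key with random
 * bytes. standard_rss_key is assumed to be the hash key member of
 * struct ice_aqc_get_set_rss_keys.
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	netdev_rss_key_fill(keys.standard_rss_key,
 *			    sizeof(keys.standard_rss_key));
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */
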
/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After the add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues; association of a
 * Tx queue to a Doorbell queue is not part of the Add LAN Tx queue flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

2824
2825/**
2826 * ice_aq_dis_lan_txq
2827 * @hw: pointer to the hardware structure
2828 * @num_qgrps: number of groups in the list
2829 * @qg_list: the list of groups to disable
2830 * @buf_size: the total size of the qg_list buffer in bytes
2831 * @rst_src: if called due to reset, specifies the reset source
2832 * @vmvf_num: the relative VM or VF number that is undergoing the reset
2833 * @cd: pointer to command details structure or NULL
2834 *
2835 * Disable LAN Tx queue (0x0C31)
2836 */
2837static enum ice_status
2838ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2839 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2840 enum ice_disq_rst_src rst_src, u16 vmvf_num,
2841 struct ice_sq_cd *cd)
2842{
2843 struct ice_aqc_dis_txqs *cmd;
2844 struct ice_aq_desc desc;
2845 enum ice_status status;
2846 u16 i, sz = 0;
2847
2848 cmd = &desc.params.dis_txqs;
2849 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2850
2851 /* qg_list can be NULL only in VM/VF reset flow */
2852 if (!qg_list && !rst_src)
2853 return ICE_ERR_PARAM;
2854
2855 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2856 return ICE_ERR_PARAM;
2857
2858 cmd->num_entries = num_qgrps;
2859
2860 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2861 ICE_AQC_Q_DIS_TIMEOUT_M);
2862
2863 switch (rst_src) {
2864 case ICE_VM_RESET:
2865 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2866 cmd->vmvf_and_timeout |=
2867 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2868 break;
2869 case ICE_VF_RESET:
2870 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2871 /* In this case, FW expects vmvf_num to be absolute VF ID */
2872 cmd->vmvf_and_timeout |=
2873 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2874 ICE_AQC_Q_DIS_VMVF_NUM_M);
2875 break;
2876 case ICE_NO_RESET:
2877 default:
2878 break;
2879 }
2880
2881 /* flush pipe on time out */
2882 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
2883 /* If no queue group info, we are in a reset flow. Issue the AQ */
2884 if (!qg_list)
2885 goto do_aq;
2886
2887 /* set RD bit to indicate that command buffer is provided by the driver
2888 * and it needs to be read by the firmware
2889 */
2890 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2891
2892 for (i = 0; i < num_qgrps; ++i) {
2893 /* Calculate the size taken up by the queue IDs in this group */
2894 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2895
2896 /* Add the size of the group header */
2897 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2898
2899 /* If the num of queues is even, add 2 bytes of padding */
2900 if ((qg_list[i].num_qs % 2) == 0)
2901 sz += 2;
2902 }
2903
2904 if (buf_size != sz)
2905 return ICE_ERR_PARAM;
2906
2907do_aq:
2908 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2909 if (status) {
2910 if (!qg_list)
2911 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
2912 vmvf_num, hw->adminq.sq_last_status);
2913 else
2914 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
2915 le16_to_cpu(qg_list[0].q_id[0]),
2916 hw->adminq.sq_last_status);
2917 }
2918 return status;
2919}
2920
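/* Worked example for the buf_size check above (illustrative only): for a
 * single group disabling three queues, the expected size is the group
 * header plus three queue IDs, with no padding since num_qs is odd:
 *
 *	buf_size = (sizeof(*qg_list) - sizeof(qg_list->q_id)) +
 *		   3 * sizeof(qg_list->q_id[0]);
 *
 * With an even queue count, two bytes of padding must be added on top.
 */
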
/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

2961
2962/**
2963 * ice_write_word - write a word to a packed context structure
2964 * @src_ctx: the context structure to read from
2965 * @dest_ctx: the context to be written to
2966 * @ce_info: a description of the struct to be filled
2967 */
2968static void
2969ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2970{
2971 u16 src_word, mask;
2972 __le16 dest_word;
2973 u8 *from, *dest;
2974 u16 shift_width;
2975
2976 /* copy from the next struct field */
2977 from = src_ctx + ce_info->offset;
2978
2979 /* prepare the bits and mask */
2980 shift_width = ce_info->lsb % 8;
2981 mask = BIT(ce_info->width) - 1;
2982
2983 /* don't swizzle the bits until after the mask because the mask bits
2984 * will be in a different bit position on big endian machines
2985 */
2986 src_word = *(u16 *)from;
2987 src_word &= mask;
2988
2989 /* shift to correct alignment */
2990 mask <<= shift_width;
2991 src_word <<= shift_width;
2992
2993 /* get the current bits from the target bit string */
2994 dest = dest_ctx + (ce_info->lsb / 8);
2995
2996 memcpy(&dest_word, dest, sizeof(dest_word));
2997
2998 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
2999 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3000
3001 /* put it all back */
3002 memcpy(dest, &dest_word, sizeof(dest_word));
3003}
3004
3005/**
3006 * ice_write_dword - write a dword to a packed context structure
3007 * @src_ctx: the context structure to read from
3008 * @dest_ctx: the context to be written to
3009 * @ce_info: a description of the struct to be filled
3010 */
3011static void
3012ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3013{
3014 u32 src_dword, mask;
3015 __le32 dest_dword;
3016 u8 *from, *dest;
3017 u16 shift_width;
3018
3019 /* copy from the next struct field */
3020 from = src_ctx + ce_info->offset;
3021
3022 /* prepare the bits and mask */
3023 shift_width = ce_info->lsb % 8;
3024
3025 /* if the field width is exactly 32 on an x86 machine, then the shift
3026 * operation will not work because the SHL instructions count is masked
3027 * to 5 bits so the shift will do nothing
3028 */
3029 if (ce_info->width < 32)
3030 mask = BIT(ce_info->width) - 1;
3031 else
3032 mask = (u32)~0;
3033
3034 /* don't swizzle the bits until after the mask because the mask bits
3035 * will be in a different bit position on big endian machines
3036 */
3037 src_dword = *(u32 *)from;
3038 src_dword &= mask;
3039
3040 /* shift to correct alignment */
3041 mask <<= shift_width;
3042 src_dword <<= shift_width;
3043
3044 /* get the current bits from the target bit string */
3045 dest = dest_ctx + (ce_info->lsb / 8);
3046
3047 memcpy(&dest_dword, dest, sizeof(dest_dword));
3048
3049 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
3050 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
3051
3052 /* put it all back */
3053 memcpy(dest, &dest_dword, sizeof(dest_dword));
3054}
3055
3056/**
3057 * ice_write_qword - write a qword to a packed context structure
3058 * @src_ctx: the context structure to read from
3059 * @dest_ctx: the context to be written to
3060 * @ce_info: a description of the struct to be filled
3061 */
3062static void
3063ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3064{
3065 u64 src_qword, mask;
3066 __le64 dest_qword;
3067 u8 *from, *dest;
3068 u16 shift_width;
3069
3070 /* copy from the next struct field */
3071 from = src_ctx + ce_info->offset;
3072
3073 /* prepare the bits and mask */
3074 shift_width = ce_info->lsb % 8;
3075
3076 /* if the field width is exactly 64 on an x86 machine, then the shift
3077 * operation will not work because the SHL instructions count is masked
3078 * to 6 bits so the shift will do nothing
3079 */
3080 if (ce_info->width < 64)
3081 mask = BIT_ULL(ce_info->width) - 1;
3082 else
3083 mask = (u64)~0;
3084
3085 /* don't swizzle the bits until after the mask because the mask bits
3086 * will be in a different bit position on big endian machines
3087 */
3088 src_qword = *(u64 *)from;
3089 src_qword &= mask;
3090
3091 /* shift to correct alignment */
3092 mask <<= shift_width;
3093 src_qword <<= shift_width;
3094
3095 /* get the current bits from the target bit string */
3096 dest = dest_ctx + (ce_info->lsb / 8);
3097
3098 memcpy(&dest_qword, dest, sizeof(dest_qword));
3099
3100 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
3101 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
3102
3103 /* put it all back */
3104 memcpy(dest, &dest_qword, sizeof(dest_qword));
3105}
3106
3107/**
3108 * ice_set_ctx - set context bits in packed structure
3109 * @src_ctx: pointer to a generic non-packed context structure
3110 * @dest_ctx: pointer to memory for the packed structure
3111 * @ce_info: a description of the structure to be transformed
3112 */
3113enum ice_status
3114ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3115{
3116 int f;
3117
3118 for (f = 0; ce_info[f].width; f++) {
3119 /* We have to deal with each element of the FW response
3120 * using the correct size so that we are correct regardless
3121 * of the endianness of the machine.
3122 */
3123 switch (ce_info[f].size_of) {
3124 case sizeof(u8):
3125 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3126 break;
3127 case sizeof(u16):
3128 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3129 break;
3130 case sizeof(u32):
3131 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3132 break;
3133 case sizeof(u64):
3134 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3135 break;
3136 default:
3137 return ICE_ERR_INVAL_SIZE;
3138 }
3139 }
3140
3141 return 0;
3142}
3143
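/* Usage sketch (illustrative only): a hypothetical two-field descriptor
 * table. Real tables in this driver are defined elsewhere (e.g. for the Tx
 * LAN queue context); the struct, the table name, and the bit geometry
 * below are made up for illustration. The loop in ice_set_ctx() stops at
 * the zeroed terminator entry, whose width is 0.
 *
 *	struct my_ctx {
 *		u16 base;
 *		u8 pf_num;
 *	};
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, base),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, pf_num),
 *		  .size_of = sizeof(u8), .width = 3, .lsb = 13 },
 *		{ 0 },
 *	};
 *
 *	ice_set_ctx((u8 *)&my_ctx, packed_buf, my_ctx_info);
 */
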
/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
static struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}


/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	struct ice_q_ctx *q_ctx;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!num_queues) {
		/* if the queues are already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * then call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}


/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}


/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}


/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from the replay filter list head, if any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);

	return 0;
}


/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}

	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
}


/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding it to the statistic value so that we report
	 * stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}


/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding it to the statistic value so that we report
	 * stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

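/* Usage sketch (illustrative only): accumulating the Rx bytes counter from
 * its 40-bit hardware register. GLPRT_GORCL() is assumed to be the per-port
 * "good octets received" register macro used elsewhere in this driver.
 *
 *	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
 *			  &prev_ps->eth.rx_bytes, &cur_ps->eth.rx_bytes);
 *	pf->stat_prev_loaded = true;
 */
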
/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->generic[0].node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}