// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E830_BACKPLANE:
	case ICE_DEV_ID_E830_QSFP56:
	case ICE_DEV_ID_E830_SFP:
	case ICE_DEV_ID_E830_SFP_DD:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer, which should be interpreted as
 * an array of "manage_mac_read" response structures. The LAN MAC address
 * found in the response is also stored in the HW struct (port.mac).
 * ice_discover_dev_caps is expected to be called before this function.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
			  le64_to_cpu(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
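
/*
 * Illustrative sketch (not part of the driver): querying the currently
 * active PHY configuration. The wrapper name is hypothetical; the
 * allocate/query/free pattern mirrors the ice_aq_get_phy_caps() caller in
 * ice_init_hw() below.
 */
static int __maybe_unused
ice_example_read_active_phy_caps(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	int status;

	pcaps = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*pcaps),
			     GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* report what is configured now, not media/topology capabilities */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (!status)
		ice_debug(pi->hw, ICE_DBG_PHY, "active caps = 0x%x\n",
			  pcaps->caps);

	devm_kfree(ice_hw_to_dev(pi->hw), pcaps);
	return status;
}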

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
				   node_type_ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}
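
/*
 * Illustrative sketch (not part of the driver): looking up a netlist node by
 * a caller-supplied type and part number and distinguishing "absent" from
 * "AQ failure". The wrapper name is hypothetical; type and part-number
 * constants come from ice_adminq_cmd.h.
 */
static int __maybe_unused
ice_example_lookup_node(struct ice_hw *hw, u8 type, u8 part_num)
{
	u16 handle;
	int err;

	err = ice_find_netlist_node(hw, type, part_num, &handle);
	if (err == -ENOENT)
		ice_debug(hw, ICE_DBG_INIT, "node not present in netlist\n");
	else if (!err)
		ice_debug(hw, ICE_DBG_INIT, "node handle = 0x%x\n", handle);

	return err;
}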

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_get_link_status_datalen
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for the newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
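
/*
 * Illustrative sketch (not part of the driver): refreshing link status and
 * testing for link-up. The helper name is hypothetical; ICE_AQ_LINK_UP is
 * the link_info bit reported by the Get Link Status command.
 */
static bool __maybe_unused ice_example_link_is_up(struct ice_port_info *pi)
{
	struct ice_link_status link = {};

	/* do not enable link status events for this one-shot query */
	if (ice_aq_get_link_info(pi, false, &link, NULL))
		return false;

	return !!(link.link_info & ICE_AQ_LINK_UP);
}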

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
				  rd32(hw, GL_PWR_MODE_CTL));

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_fwlog_init(hw);
	if (status)
		ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
			  status);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing, since ice_init_hw() itself unrolls the applicable
 * initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	ice_fwlog_deinit(hw);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
				 rd32(hw, GLGEN_RSTCTL)) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
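
/*
 * Illustrative sketch (not part of the driver): requesting a Core Reset and
 * clearing PXE mode afterwards, per the note above. The helper name is
 * hypothetical; real callers clear PXE mode only once the AQ interface has
 * been restored in the rebuild flow.
 */
static int __maybe_unused ice_example_core_reset(struct ice_hw *hw)
{
	int status;

	status = ice_reset(hw, ICE_RESET_CORER);
	if (status)
		return status;

	/* anything other than a PF reset restores PXE mode */
	ice_clear_pxe_mode(hw);
	return 0;
}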

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
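
/*
 * Illustrative sketch (not part of the driver): programming a minimal Rx
 * queue context. The helper name and field values are hypothetical; the
 * field names come from the ice_rlan_ctx_info table above, and the shifts
 * assume base/dbuf are expressed in 128-byte units as in the driver's Rx
 * setup path.
 */
static int __maybe_unused
ice_example_setup_rxq(struct ice_hw *hw, u64 desc_base, u16 num_desc,
		      u32 rxq_index)
{
	struct ice_rlan_ctx rlan_ctx = {};

	rlan_ctx.base = desc_base >> 7;	/* descriptor ring base address */
	rlan_ctx.qlen = num_desc;	/* ring length in descriptors */
	rlan_ctx.dbuf = 2048 >> 7;	/* Rx data buffer size */
	rlan_ctx.dsize = 1;		/* 32-byte descriptor format */

	return ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
}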

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
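
/*
 * Illustrative sketch (not part of the driver): a sideband register read.
 * The helper name is hypothetical and the destination device is left to the
 * caller; a zero opcode selects a read in ice_sbq_rw_reg() above, in which
 * case the result comes back in msg.data.
 */
static int __maybe_unused
ice_example_sbq_read(struct ice_hw *hw, u8 dest_dev, u16 addr, u32 *val)
{
	struct ice_sbq_msg_input msg = {};
	int err;

	msg.dest_dev = dest_dev;
	msg.opcode = 0;			/* read; nonzero would be a write */
	msg.msg_addr_low = addr;

	err = ice_sbq_rw_reg(hw, &msg);
	if (!err)
		*val = msg.data;	/* filled in from the completion */

	return err;
}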

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all
	 * others must block until the package download completes and the
	 * Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
1577 switch (le16_to_cpu(desc->opcode)) {
1578 case ice_aqc_opc_download_pkg:
1579 case ice_aqc_opc_get_pkg_info_list:
1580 case ice_aqc_opc_get_ver:
1581 case ice_aqc_opc_upload_section:
1582 case ice_aqc_opc_update_pkg:
1583 case ice_aqc_opc_set_port_params:
1584 case ice_aqc_opc_get_vlan_mode_parameters:
1585 case ice_aqc_opc_set_vlan_mode_parameters:
1586 case ice_aqc_opc_add_recipe:
1587 case ice_aqc_opc_recipe_to_profile:
1588 case ice_aqc_opc_get_recipe:
1589 case ice_aqc_opc_get_recipe_to_profile:
1590 break;
1591 case ice_aqc_opc_release_res:
1592 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1593 break;
1594 fallthrough;
1595 default:
1596 mutex_lock(&ice_global_cfg_lock_sw);
1597 lock_acquired = true;
1598 break;
1599 }
1600
1601 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1602 if (lock_acquired)
1603 mutex_unlock(&ice_global_cfg_lock_sw);
1604
1605 return status;
1606}
1607
1608/**
1609 * ice_aq_get_fw_ver
1610 * @hw: pointer to the HW struct
1611 * @cd: pointer to command details structure or NULL
1612 *
1613 * Get the firmware version (0x0001) from the admin queue commands
1614 */
1615int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1616{
1617 struct ice_aqc_get_ver *resp;
1618 struct ice_aq_desc desc;
1619 int status;
1620
1621 resp = &desc.params.get_ver;
1622
1623 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1624
1625 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1626
1627 if (!status) {
1628 hw->fw_branch = resp->fw_branch;
1629 hw->fw_maj_ver = resp->fw_major;
1630 hw->fw_min_ver = resp->fw_minor;
1631 hw->fw_patch = resp->fw_patch;
1632 hw->fw_build = le32_to_cpu(resp->fw_build);
1633 hw->api_branch = resp->api_branch;
1634 hw->api_maj_ver = resp->api_major;
1635 hw->api_min_ver = resp->api_minor;
1636 hw->api_patch = resp->api_patch;
1637 }
1638
1639 return status;
1640}
1641
1642/**
1643 * ice_aq_send_driver_ver
1644 * @hw: pointer to the HW struct
1645 * @dv: driver's major, minor version
1646 * @cd: pointer to command details structure or NULL
1647 *
1648 * Send the driver version (0x0002) to the firmware
1649 */
1650int
1651ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1652 struct ice_sq_cd *cd)
1653{
1654 struct ice_aqc_driver_ver *cmd;
1655 struct ice_aq_desc desc;
1656 u16 len;
1657
1658 cmd = &desc.params.driver_ver;
1659
1660 if (!dv)
1661 return -EINVAL;
1662
1663 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1664
1665 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1666 cmd->major_ver = dv->major_ver;
1667 cmd->minor_ver = dv->minor_ver;
1668 cmd->build_ver = dv->build_ver;
1669 cmd->subbuild_ver = dv->subbuild_ver;
1670
1671 len = 0;
1672 while (len < sizeof(dv->driver_string) &&
1673 isascii(dv->driver_string[len]) && dv->driver_string[len])
1674 len++;
1675
1676 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1677}
1678
1679/**
1680 * ice_aq_q_shutdown
1681 * @hw: pointer to the HW struct
1682 * @unloading: is the driver unloading itself
1683 *
1684 * Tell the Firmware that we're shutting down the AdminQ and whether
1685 * or not the driver is unloading as well (0x0003).
1686 */
1687int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1688{
1689 struct ice_aqc_q_shutdown *cmd;
1690 struct ice_aq_desc desc;
1691
1692 cmd = &desc.params.q_shutdown;
1693
1694 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1695
1696 if (unloading)
1697 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1698
1699 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1700}
1701
1702/**
1703 * ice_aq_req_res
1704 * @hw: pointer to the HW struct
1705 * @res: resource ID
1706 * @access: access type
1707 * @sdp_number: resource number
1708 * @timeout: the maximum time in ms that the driver may hold the resource
1709 * @cd: pointer to command details structure or NULL
1710 *
1711 * Requests common resource using the admin queue commands (0x0008).
1712 * When attempting to acquire the Global Config Lock, the driver can
1713 * learn of three states:
1714 * 1) 0 - acquired lock, and can perform download package
1715 * 2) -EIO - did not get lock, driver should fail to load
1716 * 3) -EALREADY - did not get lock, but another driver has
1717 * successfully downloaded the package; the driver does
1718 * not have to download the package and can continue
1719 * loading
1720 *
1721 * Note that if the caller is in an acquire lock, perform action, release lock
1722 * phase of operation, it is possible that the FW may detect a timeout and issue
1723 * a CORER. In this case, the driver will receive a CORER interrupt and will
1724 * have to determine its cause. The calling thread that is handling this flow
1725 * will likely get an error propagated back to it indicating the Download
1726 * Package, Update Package or the Release Resource AQ commands timed out.
1727 */
1728static int
1729ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1730 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1731 struct ice_sq_cd *cd)
1732{
1733 struct ice_aqc_req_res *cmd_resp;
1734 struct ice_aq_desc desc;
1735 int status;
1736
1737 cmd_resp = &desc.params.res_owner;
1738
1739 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1740
1741 cmd_resp->res_id = cpu_to_le16(res);
1742 cmd_resp->access_type = cpu_to_le16(access);
1743 cmd_resp->res_number = cpu_to_le32(sdp_number);
1744 cmd_resp->timeout = cpu_to_le32(*timeout);
1745 *timeout = 0;
1746
1747 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1748
1749 /* The completion specifies the maximum time in ms that the driver
1750 * may hold the resource in the Timeout field.
1751 */
1752
1753 /* Global config lock response utilizes an additional status field.
1754 *
1755 * If the Global config lock resource is held by some other driver, the
1756 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1757 * and the timeout field indicates the maximum time the current owner
1758 * of the resource has to free it.
1759 */
1760 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1761 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1762 *timeout = le32_to_cpu(cmd_resp->timeout);
1763 return 0;
1764 } else if (le16_to_cpu(cmd_resp->status) ==
1765 ICE_AQ_RES_GLBL_IN_PROG) {
1766 *timeout = le32_to_cpu(cmd_resp->timeout);
1767 return -EIO;
1768 } else if (le16_to_cpu(cmd_resp->status) ==
1769 ICE_AQ_RES_GLBL_DONE) {
1770 return -EALREADY;
1771 }
1772
1773 /* invalid FW response, force a timeout immediately */
1774 *timeout = 0;
1775 return -EIO;
1776 }
1777
1778 /* If the resource is held by some other driver, the command completes
1779 * with a busy return value and the timeout field indicates the maximum
1780 * time the current owner of the resource has to free it.
1781 */
1782 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1783 *timeout = le32_to_cpu(cmd_resp->timeout);
1784
1785 return status;
1786}
1787
1788/**
1789 * ice_aq_release_res
1790 * @hw: pointer to the HW struct
1791 * @res: resource ID
1792 * @sdp_number: resource number
1793 * @cd: pointer to command details structure or NULL
1794 *
 * Release a common resource using the admin queue command (0x0009).
1796 */
1797static int
1798ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1799 struct ice_sq_cd *cd)
1800{
1801 struct ice_aqc_req_res *cmd;
1802 struct ice_aq_desc desc;
1803
1804 cmd = &desc.params.res_owner;
1805
1806 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1807
1808 cmd->res_id = cpu_to_le16(res);
1809 cmd->res_number = cpu_to_le32(sdp_number);
1810
1811 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1812}
1813
1814/**
 * ice_acquire_res - acquire ownership of a resource
1816 * @hw: pointer to the HW structure
1817 * @res: resource ID
1818 * @access: access type (read or write)
1819 * @timeout: timeout in milliseconds
1820 *
1821 * This function will attempt to acquire the ownership of a resource.
1822 */
1823int
1824ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1825 enum ice_aq_res_access_type access, u32 timeout)
1826{
1827#define ICE_RES_POLLING_DELAY_MS 10
1828 u32 delay = ICE_RES_POLLING_DELAY_MS;
1829 u32 time_left = timeout;
1830 int status;
1831
1832 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1833
1834 /* A return code of -EALREADY means that another driver has
1835 * previously acquired the resource and performed any necessary updates;
1836 * in this case the caller does not obtain the resource and has no
1837 * further work to do.
1838 */
1839 if (status == -EALREADY)
1840 goto ice_acquire_res_exit;
1841
1842 if (status)
1843 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1844
	/* If necessary, poll until the current lock owner times out */
1846 timeout = time_left;
1847 while (status && timeout && time_left) {
1848 mdelay(delay);
1849 timeout = (timeout > delay) ? timeout - delay : 0;
1850 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1851
1852 if (status == -EALREADY)
1853 /* lock free, but no work to do */
1854 break;
1855
1856 if (!status)
1857 /* lock acquired */
1858 break;
1859 }
1860 if (status && status != -EALREADY)
1861 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1862
1863ice_acquire_res_exit:
1864 if (status == -EALREADY) {
1865 if (access == ICE_RES_WRITE)
1866 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1867 else
1868 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
1869 }
1870 return status;
1871}
1872
1873/**
 * ice_release_res - release a resource
1875 * @hw: pointer to the HW structure
1876 * @res: resource ID
1877 *
1878 * This function will release a resource using the proper Admin Command.
1879 */
1880void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1881{
1882 unsigned long timeout;
1883 int status;
1884
1885 /* there are some rare cases when trying to release the resource
1886 * results in an admin queue timeout, so handle them correctly
1887 */
1888 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;
1889 do {
1890 status = ice_aq_release_res(hw, res, 0, NULL);
1891 if (status != -EIO)
1892 break;
1893 usleep_range(1000, 2000);
1894 } while (time_before(jiffies, timeout));
1895}
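
/* Usage sketch (illustrative only, not a caller in this file): acquiring and
 * releasing the Global Config Lock around a package download, assuming the
 * ICE_GLOBAL_CFG_LOCK_TIMEOUT value defined elsewhere in the driver:
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (!status) {
 *		(download the package here)
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	} else if (status == -EALREADY) {
 *		(another PF already downloaded the package; nothing to do)
 *	}
 */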
1896
1897/**
1898 * ice_aq_alloc_free_res - command to allocate/free resources
1899 * @hw: pointer to the HW struct
1900 * @buf: Indirect buffer to hold data parameters and response
1901 * @buf_size: size of buffer for indirect commands
1902 * @opc: pass in the command opcode
1903 *
1904 * Helper function to allocate/free resources using the admin queue commands
1905 */
1906int ice_aq_alloc_free_res(struct ice_hw *hw,
1907 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1908 enum ice_adminq_opc opc)
1909{
1910 struct ice_aqc_alloc_free_res_cmd *cmd;
1911 struct ice_aq_desc desc;
1912
1913 cmd = &desc.params.sw_res_ctrl;
1914
1915 if (!buf || buf_size < flex_array_size(buf, elem, 1))
1916 return -EINVAL;
1917
1918 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1919
1920 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1921
1922 cmd->num_entries = cpu_to_le16(1);
1923
1924 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
1925}
1926
1927/**
1928 * ice_alloc_hw_res - allocate resource
1929 * @hw: pointer to the HW struct
1930 * @type: type of resource
1931 * @num: number of resources to allocate
1932 * @btm: allocate from bottom
1933 * @res: pointer to array that will receive the resources
1934 */
1935int
1936ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1937{
1938 struct ice_aqc_alloc_free_res_elem *buf;
1939 u16 buf_len;
1940 int status;
1941
1942 buf_len = struct_size(buf, elem, num);
1943 buf = kzalloc(buf_len, GFP_KERNEL);
1944 if (!buf)
1945 return -ENOMEM;
1946
1947 /* Prepare buffer to allocate resource. */
1948 buf->num_elems = cpu_to_le16(num);
1949 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1950 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1951 if (btm)
1952 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1953
1954 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
1955 if (status)
1956 goto ice_alloc_res_exit;
1957
1958 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1959
1960ice_alloc_res_exit:
1961 kfree(buf);
1962 return status;
1963}
1964
1965/**
1966 * ice_free_hw_res - free allocated HW resource
1967 * @hw: pointer to the HW struct
1968 * @type: type of resource to free
1969 * @num: number of resources
1970 * @res: pointer to array that contains the resources to free
1971 */
1972int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1973{
1974 struct ice_aqc_alloc_free_res_elem *buf;
1975 u16 buf_len;
1976 int status;
1977
1978 buf_len = struct_size(buf, elem, num);
1979 buf = kzalloc(buf_len, GFP_KERNEL);
1980 if (!buf)
1981 return -ENOMEM;
1982
1983 /* Prepare buffer to free resource. */
1984 buf->num_elems = cpu_to_le16(num);
1985 buf->res_type = cpu_to_le16(type);
1986 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1987
1988 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
1989 if (status)
1990 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1991
1992 kfree(buf);
1993 return status;
1994}
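
/* Usage sketch (illustrative only): allocating a single resource of some
 * ICE_AQC_RES_TYPE_* type from the bottom of the range and freeing it again.
 * The res_type variable here is a placeholder for a real resource type:
 *
 *	u16 res_id;
 *
 *	if (!ice_alloc_hw_res(hw, res_type, 1, true, &res_id)) {
 *		(use res_id)
 *		ice_free_hw_res(hw, res_type, 1, &res_id);
 *	}
 */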
1995
1996/**
1997 * ice_get_num_per_func - determine number of resources per PF
1998 * @hw: pointer to the HW structure
1999 * @max: value to be evenly split between each PF
2000 *
2001 * Determine the number of valid functions by going through the bitmap returned
2002 * from parsing capabilities and use this to calculate the number of resources
2003 * per PF based on the max value passed in.
2004 */
2005static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2006{
2007 u8 funcs;
2008
2009#define ICE_CAPS_VALID_FUNCS_M 0xFF
2010 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
2011 ICE_CAPS_VALID_FUNCS_M);
2012
2013 if (!funcs)
2014 return 0;
2015
2016 return max / funcs;
2017}
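
/* Worked example (numbers illustrative): with a valid_functions bitmap of
 * 0x0F, hweight8() reports four PFs, so a max value of 768 yields
 * 768 / 4 = 192 resources per PF.
 */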
2018
2019/**
2020 * ice_parse_common_caps - parse common device/function capabilities
2021 * @hw: pointer to the HW struct
2022 * @caps: pointer to common capabilities structure
2023 * @elem: the capability element to parse
2024 * @prefix: message prefix for tracing capabilities
2025 *
2026 * Given a capability element, extract relevant details into the common
2027 * capability structure.
2028 *
 * Returns: true if the capability matches one of the common capability IDs,
2030 * false otherwise.
2031 */
2032static bool
2033ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2034 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2035{
2036 u32 logical_id = le32_to_cpu(elem->logical_id);
2037 u32 phys_id = le32_to_cpu(elem->phys_id);
2038 u32 number = le32_to_cpu(elem->number);
2039 u16 cap = le16_to_cpu(elem->cap);
2040 bool found = true;
2041
2042 switch (cap) {
2043 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2044 caps->valid_functions = number;
2045 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2046 caps->valid_functions);
2047 break;
2048 case ICE_AQC_CAPS_SRIOV:
2049 caps->sr_iov_1_1 = (number == 1);
2050 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2051 caps->sr_iov_1_1);
2052 break;
2053 case ICE_AQC_CAPS_DCB:
2054 caps->dcb = (number == 1);
2055 caps->active_tc_bitmap = logical_id;
2056 caps->maxtc = phys_id;
2057 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2058 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2059 caps->active_tc_bitmap);
2060 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2061 break;
2062 case ICE_AQC_CAPS_RSS:
2063 caps->rss_table_size = number;
2064 caps->rss_table_entry_width = logical_id;
2065 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2066 caps->rss_table_size);
2067 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2068 caps->rss_table_entry_width);
2069 break;
2070 case ICE_AQC_CAPS_RXQS:
2071 caps->num_rxq = number;
2072 caps->rxq_first_id = phys_id;
2073 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2074 caps->num_rxq);
2075 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2076 caps->rxq_first_id);
2077 break;
2078 case ICE_AQC_CAPS_TXQS:
2079 caps->num_txq = number;
2080 caps->txq_first_id = phys_id;
2081 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2082 caps->num_txq);
2083 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2084 caps->txq_first_id);
2085 break;
2086 case ICE_AQC_CAPS_MSIX:
2087 caps->num_msix_vectors = number;
2088 caps->msix_vector_first_id = phys_id;
2089 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2090 caps->num_msix_vectors);
2091 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2092 caps->msix_vector_first_id);
2093 break;
2094 case ICE_AQC_CAPS_PENDING_NVM_VER:
2095 caps->nvm_update_pending_nvm = true;
2096 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2097 break;
2098 case ICE_AQC_CAPS_PENDING_OROM_VER:
2099 caps->nvm_update_pending_orom = true;
2100 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2101 break;
2102 case ICE_AQC_CAPS_PENDING_NET_VER:
2103 caps->nvm_update_pending_netlist = true;
2104 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2105 break;
2106 case ICE_AQC_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			!!(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT);
2110 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2111 caps->nvm_unified_update);
2112 break;
2113 case ICE_AQC_CAPS_RDMA:
2114 caps->rdma = (number == 1);
2115 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2116 break;
2117 case ICE_AQC_CAPS_MAX_MTU:
2118 caps->max_mtu = number;
2119 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2120 prefix, caps->max_mtu);
2121 break;
2122 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2123 caps->pcie_reset_avoidance = (number > 0);
2124 ice_debug(hw, ICE_DBG_INIT,
2125 "%s: pcie_reset_avoidance = %d\n", prefix,
2126 caps->pcie_reset_avoidance);
2127 break;
2128 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2129 caps->reset_restrict_support = (number == 1);
2130 ice_debug(hw, ICE_DBG_INIT,
2131 "%s: reset_restrict_support = %d\n", prefix,
2132 caps->reset_restrict_support);
2133 break;
2134 case ICE_AQC_CAPS_FW_LAG_SUPPORT:
2135 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
2136 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
2137 prefix, caps->roce_lag);
2138 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
2139 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
2140 prefix, caps->sriov_lag);
2141 break;
2142 default:
2143 /* Not one of the recognized common capabilities */
2144 found = false;
2145 }
2146
2147 return found;
2148}
2149
2150/**
2151 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2152 * @hw: pointer to the HW structure
2153 * @caps: pointer to capabilities structure to fix
2154 *
2155 * Re-calculate the capabilities that are dependent on the number of physical
2156 * ports; i.e. some features are not supported or function differently on
2157 * devices with more than 4 ports.
2158 */
2159static void
2160ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2161{
2162 /* This assumes device capabilities are always scanned before function
2163 * capabilities during the initialization flow.
2164 */
2165 if (hw->dev_caps.num_funcs > 4) {
2166 /* Max 4 TCs per port */
2167 caps->maxtc = 4;
2168 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2169 caps->maxtc);
2170 if (caps->rdma) {
2171 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2172 caps->rdma = 0;
2173 }
2174
2175 /* print message only when processing device capabilities
2176 * during initialization.
2177 */
2178 if (caps == &hw->dev_caps.common_cap)
2179 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2180 }
2181}
2182
2183/**
2184 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2185 * @hw: pointer to the HW struct
2186 * @func_p: pointer to function capabilities structure
2187 * @cap: pointer to the capability element to parse
2188 *
2189 * Extract function capabilities for ICE_AQC_CAPS_VF.
2190 */
2191static void
2192ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2193 struct ice_aqc_list_caps_elem *cap)
2194{
2195 u32 logical_id = le32_to_cpu(cap->logical_id);
2196 u32 number = le32_to_cpu(cap->number);
2197
2198 func_p->num_allocd_vfs = number;
2199 func_p->vf_base_id = logical_id;
2200 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2201 func_p->num_allocd_vfs);
2202 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2203 func_p->vf_base_id);
2204}
2205
2206/**
2207 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2208 * @hw: pointer to the HW struct
2209 * @func_p: pointer to function capabilities structure
2210 * @cap: pointer to the capability element to parse
2211 *
2212 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2213 */
2214static void
2215ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2216 struct ice_aqc_list_caps_elem *cap)
2217{
2218 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2219 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2220 le32_to_cpu(cap->number));
2221 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2222 func_p->guar_num_vsi);
2223}
2224
2225/**
2226 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2227 * @hw: pointer to the HW struct
2228 * @func_p: pointer to function capabilities structure
2229 * @cap: pointer to the capability element to parse
2230 *
2231 * Extract function capabilities for ICE_AQC_CAPS_1588.
2232 */
2233static void
2234ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2235 struct ice_aqc_list_caps_elem *cap)
2236{
2237 struct ice_ts_func_info *info = &func_p->ts_func_info;
2238 u32 number = le32_to_cpu(cap->number);
2239
2240 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2241 func_p->common_cap.ieee_1588 = info->ena;
2242
2243 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2244 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2245 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2246 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2247
2248 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
2249 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2250
2251 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2252 info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2253 } else {
2254 /* Unknown clock frequency, so assume a (probably incorrect)
		 * default to avoid out-of-bounds lookups of frequency-related
		 * information.
2257 */
2258 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2259 info->clk_freq);
2260 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2261 }
2262
2263 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2264 func_p->common_cap.ieee_1588);
2265 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2266 info->src_tmr_owned);
2267 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2268 info->tmr_ena);
2269 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2270 info->tmr_index_owned);
2271 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2272 info->tmr_index_assoc);
2273 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2274 info->clk_freq);
2275 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2276 info->clk_src);
2277}
2278
2279/**
2280 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2281 * @hw: pointer to the HW struct
2282 * @func_p: pointer to function capabilities structure
2283 *
2284 * Extract function capabilities for ICE_AQC_CAPS_FD.
2285 */
2286static void
2287ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2288{
2289 u32 reg_val, gsize, bsize;
2290
2291 reg_val = rd32(hw, GLQF_FD_SIZE);
2292 switch (hw->mac_type) {
2293 case ICE_MAC_E830:
2294 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2295 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2296 break;
2297 case ICE_MAC_E810:
2298 default:
2299 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2300 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2301 }
2302 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
2303 func_p->fd_fltr_best_effort = bsize;
2304
2305 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2306 func_p->fd_fltr_guar);
2307 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2308 func_p->fd_fltr_best_effort);
2309}
2310
2311/**
2312 * ice_parse_func_caps - Parse function capabilities
2313 * @hw: pointer to the HW struct
2314 * @func_p: pointer to function capabilities structure
2315 * @buf: buffer containing the function capability records
2316 * @cap_count: the number of capabilities
2317 *
 * Helper function to parse the function (0x000A) capabilities list. For
2319 * capabilities shared between device and function, this relies on
2320 * ice_parse_common_caps.
2321 *
2322 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
2324 */
2325static void
2326ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2327 void *buf, u32 cap_count)
2328{
2329 struct ice_aqc_list_caps_elem *cap_resp;
2330 u32 i;
2331
2332 cap_resp = buf;
2333
2334 memset(func_p, 0, sizeof(*func_p));
2335
2336 for (i = 0; i < cap_count; i++) {
2337 u16 cap = le16_to_cpu(cap_resp[i].cap);
2338 bool found;
2339
2340 found = ice_parse_common_caps(hw, &func_p->common_cap,
2341 &cap_resp[i], "func caps");
2342
2343 switch (cap) {
2344 case ICE_AQC_CAPS_VF:
2345 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2346 break;
2347 case ICE_AQC_CAPS_VSI:
2348 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2349 break;
2350 case ICE_AQC_CAPS_1588:
2351 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2352 break;
2353 case ICE_AQC_CAPS_FD:
2354 ice_parse_fdir_func_caps(hw, func_p);
2355 break;
2356 default:
2357 /* Don't list common capabilities as unknown */
2358 if (!found)
2359 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2360 i, cap);
2361 break;
2362 }
2363 }
2364
2365 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2366}
2367
2368/**
2369 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2370 * @hw: pointer to the HW struct
2371 * @dev_p: pointer to device capabilities structure
2372 * @cap: capability element to parse
2373 *
2374 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2375 */
2376static void
2377ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2378 struct ice_aqc_list_caps_elem *cap)
2379{
2380 u32 number = le32_to_cpu(cap->number);
2381
2382 dev_p->num_funcs = hweight32(number);
2383 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2384 dev_p->num_funcs);
2385}
2386
2387/**
2388 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2389 * @hw: pointer to the HW struct
2390 * @dev_p: pointer to device capabilities structure
2391 * @cap: capability element to parse
2392 *
2393 * Parse ICE_AQC_CAPS_VF for device capabilities.
2394 */
2395static void
2396ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2397 struct ice_aqc_list_caps_elem *cap)
2398{
2399 u32 number = le32_to_cpu(cap->number);
2400
2401 dev_p->num_vfs_exposed = number;
2402 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2403 dev_p->num_vfs_exposed);
2404}
2405
2406/**
2407 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2408 * @hw: pointer to the HW struct
2409 * @dev_p: pointer to device capabilities structure
2410 * @cap: capability element to parse
2411 *
2412 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2413 */
2414static void
2415ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2416 struct ice_aqc_list_caps_elem *cap)
2417{
2418 u32 number = le32_to_cpu(cap->number);
2419
2420 dev_p->num_vsi_allocd_to_host = number;
2421 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2422 dev_p->num_vsi_allocd_to_host);
2423}
2424
2425/**
2426 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2427 * @hw: pointer to the HW struct
2428 * @dev_p: pointer to device capabilities structure
2429 * @cap: capability element to parse
2430 *
2431 * Parse ICE_AQC_CAPS_1588 for device capabilities.
2432 */
2433static void
2434ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2435 struct ice_aqc_list_caps_elem *cap)
2436{
2437 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2438 u32 logical_id = le32_to_cpu(cap->logical_id);
2439 u32 phys_id = le32_to_cpu(cap->phys_id);
2440 u32 number = le32_to_cpu(cap->number);
2441
2442 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2443 dev_p->common_cap.ieee_1588 = info->ena;
2444
2445 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2446 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2447 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2448
2449 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number);
2450 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2451 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2452
2453 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
2454 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
2455
2456 info->ena_ports = logical_id;
2457 info->tmr_own_map = phys_id;
2458
2459 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2460 dev_p->common_cap.ieee_1588);
2461 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2462 info->tmr0_owner);
2463 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2464 info->tmr0_owned);
2465 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2466 info->tmr0_ena);
2467 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2468 info->tmr1_owner);
2469 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2470 info->tmr1_owned);
2471 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2472 info->tmr1_ena);
2473 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
2474 info->ts_ll_read);
2475 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
2476 info->ts_ll_int_read);
2477 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2478 info->ena_ports);
2479 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2480 info->tmr_own_map);
2481}
2482
2483/**
2484 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2485 * @hw: pointer to the HW struct
2486 * @dev_p: pointer to device capabilities structure
2487 * @cap: capability element to parse
2488 *
2489 * Parse ICE_AQC_CAPS_FD for device capabilities.
2490 */
2491static void
2492ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2493 struct ice_aqc_list_caps_elem *cap)
2494{
2495 u32 number = le32_to_cpu(cap->number);
2496
2497 dev_p->num_flow_director_fltr = number;
2498 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2499 dev_p->num_flow_director_fltr);
2500}
2501
2502/**
2503 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
2504 * @hw: pointer to the HW struct
2505 * @dev_p: pointer to device capabilities structure
2506 * @cap: capability element to parse
2507 *
2508 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
2509 * enabled sensors.
2510 */
2511static void
2512ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2513 struct ice_aqc_list_caps_elem *cap)
2514{
2515 dev_p->supported_sensors = le32_to_cpu(cap->number);
2516
2517 ice_debug(hw, ICE_DBG_INIT,
2518 "dev caps: supported sensors (bitmap) = 0x%x\n",
2519 dev_p->supported_sensors);
2520}
2521
2522/**
2523 * ice_parse_dev_caps - Parse device capabilities
2524 * @hw: pointer to the HW struct
2525 * @dev_p: pointer to device capabilities structure
2526 * @buf: buffer containing the device capability records
2527 * @cap_count: the number of capabilities
2528 *
 * Helper function to parse the device (0x000B) capabilities list. For
2530 * capabilities shared between device and function, this relies on
2531 * ice_parse_common_caps.
2532 *
2533 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
2535 */
2536static void
2537ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2538 void *buf, u32 cap_count)
2539{
2540 struct ice_aqc_list_caps_elem *cap_resp;
2541 u32 i;
2542
2543 cap_resp = buf;
2544
2545 memset(dev_p, 0, sizeof(*dev_p));
2546
2547 for (i = 0; i < cap_count; i++) {
2548 u16 cap = le16_to_cpu(cap_resp[i].cap);
2549 bool found;
2550
2551 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2552 &cap_resp[i], "dev caps");
2553
2554 switch (cap) {
2555 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2556 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2557 break;
2558 case ICE_AQC_CAPS_VF:
2559 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2560 break;
2561 case ICE_AQC_CAPS_VSI:
2562 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2563 break;
2564 case ICE_AQC_CAPS_1588:
2565 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2566 break;
2567 case ICE_AQC_CAPS_FD:
2568 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2569 break;
2570 case ICE_AQC_CAPS_SENSOR_READING:
2571 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
2572 break;
2573 default:
2574 /* Don't list common capabilities as unknown */
2575 if (!found)
2576 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2577 i, cap);
2578 break;
2579 }
2580 }
2581
2582 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2583}
2584
2585/**
 * ice_is_pf_c827 - check if the PF contains a C827 PHY
2587 * @hw: pointer to the hw struct
2588 */
2589bool ice_is_pf_c827(struct ice_hw *hw)
2590{
2591 struct ice_aqc_get_link_topo cmd = {};
2592 u8 node_part_number;
2593 u16 node_handle;
2594 int status;
2595
2596 if (hw->mac_type != ICE_MAC_E810)
2597 return false;
2598
2599 if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
2600 return true;
2601
2602 cmd.addr.topo_params.node_type_ctx =
2603 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
2604 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
2605 cmd.addr.topo_params.index = 0;
2606
2607 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
2608 &node_handle);
2609
2610 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
2611 return false;
2612
2613 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
2614 return true;
2615
2616 return false;
2617}
2618
2619/**
 * ice_is_phy_rclk_in_netlist - check PHY recovered clock presence
2621 * @hw: pointer to the hw struct
2622 *
2623 * Check if the PHY Recovered Clock device is present in the netlist
2624 */
2625bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
2626{
2627 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
2628 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
2629 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
2630 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
2631 return false;
2632
2633 return true;
2634}
2635
2636/**
 * ice_is_clock_mux_in_netlist - check clock multiplexer presence
2638 * @hw: pointer to the hw struct
2639 *
2640 * Check if the Clock Multiplexer device is present in the netlist
2641 */
2642bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
2643{
2644 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
2645 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
2646 NULL))
2647 return false;
2648
2649 return true;
2650}
2651
2652/**
2653 * ice_is_cgu_in_netlist - check for CGU presence
2654 * @hw: pointer to the hw struct
2655 *
2656 * Check if the Clock Generation Unit (CGU) device is present in the netlist.
2657 * Save the CGU part number in the hw structure for later use.
2658 * Return:
2659 * * true - cgu is present
2660 * * false - cgu is not present
2661 */
2662bool ice_is_cgu_in_netlist(struct ice_hw *hw)
2663{
2664 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
2665 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
2666 NULL)) {
2667 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
2668 return true;
2669 } else if (!ice_find_netlist_node(hw,
2670 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
2671 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
2672 NULL)) {
2673 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
2674 return true;
2675 }
2676
2677 return false;
2678}
2679
2680/**
 * ice_is_gps_in_netlist - check GPS presence
2682 * @hw: pointer to the hw struct
2683 *
2684 * Check if the GPS generic device is present in the netlist
2685 */
2686bool ice_is_gps_in_netlist(struct ice_hw *hw)
2687{
2688 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
2689 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
2690 return false;
2691
2692 return true;
2693}
2694
2695/**
2696 * ice_aq_list_caps - query function/device capabilities
2697 * @hw: pointer to the HW struct
2698 * @buf: a buffer to hold the capabilities
2699 * @buf_size: size of the buffer
2700 * @cap_count: if not NULL, set to the number of capabilities reported
2701 * @opc: capabilities type to discover, device or function
2702 * @cd: pointer to command details structure or NULL
2703 *
2704 * Get the function (0x000A) or device (0x000B) capabilities description from
2705 * firmware and store it in the buffer.
2706 *
2707 * If the cap_count pointer is not NULL, then it is set to the number of
2708 * capabilities firmware will report. Note that if the buffer size is too
2709 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2710 * cap_count will still be updated in this case. It is recommended that the
2711 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2712 * firmware could return) to avoid this.
2713 */
2714int
2715ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2716 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2717{
2718 struct ice_aqc_list_caps *cmd;
2719 struct ice_aq_desc desc;
2720 int status;
2721
2722 cmd = &desc.params.get_cap;
2723
2724 if (opc != ice_aqc_opc_list_func_caps &&
2725 opc != ice_aqc_opc_list_dev_caps)
2726 return -EINVAL;
2727
2728 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2729 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2730
2731 if (cap_count)
2732 *cap_count = le32_to_cpu(cmd->count);
2733
2734 return status;
2735}
2736
2737/**
2738 * ice_discover_dev_caps - Read and extract device capabilities
2739 * @hw: pointer to the hardware structure
2740 * @dev_caps: pointer to device capabilities structure
2741 *
2742 * Read the device capabilities and extract them into the dev_caps structure
2743 * for later use.
2744 */
2745int
2746ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2747{
2748 u32 cap_count = 0;
2749 void *cbuf;
2750 int status;
2751
2752 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2753 if (!cbuf)
2754 return -ENOMEM;
2755
2756 /* Although the driver doesn't know the number of capabilities the
2757 * device will return, we can simply send a 4KB buffer, the maximum
2758 * possible size that firmware can return.
2759 */
2760 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2761
2762 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2763 ice_aqc_opc_list_dev_caps, NULL);
2764 if (!status)
2765 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2766 kfree(cbuf);
2767
2768 return status;
2769}
2770
2771/**
2772 * ice_discover_func_caps - Read and extract function capabilities
2773 * @hw: pointer to the hardware structure
2774 * @func_caps: pointer to function capabilities structure
2775 *
2776 * Read the function capabilities and extract them into the func_caps structure
2777 * for later use.
2778 */
2779static int
2780ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2781{
2782 u32 cap_count = 0;
2783 void *cbuf;
2784 int status;
2785
2786 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2787 if (!cbuf)
2788 return -ENOMEM;
2789
2790 /* Although the driver doesn't know the number of capabilities the
2791 * device will return, we can simply send a 4KB buffer, the maximum
2792 * possible size that firmware can return.
2793 */
2794 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2795
2796 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2797 ice_aqc_opc_list_func_caps, NULL);
2798 if (!status)
2799 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2800 kfree(cbuf);
2801
2802 return status;
2803}
2804
2805/**
2806 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2807 * @hw: pointer to the hardware structure
2808 */
2809void ice_set_safe_mode_caps(struct ice_hw *hw)
2810{
2811 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2812 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2813 struct ice_hw_common_caps cached_caps;
2814 u32 num_funcs;
2815
2816 /* cache some func_caps values that should be restored after memset */
2817 cached_caps = func_caps->common_cap;
2818
2819 /* unset func capabilities */
2820 memset(func_caps, 0, sizeof(*func_caps));
2821
2822#define ICE_RESTORE_FUNC_CAP(name) \
2823 func_caps->common_cap.name = cached_caps.name
2824
2825 /* restore cached values */
2826 ICE_RESTORE_FUNC_CAP(valid_functions);
2827 ICE_RESTORE_FUNC_CAP(txq_first_id);
2828 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2829 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2830 ICE_RESTORE_FUNC_CAP(max_mtu);
2831 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2832 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2833 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2834 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2835
2836 /* one Tx and one Rx queue in safe mode */
2837 func_caps->common_cap.num_rxq = 1;
2838 func_caps->common_cap.num_txq = 1;
2839
2840 /* two MSIX vectors, one for traffic and one for misc causes */
2841 func_caps->common_cap.num_msix_vectors = 2;
2842 func_caps->guar_num_vsi = 1;
2843
2844 /* cache some dev_caps values that should be restored after memset */
2845 cached_caps = dev_caps->common_cap;
2846 num_funcs = dev_caps->num_funcs;
2847
2848 /* unset dev capabilities */
2849 memset(dev_caps, 0, sizeof(*dev_caps));
2850
2851#define ICE_RESTORE_DEV_CAP(name) \
2852 dev_caps->common_cap.name = cached_caps.name
2853
2854 /* restore cached values */
2855 ICE_RESTORE_DEV_CAP(valid_functions);
2856 ICE_RESTORE_DEV_CAP(txq_first_id);
2857 ICE_RESTORE_DEV_CAP(rxq_first_id);
2858 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2859 ICE_RESTORE_DEV_CAP(max_mtu);
2860 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2861 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2862 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2863 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2864 dev_caps->num_funcs = num_funcs;
2865
2866 /* one Tx and one Rx queue per function in safe mode */
2867 dev_caps->common_cap.num_rxq = num_funcs;
2868 dev_caps->common_cap.num_txq = num_funcs;
2869
2870 /* two MSIX vectors per function */
2871 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2872}
2873
2874/**
2875 * ice_get_caps - get info about the HW
2876 * @hw: pointer to the hardware structure
2877 */
2878int ice_get_caps(struct ice_hw *hw)
2879{
2880 int status;
2881
2882 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2883 if (status)
2884 return status;
2885
2886 return ice_discover_func_caps(hw, &hw->func_caps);
2887}
2888
2889/**
2890 * ice_aq_manage_mac_write - manage MAC address write command
2891 * @hw: pointer to the HW struct
2892 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2893 * @flags: flags to control write behavior
2894 * @cd: pointer to command details structure or NULL
2895 *
 * This function is used to write the MAC address to the NVM (0x0108).
2897 */
2898int
2899ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2900 struct ice_sq_cd *cd)
2901{
2902 struct ice_aqc_manage_mac_write *cmd;
2903 struct ice_aq_desc desc;
2904
2905 cmd = &desc.params.mac_write;
2906 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2907
2908 cmd->flags = flags;
2909 ether_addr_copy(cmd->mac_addr, mac_addr);
2910
2911 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2912}
2913
2914/**
 * ice_aq_clear_pxe_mode - clear PXE mode
2916 * @hw: pointer to the HW struct
2917 *
2918 * Tell the firmware that the driver is taking over from PXE (0x0110).
2919 */
2920static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
2921{
2922 struct ice_aq_desc desc;
2923
2924 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2925 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2926
2927 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2928}
2929
2930/**
 * ice_clear_pxe_mode - clear PXE operations mode
2932 * @hw: pointer to the HW struct
2933 *
2934 * Make sure all PXE mode settings are cleared, including things
2935 * like descriptor fetch/write-back mode.
2936 */
2937void ice_clear_pxe_mode(struct ice_hw *hw)
2938{
2939 if (ice_check_sq_alive(hw, &hw->adminq))
2940 ice_aq_clear_pxe_mode(hw);
2941}
2942
2943/**
2944 * ice_aq_set_port_params - set physical port parameters.
2945 * @pi: pointer to the port info struct
2946 * @double_vlan: if set double VLAN is enabled
2947 * @cd: pointer to command details structure or NULL
2948 *
2949 * Set Physical port parameters (0x0203)
2950 */
2951int
2952ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
		       struct ice_sq_cd *cd)
{
2956 struct ice_aqc_set_port_params *cmd;
2957 struct ice_hw *hw = pi->hw;
2958 struct ice_aq_desc desc;
2959 u16 cmd_flags = 0;
2960
2961 cmd = &desc.params.set_port_params;
2962
2963 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2964 if (double_vlan)
2965 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2966 cmd->cmd_flags = cpu_to_le16(cmd_flags);
2967
2968 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2969}
2970
2971/**
 * ice_is_100m_speed_supported - check if 100M speeds are supported
2973 * @hw: pointer to the HW struct
2974 *
 * Returns true if 100M speeds are supported by the device, false otherwise.
2977 */
2978bool ice_is_100m_speed_supported(struct ice_hw *hw)
2979{
2980 switch (hw->device_id) {
2981 case ICE_DEV_ID_E822C_SGMII:
2982 case ICE_DEV_ID_E822L_SGMII:
2983 case ICE_DEV_ID_E823L_1GBE:
2984 case ICE_DEV_ID_E823C_SGMII:
2985 return true;
2986 default:
2987 return false;
2988 }
2989}
2990
2991/**
2992 * ice_get_link_speed_based_on_phy_type - returns link speed
2993 * @phy_type_low: lower part of phy_type
2994 * @phy_type_high: higher part of phy_type
2995 *
 * This helper function converts an entry in the PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: exactly one bit should be set across [phy_type_low, phy_type_high],
 * as this function converts a single PHY type to its speed. If no bit is
 * set, or if more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is
 * returned.
3003 */
3004static u16
3005ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3006{
3007 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3008 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3009
3010 switch (phy_type_low) {
3011 case ICE_PHY_TYPE_LOW_100BASE_TX:
3012 case ICE_PHY_TYPE_LOW_100M_SGMII:
3013 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3014 break;
3015 case ICE_PHY_TYPE_LOW_1000BASE_T:
3016 case ICE_PHY_TYPE_LOW_1000BASE_SX:
3017 case ICE_PHY_TYPE_LOW_1000BASE_LX:
3018 case ICE_PHY_TYPE_LOW_1000BASE_KX:
3019 case ICE_PHY_TYPE_LOW_1G_SGMII:
3020 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3021 break;
3022 case ICE_PHY_TYPE_LOW_2500BASE_T:
3023 case ICE_PHY_TYPE_LOW_2500BASE_X:
3024 case ICE_PHY_TYPE_LOW_2500BASE_KX:
3025 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3026 break;
3027 case ICE_PHY_TYPE_LOW_5GBASE_T:
3028 case ICE_PHY_TYPE_LOW_5GBASE_KR:
3029 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3030 break;
3031 case ICE_PHY_TYPE_LOW_10GBASE_T:
3032 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3033 case ICE_PHY_TYPE_LOW_10GBASE_SR:
3034 case ICE_PHY_TYPE_LOW_10GBASE_LR:
3035 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3036 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3037 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3038 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3039 break;
3040 case ICE_PHY_TYPE_LOW_25GBASE_T:
3041 case ICE_PHY_TYPE_LOW_25GBASE_CR:
3042 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3043 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3044 case ICE_PHY_TYPE_LOW_25GBASE_SR:
3045 case ICE_PHY_TYPE_LOW_25GBASE_LR:
3046 case ICE_PHY_TYPE_LOW_25GBASE_KR:
3047 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3048 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3049 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3050 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3051 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3052 break;
3053 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3054 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3055 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3056 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3057 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3058 case ICE_PHY_TYPE_LOW_40G_XLAUI:
3059 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3060 break;
3061 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3062 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3063 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3064 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3065 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3066 case ICE_PHY_TYPE_LOW_50G_LAUI2:
3067 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3068 case ICE_PHY_TYPE_LOW_50G_AUI2:
3069 case ICE_PHY_TYPE_LOW_50GBASE_CP:
3070 case ICE_PHY_TYPE_LOW_50GBASE_SR:
3071 case ICE_PHY_TYPE_LOW_50GBASE_FR:
3072 case ICE_PHY_TYPE_LOW_50GBASE_LR:
3073 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3074 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3075 case ICE_PHY_TYPE_LOW_50G_AUI1:
3076 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3077 break;
3078 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3079 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3080 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3081 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3082 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3083 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3084 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3085 case ICE_PHY_TYPE_LOW_100G_AUI4:
3086 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3087 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3088 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3089 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3090 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3091 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3092 break;
3093 default:
3094 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3095 break;
3096 }
3097
3098 switch (phy_type_high) {
3099 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3100 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3101 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3102 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3103 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3104 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3105 break;
3106 default:
3107 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3108 break;
3109 }
3110
3111 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3112 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3113 return ICE_AQ_LINK_SPEED_UNKNOWN;
3114 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3115 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3116 return ICE_AQ_LINK_SPEED_UNKNOWN;
3117 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3118 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3119 return speed_phy_type_low;
3120 else
3121 return speed_phy_type_high;
3122}
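
/* Example: passing a single PHY type such as
 * ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_10GBASE_SR, 0)
 * yields ICE_AQ_LINK_SPEED_10GB, while passing two set bits, e.g.
 * ICE_PHY_TYPE_LOW_10GBASE_SR | ICE_PHY_TYPE_LOW_25GBASE_SR, yields
 * ICE_AQ_LINK_SPEED_UNKNOWN since the speed is ambiguous.
 */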
3123
3124/**
3125 * ice_update_phy_type
3126 * @phy_type_low: pointer to the lower part of phy_type
3127 * @phy_type_high: pointer to the higher part of phy_type
3128 * @link_speeds_bitmap: targeted link speeds bitmap
3129 *
3130 * Note: For the link_speeds_bitmap structure, you can check it at
3131 * [ice_aqc_get_link_status->link_speed]. Caller can pass in
3132 * link_speeds_bitmap include multiple speeds.
3133 *
3134 * Each entry in this [phy_type_low, phy_type_high] structure will
3135 * present a certain link speed. This helper function will turn on bits
3136 * in [phy_type_low, phy_type_high] structure based on the value of
3137 * link_speeds_bitmap input parameter.
3138 */
3139void
3140ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3141 u16 link_speeds_bitmap)
3142{
3143 u64 pt_high;
3144 u64 pt_low;
3145 int index;
3146 u16 speed;
3147
3148 /* We first check with low part of phy_type */
3149 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3150 pt_low = BIT_ULL(index);
3151 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3152
3153 if (link_speeds_bitmap & speed)
3154 *phy_type_low |= BIT_ULL(index);
3155 }
3156
3157 /* We then check with high part of phy_type */
3158 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3159 pt_high = BIT_ULL(index);
3160 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3161
3162 if (link_speeds_bitmap & speed)
3163 *phy_type_high |= BIT_ULL(index);
3164 }
3165}
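
/* Usage sketch (illustrative only): building a PHY type mask restricted to
 * 10G and 25G speeds before programming it via Set PHY Config:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg->phy_type_low = cpu_to_le64(phy_low);
 *	cfg->phy_type_high = cpu_to_le64(phy_high);
 */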
3166
3167/**
 * ice_aq_set_phy_cfg - set PHY configuration
3169 * @hw: pointer to the HW struct
3170 * @pi: port info structure of the interested logical port
3171 * @cfg: structure with PHY configuration data to be set
3172 * @cd: pointer to command details structure or NULL
3173 *
3174 * Set the various PHY configuration parameters supported on the Port.
3175 * One or more of the Set PHY config parameters may be ignored in an MFP
3176 * mode as the PF may not have the privilege to set some of the PHY Config
3177 * parameters. This status will be indicated by the command response (0x0601).
3178 */
3179int
3180ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3181 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3182{
3183 struct ice_aq_desc desc;
3184 int status;
3185
3186 if (!cfg)
3187 return -EINVAL;
3188
3189 /* Ensure that only valid bits of cfg->caps can be turned on. */
3190 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3191 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3192 cfg->caps);
3193
3194 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3195 }
3196
3197 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3198 desc.params.set_phy.lport_num = pi->lport;
3199 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3200
3201 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3202 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
3203 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
3204 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
3205 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
3206 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
3207 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3208 cfg->low_power_ctrl_an);
3209 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3210 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3211 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3212 cfg->link_fec_opt);
3213
3214 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3215 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3216 status = 0;
3217
3218 if (!status)
3219 pi->phy.curr_user_phy_cfg = *cfg;
3220
3221 return status;
3222}
3223
3224/**
3225 * ice_update_link_info - update status of the HW network link
3226 * @pi: port info structure of the interested logical port
3227 */
3228int ice_update_link_info(struct ice_port_info *pi)
3229{
3230 struct ice_link_status *li;
3231 int status;
3232
3233 if (!pi)
3234 return -EINVAL;
3235
3236 li = &pi->phy.link_info;
3237
3238 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3239 if (status)
3240 return status;
3241
3242 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3243 struct ice_aqc_get_phy_caps_data *pcaps;
3244 struct ice_hw *hw;
3245
3246 hw = pi->hw;
3247 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
3248 GFP_KERNEL);
3249 if (!pcaps)
3250 return -ENOMEM;
3251
3252 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3253 pcaps, NULL);
3254
3255 devm_kfree(ice_hw_to_dev(hw), pcaps);
3256 }
3257
3258 return status;
3259}
3260
3261/**
 * ice_cache_phy_user_req - cache user PHY request
3263 * @pi: port information structure
3264 * @cache_data: PHY logging data
3265 * @cache_mode: PHY logging mode
3266 *
 * Cache the user request (FC, FEC or SPEED) for later use.
3268 */
3269static void
3270ice_cache_phy_user_req(struct ice_port_info *pi,
3271 struct ice_phy_cache_mode_data cache_data,
3272 enum ice_phy_cache_mode cache_mode)
3273{
3274 if (!pi)
3275 return;
3276
3277 switch (cache_mode) {
3278 case ICE_FC_MODE:
3279 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3280 break;
3281 case ICE_SPEED_MODE:
3282 pi->phy.curr_user_speed_req =
3283 cache_data.data.curr_user_speed_req;
3284 break;
3285 case ICE_FEC_MODE:
3286 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3287 break;
3288 default:
3289 break;
3290 }
3291}
3292
3293/**
 * ice_caps_to_fc_mode - convert PHY FC capabilities to ice FC mode
3295 * @caps: PHY capabilities
3296 *
3297 * Convert PHY FC capabilities to ice FC mode
3298 */
3299enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3300{
3301 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3302 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3303 return ICE_FC_FULL;
3304
3305 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3306 return ICE_FC_TX_PAUSE;
3307
3308 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3309 return ICE_FC_RX_PAUSE;
3310
3311 return ICE_FC_NONE;
3312}
3313
3314/**
 * ice_caps_to_fec_mode - convert PHY FEC capabilities to ice FEC mode
3316 * @caps: PHY capabilities
3317 * @fec_options: Link FEC options
3318 *
3319 * Convert PHY FEC capabilities to ice FEC mode
3320 */
3321enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3322{
3323 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3324 return ICE_FEC_AUTO;
3325
3326 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3327 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3328 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3329 ICE_AQC_PHY_FEC_25G_KR_REQ))
3330 return ICE_FEC_BASER;
3331
3332 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3333 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3334 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3335 return ICE_FEC_RS;
3336
3337 return ICE_FEC_NONE;
3338}
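
/* Usage sketch (illustrative only): deriving the FC and FEC modes from a
 * previously fetched Get PHY Capabilities response in pcaps:
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */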
3339
3340/**
3341 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3342 * @pi: port information structure
3343 * @cfg: PHY configuration data to set FC mode
3344 * @req_mode: FC mode to configure
3345 */
3346int
3347ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3348 enum ice_fc_mode req_mode)
3349{
3350 struct ice_phy_cache_mode_data cache_data;
3351 u8 pause_mask = 0x0;
3352
3353 if (!pi || !cfg)
3354 return -EINVAL;
3355
3356 switch (req_mode) {
3357 case ICE_FC_FULL:
3358 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3359 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3360 break;
3361 case ICE_FC_RX_PAUSE:
3362 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3363 break;
3364 case ICE_FC_TX_PAUSE:
3365 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3366 break;
3367 default:
3368 break;
3369 }
3370
3371 /* clear the old pause settings */
3372 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3373 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3374
3375 /* set the new capabilities */
3376 cfg->caps |= pause_mask;
3377
3378 /* Cache user FC request */
3379 cache_data.data.curr_user_fc_req = req_mode;
3380 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3381
3382 return 0;
3383}
3384
3385/**
 * ice_set_fc - set the requested flow control mode
3387 * @pi: port information structure
3388 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3389 * @ena_auto_link_update: enable automatic link update
3390 *
3391 * Set the requested flow control mode.
3392 */
3393int
3394ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3395{
3396 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3397 struct ice_aqc_get_phy_caps_data *pcaps;
3398 struct ice_hw *hw;
3399 int status;
3400
3401 if (!pi || !aq_failures)
3402 return -EINVAL;
3403
3404 *aq_failures = 0;
3405 hw = pi->hw;
3406
3407 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3408 if (!pcaps)
3409 return -ENOMEM;
3410
3411 /* Get the current PHY config */
3412 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3413 pcaps, NULL);
3414 if (status) {
3415 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3416 goto out;
3417 }
3418
3419 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3420
3421 /* Configure the set PHY data */
3422 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3423 if (status)
3424 goto out;
3425
3426 /* If the capabilities have changed, then set the new config */
3427 if (cfg.caps != pcaps->caps) {
3428 int retry_count, retry_max = 10;
3429
3430 /* Auto restart link so settings take effect */
3431 if (ena_auto_link_update)
3432 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3433
3434 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3435 if (status) {
3436 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3437 goto out;
3438 }
3439
3440 /* Update the link info
3441 * It sometimes takes a really long time for link to
3442 * come back from the atomic reset. Thus, we wait a
3443 * little bit.
3444 */
3445 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3446 status = ice_update_link_info(pi);
3447
3448 if (!status)
3449 break;
3450
3451 mdelay(100);
3452 }
3453
3454 if (status)
3455 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3456 }
3457
3458out:
3459 devm_kfree(ice_hw_to_dev(hw), pcaps);
3460 return status;
3461}
3462
3463/**
 * ice_phy_caps_equals_cfg - check if PHY caps match PHY config
3465 * @phy_caps: PHY capabilities
3466 * @phy_cfg: PHY configuration
3467 *
 * Helper function to determine if the PHY capabilities match the PHY
 * configuration.
3470 */
3471bool
3472ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3473 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3474{
3475 u8 caps_mask, cfg_mask;
3476
3477 if (!phy_caps || !phy_cfg)
3478 return false;
3479
3480 /* These bits are not common between capabilities and configuration.
3481 * Do not use them to determine equality.
3482 */
3483 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3484 ICE_AQC_GET_PHY_EN_MOD_QUAL);
3485 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3486
3487 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3488 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3489 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3490 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3491 phy_caps->eee_cap != phy_cfg->eee_cap ||
3492 phy_caps->eeer_value != phy_cfg->eeer_value ||
3493 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3494 return false;
3495
3496 return true;
3497}
3498
3499/**
3500 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3501 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to the PHY set
 * configuration data structure.
3507 */
3508void
3509ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3510 struct ice_aqc_get_phy_caps_data *caps,
3511 struct ice_aqc_set_phy_cfg_data *cfg)
3512{
3513 if (!pi || !caps || !cfg)
3514 return;
3515
3516 memset(cfg, 0, sizeof(*cfg));
3517 cfg->phy_type_low = caps->phy_type_low;
3518 cfg->phy_type_high = caps->phy_type_high;
3519 cfg->caps = caps->caps;
3520 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3521 cfg->eee_cap = caps->eee_cap;
3522 cfg->eeer_value = caps->eeer_value;
3523 cfg->link_fec_opt = caps->link_fec_options;
3524 cfg->module_compliance_enforcement =
3525 caps->module_compliance_enforcement;
3526}
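
/* Example (not part of the original flow): a minimal sketch showing how the
 * two helpers above are typically combined to test whether the PHY's active
 * configuration still matches its reported abilities. The report mode and
 * the error handling here are illustrative assumptions, not driver policy.
 */
static bool ice_example_phy_cfg_in_sync(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data caps = {};
	struct ice_aqc_set_phy_cfg_data cfg = {};

	if (ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				&caps, NULL))
		return false;

	/* Build a set-config image from the reported abilities ... */
	ice_copy_phy_caps_to_cfg(pi, &caps, &cfg);

	/* ... and compare, ignoring the bits the helper masks out */
	return ice_phy_caps_equals_cfg(&caps, &cfg);
}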

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear the RS bits, AND in the BASE-R ability bits,
		 * and OR in the BASE-R request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear the BASE-R bits, AND in the RS ability bits,
		 * and OR in the RS request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* Keep the auto-FEC bit, mask caps to the valid set, and
		 * OR in all FEC options the PHY advertises.
		 */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = link up/link down)
 *
 * The variable link_up is true if the link is up, false if it is down,
 * and is only valid when the returned status is zero. As a side effect
 * of this call, link status reporting becomes enabled.
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}
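
/* Example (illustrative only): a minimal caller sketch that polls the link
 * until it comes up or a retry budget is exhausted. The retry count and
 * delay are assumptions for the sketch, not values the driver mandates.
 */
static bool ice_example_wait_link_up(struct ice_port_info *pi)
{
	bool link_up = false;
	int tries;

	for (tries = 0; tries < 10; tries++) {
		/* also enables link status reporting on first use */
		if (ice_get_link_status(pi, &link_up))
			return false;
		if (link_up)
			return true;
		msleep(100);	/* let the PHY settle between polls */
	}
	return false;
}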

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
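
/* Example (illustrative only): subscribing a port to link events. The
 * ICE_AQ_LINK_EVENT_* mask bits are assumed to come from ice_adminq_cmd.h;
 * only the bits set here will generate events on the ARQ.
 */
static int ice_example_enable_link_events(struct ice_hw *hw, u8 lport)
{
	u16 mask = ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA;

	return ice_aq_set_event_mask(hw, lport, mask, NULL);
}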

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the HW struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *                output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *               lport; when the PF owns more than 1 port it must be true
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}
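
/* Example (illustrative only): querying the active port option for the
 * port owned by this PF. Sizing the buffer by ICE_AQC_PORT_OPT_COUNT_M is
 * an assumption that mirrors the wrapper's own buffer-size check above.
 */
static int ice_example_query_active_port_option(struct ice_hw *hw)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_COUNT_M] = {};
	bool active_valid = false, pending_valid = false;
	u8 option_count = ICE_AQC_PORT_OPT_COUNT_M;
	u8 active_idx = 0, pending_idx = 0;
	int status;

	status = ice_aq_get_port_options(hw, options, &option_count, 0,
					 false, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status)
		return status;

	if (active_valid)
		ice_debug(hw, ICE_DBG_PHY, "active option max lane speed: %x\n",
			  options[active_idx].max_lane_speed);
	return 0;
}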

/**
 * ice_aq_set_port_option
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *               lport; when the PF owns more than 1 port it must be true
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	if (lport_valid)
		cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0 = topo default)
 * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits must be
 *            zero.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: false for read, true for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	u16 i2c_bus_addr;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
		       FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
	if (write)
		i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
	cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
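
/* Example (illustrative only): reading the SFF-8024 identifier byte
 * (offset 0, page 0, bus address 0xA0) to discover the module type.
 * Passing 0 for set_page (no page update) is an assumption for the sketch.
 */
static int ice_example_read_module_id(struct ice_hw *hw, u8 *id)
{
	/* lport = 0 with bit 8 clear: let FW pick the PF's own port */
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, id, 1,
				 false, NULL);
}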

static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
{
	switch (type) {
	case ICE_LUT_VSI:
		return ICE_LUT_VSI_SIZE;
	case ICE_LUT_GLOBAL:
		return ICE_LUT_GLOBAL_SIZE;
	case ICE_LUT_PF:
		return ICE_LUT_PF_SIZE;
	}
	WARN_ONCE(1, "incorrect type passed");
	return ICE_LUT_VSI_SIZE;
}

static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
{
	switch (size) {
	case ICE_LUT_VSI_SIZE:
		return ICE_AQC_LUT_SIZE_SMALL;
	case ICE_LUT_GLOBAL_SIZE:
		return ICE_AQC_LUT_SIZE_512;
	case ICE_LUT_PF_SIZE:
		return ICE_AQC_LUT_SIZE_2K;
	}
	WARN_ONCE(1, "incorrect size passed");
	return 0;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw,
			 struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
	enum ice_lut_type lut_type = params->lut_type;
	struct ice_aqc_get_set_rss_lut *desc_params;
	enum ice_aqc_lut_flags flags;
	enum ice_lut_size lut_size;
	struct ice_aq_desc desc;
	u8 *lut = params->lut;

	if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	lut_size = ice_lut_type_to_size(lut_type);
	if (lut_size > params->lut_size)
		return -EINVAL;
	else if (set && lut_size != params->lut_size)
		return -EINVAL;

	opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	if (set)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	desc_params = &desc.params.get_set_rss_lut;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	if (lut_type == ICE_LUT_GLOBAL)
		glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
					  params->global_lut_id);

	flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
	desc_params->flags = cpu_to_le16(flags);

	return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}
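
/* Example (illustrative only): programming an equal-spread VSI LUT. The
 * params layout mirrors the fields consumed by __ice_aq_get_set_rss_lut
 * above; num_rxqs is a caller-supplied queue count for the sketch.
 */
static int ice_example_set_vsi_lut(struct ice_hw *hw, u16 vsi_handle,
				   u16 num_rxqs)
{
	struct ice_aq_get_set_rss_lut_params params = {};
	u8 lut[ICE_LUT_VSI_SIZE];
	u16 i;

	/* round-robin the Rx queues across all LUT entries */
	for (i = 0; i < ICE_LUT_VSI_SIZE; i++)
		lut[i] = i % num_rxqs;

	params.vsi_handle = vsi_handle;
	params.lut_type = ICE_LUT_VSI;
	params.lut = lut;
	params.lut_size = ICE_LUT_VSI_SIZE;

	return ice_aq_set_rss_lut(hw, &params);
}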

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *desc_params;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	desc_params = &desc.params.get_set_rss_key;
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
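
/* Example (illustrative only): seeding a VSI with a random RSS key.
 * Filling the whole key structure with random bytes is an assumption
 * made for the sketch; a driver may prefer netdev_rss_key_fill().
 */
static int ice_example_randomize_rss_key(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_aqc_get_set_rss_keys keys;

	get_random_bytes(&keys, sizeof(keys));

	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}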

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 vmvf_and_timeout;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M;
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);

	/* flush pipe on timeout */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_cfg_lan_txq
 * @hw: pointer to the hardware structure
 * @buf: buffer for command
 * @buf_size: size of buffer in bytes
 * @num_qs: number of queues being configured
 * @oldport: origination lport
 * @newport: destination lport
 * @cd: pointer to command details structure or NULL
 *
 * Move/Configure LAN Tx queue (0x0C32)
 *
 * There is a better AQ command to use for moving nodes, so only coding
 * this one for configuring the node.
 */
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
		   u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_txqs *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.cfg_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (!buf)
		return -EINVAL;

	cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
	cmd->num_qs = num_qs;
	cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
	cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
	cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
	cmd->blocked_cgds = 0;

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
			  hw->adminq.sq_last_status);
	return status;
}

/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 5 bits, so the shift would do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 6 bits, so the shift would do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
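
/* Example (illustrative only): packing a two-field context with
 * ice_set_ctx(). The struct and table below are hypothetical; real tables
 * (e.g. for the Tx queue context) live elsewhere in the driver. Field 'b'
 * demonstrates a value that straddles a byte boundary in the packed image.
 */
struct ice_example_ctx {
	u8 a;	/* 3-bit field, packed at bits 0..2 */
	u16 b;	/* 10-bit field, packed at bits 3..12 */
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
	{ .offset = offsetof(struct ice_example_ctx, a),
	  .size_of = sizeof(u8), .width = 3, .lsb = 0 },
	{ .offset = offsetof(struct ice_example_ctx, b),
	  .size_of = sizeof(u16), .width = 10, .lsb = 3 },
	{ .width = 0 }	/* zero width terminates the table */
};

static int ice_example_pack_ctx(struct ice_hw *hw, u8 *buf)
{
	struct ice_example_ctx ctx = { .a = 5, .b = 0x2ab };

	/* buf must cover the packed layout (2 bytes for this table) */
	return ice_set_ctx(hw, (u8 *)&ctx, buf, ice_example_ctx_info);
}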

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 i, buf_size = __struct_size(qg_list);
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queues are already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
		q_ctx->q_teid = ICE_INVAL_TEID;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}
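
/* Example (illustrative only): giving a VSI the same LAN queue budget on
 * every traffic class. ICE_MAX_TRAFFIC_CLASS is assumed to bound the
 * per-TC array; the bitmap enables only TC 0 here.
 */
static int ice_example_cfg_lan_queues(struct ice_port_info *pi,
				      u16 vsi_handle, u16 num_txqs)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS];
	u8 i;

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)
		max_lanqs[i] = num_txqs;

	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}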

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qset
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node, NULL);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 qg_size = __struct_size(qg_list);
	struct ice_hw *hw;
	int status = 0;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_aq_get_cgu_abilities - get cgu abilities
 * @hw: pointer to the HW struct
 * @abilities: CGU abilities
 *
 * Get CGU abilities (0x0C61)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
			 struct ice_aqc_get_cgu_abilities *abilities)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
	return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
}

/**
 * ice_aq_set_input_pin_cfg - set input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Set CGU input config (0x0C62)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
			 u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_input_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
	cmd = &desc.params.set_cgu_input_config;
	cmd->input_idx = input_idx;
	cmd->flags1 = flags1;
	cmd->flags2 = flags2;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
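
/* Example (illustrative only): describing a 10 MHz reference on CGU input
 * pin 0 with no flag bits set and zero phase compensation. The zeroed
 * flags are an assumption for the sketch; real callers derive flag values
 * from the pin's reported capabilities.
 */
static int ice_example_cfg_10mhz_input(struct ice_hw *hw)
{
	return ice_aq_set_input_pin_cfg(hw, 0, 0, 0, 10000000, 0);
}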

/**
 * ice_aq_get_input_pin_cfg - get input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @status: Pin status
 * @type: Pin type
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Get CGU input config (0x0C63)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
			 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
	struct ice_aqc_get_cgu_input_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
	cmd = &desc.params.get_cgu_input_config;
	cmd->input_idx = input_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (status)
			*status = cmd->status;
		if (type)
			*type = cmd->type;
		if (flags1)
			*flags1 = cmd->flags1;
		if (flags2)
			*flags2 = cmd->flags2;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (phase_delay)
			*phase_delay = le32_to_cpu(cmd->phase_delay);
	}

	return ret;
}

/**
 * ice_aq_set_output_pin_cfg - set output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Index of DPLL block
 * @freq: Output frequency
 * @phase_delay: Output phase compensation
 *
 * Set CGU output config (0x0C64)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
			  u8 src_sel, u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_output_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
	cmd = &desc.params.set_cgu_output_config;
	cmd->output_idx = output_idx;
	cmd->flags = flags;
	cmd->src_sel = src_sel;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_output_pin_cfg - get output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Internal DPLL source
 * @freq: Output frequency
 * @src_freq: Source frequency
 *
 * Get CGU output config (0x0C65)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
			  u8 *src_sel, u32 *freq, u32 *src_freq)
{
	struct ice_aqc_get_cgu_output_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
	cmd = &desc.params.get_cgu_output_config;
	cmd->output_idx = output_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (flags)
			*flags = cmd->flags;
		if (src_sel)
			*src_sel = cmd->src_sel;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (src_freq)
			*src_freq = le32_to_cpu(cmd->src_freq);
	}

	return ret;
}

/**
 * ice_aq_get_cgu_dpll_status - get dpll status
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @dpll_state: current DPLL state
 * @config: current DPLL config
 * @phase_offset: Phase offset in ns
 * @eec_mode: EEC mode
 *
 * Get CGU DPLL status (0x0C66)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
			   u8 *dpll_state, u8 *config, s64 *phase_offset,
			   u8 *eec_mode)
{
	struct ice_aqc_get_cgu_dpll_status *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
	cmd = &desc.params.get_cgu_dpll_status;
	cmd->dpll_num = dpll_num;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*ref_state = cmd->ref_state;
		*dpll_state = cmd->dpll_state;
		*config = cmd->config;
		*phase_offset = le32_to_cpu(cmd->phase_offset_h);
		*phase_offset <<= 32;
		*phase_offset += le32_to_cpu(cmd->phase_offset_l);
		*phase_offset = sign_extend64(*phase_offset, 47);
		*eec_mode = cmd->eec_mode;
	}

	return status;
}
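
/* Example (illustrative only): reading DPLL 0 and logging its state. The
 * phase offset is the sign-extended 48-bit value assembled above; the
 * ICE_DBG_PTP debug class is an assumption for the sketch.
 */
static void ice_example_log_dpll_status(struct ice_hw *hw)
{
	u8 ref_state, dpll_state, config, eec_mode;
	s64 phase_offset;

	if (ice_aq_get_cgu_dpll_status(hw, 0, &ref_state, &dpll_state,
				       &config, &phase_offset, &eec_mode))
		return;

	ice_debug(hw, ICE_DBG_PTP, "dpll0 state %#x phase offset %lld\n",
		  dpll_state, phase_offset);
}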

/**
 * ice_aq_set_cgu_dpll_config - set dpll config
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: DPLL config
 * @eec_mode: EEC mode
 *
 * Set CGU DPLL config (0x0C67)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
			   u8 config, u8 eec_mode)
{
	struct ice_aqc_set_cgu_dpll_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
	cmd = &desc.params.set_cgu_dpll_config;
	cmd->dpll_num = dpll_num;
	cmd->ref_state = ref_state;
	cmd->config = config;
	cmd->eec_mode = eec_mode;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_cgu_ref_prio - set input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_priority: Reference input priority
 *
 * Set CGU reference priority (0x0C68)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 ref_priority)
{
	struct ice_aqc_set_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
	cmd = &desc.params.set_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;
	cmd->ref_priority = ref_priority;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_cgu_ref_prio - get input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_prio: Reference input priority
 *
 * Get CGU reference priority (0x0C69)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}

/**
 * ice_aq_get_cgu_info - get cgu info
 * @hw: pointer to the HW struct
 * @cgu_id: CGU ID
 * @cgu_cfg_ver: CGU config version
 * @cgu_fw_ver: CGU firmware version
 *
 * Get CGU info (0x0C6A)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
		    u32 *cgu_fw_ver)
{
	struct ice_aqc_get_cgu_info *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
	cmd = &desc.params.get_cgu_info;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*cgu_id = le32_to_cpu(cmd->cgu_id);
		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
	}

	return status;
}
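
/* Example (illustrative only): logging the CGU identity and firmware
 * version, e.g. once at probe time on a DPLL-capable part. ICE_DBG_PTP
 * as the debug class is an assumption for the sketch.
 */
static void ice_example_log_cgu_info(struct ice_hw *hw)
{
	u32 cgu_id, cgu_cfg_ver, cgu_fw_ver;

	if (ice_aq_get_cgu_info(hw, &cgu_id, &cgu_cfg_ver, &cgu_fw_ver))
		return;

	ice_debug(hw, ICE_DBG_PTP, "CGU id %u cfg ver %u fw ver %u\n",
		  cgu_id, cgu_cfg_ver, cgu_fw_ver);
}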

/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency
 *
 * Set phy recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*freq = le32_to_cpu(cmd->freq);

	return status;
}

/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: CGU node handle
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_aq_get_sensor_reading
 * @hw: pointer to the HW struct
 * @data: pointer to data to be read from the sensor
 *
 * Get sensor reading (0x0632)
 */
int ice_aq_get_sensor_reading(struct ice_hw *hw,
			      struct ice_aqc_get_sensor_reading_resp *data)
{
	struct ice_aqc_get_sensor_reading *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
	cmd = &desc.params.get_sensor_reading;
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT	0
#define ICE_INTERNAL_TEMP_SENSOR	0
	cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
	cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		memcpy(data, &desc.params.get_sensor_reading_resp,
		       sizeof(*data));

	return status;
}
5378
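/* Usage sketch (illustrative only): fetch the internal temperature sensor
 * reading and dump the first raw response byte; interpreting the response
 * layout is left to the caller.
 */
static void __maybe_unused ice_example_read_sensor(struct ice_hw *hw)
{
	struct ice_aqc_get_sensor_reading_resp data = {};

	if (!ice_aq_get_sensor_reading(hw, &data))
		ice_debug(hw, ICE_DBG_INIT, "sensor byte0: 0x%02x\n",
			  ((u8 *)&data)[0]);
}
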
/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from the replay filter list head, if any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restores all VSI configuration after reset. This function must be called
 * with the main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from the replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

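/* Usage sketch (illustrative only): replay order matters - the main VSI
 * must be replayed before any other VSI. @vsi_handles and @num_vsi are
 * hypothetical inputs for this example.
 */
static int __maybe_unused
ice_example_replay_all(struct ice_hw *hw, const u16 *vsi_handles, u16 num_vsi)
{
	int status;
	u16 i;

	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
	if (status)
		return status;

	for (i = 0; i < num_vsi; i++) {
		if (vsi_handles[i] == ICE_MAIN_VSI_HANDLE)
			continue;
		status = ice_replay_vsi(hw, vsi_handles[i]);
		if (status)
			return status;
	}

	ice_replay_post(hw);
	return 0;
}
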
/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* Device stats are not reset at PFR, so they will likely not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding it to the statistic value so that we report
	 * stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

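/* Usage sketch (illustrative only): accumulate the Rx "good octets" counter
 * across reads. GLPRT_GORCL is a real stats register; the prev/cur storage
 * and the loaded flag normally live in the PF statistics structures.
 */
static void __maybe_unused
ice_example_update_rx_bytes(struct ice_hw *hw, u8 port, bool loaded,
			    u64 *prev, u64 *cur)
{
	ice_stat_update40(hw, GLPRT_GORCL(port), loaded, prev, cur);
}
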
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* Device stats are not reset at PFR, so they will likely not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding it to the statistic value so that we report
	 * stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

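/* Usage sketch (illustrative only): query the port's scheduler root node
 * and log its element type.
 */
static void __maybe_unused ice_example_query_root(struct ice_port_info *pi)
{
	struct ice_aqc_txsched_elem_data elem;
	u32 teid = le32_to_cpu(pi->root->info.node_teid);

	if (!ice_sched_query_elem(pi->hw, teid, &elem))
		ice_debug(pi->hw, ICE_DBG_SCHED, "root elem type %u\n",
			  elem.data.elem_type);
}
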
/**
 * ice_aq_read_i2c - read I2C register value
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *			    bits [6:5] - data offset size,
 *			    bit [4] - I2C address type,
 *			    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	if (!data)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

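/* Usage sketch (illustrative only): read two bytes from an I2C device
 * behind a known topology node. The 0x50 bus address is typical for SFP
 * EEPROMs; real callers obtain @topo from a Get Link Topology response.
 */
static int __maybe_unused
ice_example_i2c_read2(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo,
		      u8 buf[2])
{
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2);

	return ice_aq_read_i2c(hw, topo, 0x50, cpu_to_le16(0), params,
			       buf, NULL);
}
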
/**
 * ice_aq_write_i2c - write a value to an I2C register
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0       - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO    - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio - get GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

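/* Usage sketch (illustrative only): read a GPIO pin and write back its
 * inverted value. @gpio_handle and @pin are hypothetical inputs.
 */
static int __maybe_unused
ice_example_toggle_gpio(struct ice_hw *hw, u16 gpio_handle, u8 pin)
{
	bool val;
	int status;

	status = ice_aq_get_gpio(hw, gpio_handle, pin, &val, NULL);
	if (status)
		return status;

	return ice_aq_set_gpio(hw, gpio_handle, pin, !val, NULL);
}
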
/**
 * ice_is_fw_api_min_ver - check that FW API is at least a given version
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API version is at least maj.min.patch
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override - get the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link FEC options */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override FEC options.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

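/* Usage sketch (illustrative only): fetch the port's link default override
 * TLV only when firmware advertises support for it.
 */
static int __maybe_unused
ice_example_read_ldo(struct ice_port_info *pi,
		     struct ice_link_default_override_tlv *ldo)
{
	if (!ice_fw_supports_link_override(pi->hw))
		return -EOPNOTSUPP;

	return ice_get_link_default_override(ldo, pi);
}
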
/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add the filter, false to remove it
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

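/* Usage sketch (illustrative only): only install the LLDP Rx filter when
 * firmware advertises support for LLDP filter control.
 */
static int __maybe_unused
ice_example_add_lldp_fltr(struct ice_hw *hw, u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return -EOPNOTSUPP;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}
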
/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index into the following array matches the speed of the
 * corresponding return value from the list of AQ returned speeds in the
 * range ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15). The link_speed returned by
 * the firmware is a 16-bit value, and the array is indexed by
 * [fls(speed) - 1].
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Return: link speed in Mbps, or 0 when the index is out of range
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
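
/* Usage sketch (illustrative only): convert an AQ-reported link speed bit
 * (e.g. ICE_AQ_LINK_SPEED_25GB) to Mbps via fls().
 */
static u32 __maybe_unused ice_example_aq_speed_to_mbps(u16 aq_speed)
{
	return aq_speed ? ice_get_link_speed(fls(aq_speed) - 1) : 0;
}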
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_common.h"
5#include "ice_lib.h"
6#include "ice_sched.h"
7#include "ice_adminq_cmd.h"
8#include "ice_flow.h"
9
10#define ICE_PF_RESET_WAIT_COUNT 300
11
12/**
13 * ice_set_mac_type - Sets MAC type
14 * @hw: pointer to the HW structure
15 *
16 * This function sets the MAC type of the adapter based on the
17 * vendor ID and device ID stored in the HW structure.
18 */
19static enum ice_status ice_set_mac_type(struct ice_hw *hw)
20{
21 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
22 return ICE_ERR_DEVICE_NOT_SUPPORTED;
23
24 switch (hw->device_id) {
25 case ICE_DEV_ID_E810C_BACKPLANE:
26 case ICE_DEV_ID_E810C_QSFP:
27 case ICE_DEV_ID_E810C_SFP:
28 case ICE_DEV_ID_E810_XXV_BACKPLANE:
29 case ICE_DEV_ID_E810_XXV_QSFP:
30 case ICE_DEV_ID_E810_XXV_SFP:
31 hw->mac_type = ICE_MAC_E810;
32 break;
33 case ICE_DEV_ID_E823C_10G_BASE_T:
34 case ICE_DEV_ID_E823C_BACKPLANE:
35 case ICE_DEV_ID_E823C_QSFP:
36 case ICE_DEV_ID_E823C_SFP:
37 case ICE_DEV_ID_E823C_SGMII:
38 case ICE_DEV_ID_E822C_10G_BASE_T:
39 case ICE_DEV_ID_E822C_BACKPLANE:
40 case ICE_DEV_ID_E822C_QSFP:
41 case ICE_DEV_ID_E822C_SFP:
42 case ICE_DEV_ID_E822C_SGMII:
43 case ICE_DEV_ID_E822L_10G_BASE_T:
44 case ICE_DEV_ID_E822L_BACKPLANE:
45 case ICE_DEV_ID_E822L_SFP:
46 case ICE_DEV_ID_E822L_SGMII:
47 case ICE_DEV_ID_E823L_10G_BASE_T:
48 case ICE_DEV_ID_E823L_1GBE:
49 case ICE_DEV_ID_E823L_BACKPLANE:
50 case ICE_DEV_ID_E823L_QSFP:
51 case ICE_DEV_ID_E823L_SFP:
52 hw->mac_type = ICE_MAC_GENERIC;
53 break;
54 default:
55 hw->mac_type = ICE_MAC_UNKNOWN;
56 break;
57 }
58
59 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
60 return 0;
61}
62
63/**
64 * ice_is_e810
65 * @hw: pointer to the hardware structure
66 *
67 * returns true if the device is E810 based, false if not.
68 */
69bool ice_is_e810(struct ice_hw *hw)
70{
71 return hw->mac_type == ICE_MAC_E810;
72}
73
74/**
75 * ice_clear_pf_cfg - Clear PF configuration
76 * @hw: pointer to the hardware structure
77 *
78 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
79 * configuration, flow director filters, etc.).
80 */
81enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
82{
83 struct ice_aq_desc desc;
84
85 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
86
87 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
88}
89
90/**
91 * ice_aq_manage_mac_read - manage MAC address read command
92 * @hw: pointer to the HW struct
93 * @buf: a virtual buffer to hold the manage MAC read response
94 * @buf_size: Size of the virtual buffer
95 * @cd: pointer to command details structure or NULL
96 *
97 * This function is used to return per PF station MAC address (0x0107).
98 * NOTE: Upon successful completion of this command, MAC address information
99 * is returned in user specified buffer. Please interpret user specified
100 * buffer as "manage_mac_read" response.
101 * Response such as various MAC addresses are stored in HW struct (port.mac)
102 * ice_discover_dev_caps is expected to be called before this function is
103 * called.
104 */
105static enum ice_status
106ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
107 struct ice_sq_cd *cd)
108{
109 struct ice_aqc_manage_mac_read_resp *resp;
110 struct ice_aqc_manage_mac_read *cmd;
111 struct ice_aq_desc desc;
112 enum ice_status status;
113 u16 flags;
114 u8 i;
115
116 cmd = &desc.params.mac_read;
117
118 if (buf_size < sizeof(*resp))
119 return ICE_ERR_BUF_TOO_SHORT;
120
121 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
122
123 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
124 if (status)
125 return status;
126
127 resp = buf;
128 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
129
130 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
131 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
132 return ICE_ERR_CFG;
133 }
134
135 /* A single port can report up to two (LAN and WoL) addresses */
136 for (i = 0; i < cmd->num_addr; i++)
137 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
138 ether_addr_copy(hw->port_info->mac.lan_addr,
139 resp[i].mac_addr);
140 ether_addr_copy(hw->port_info->mac.perm_addr,
141 resp[i].mac_addr);
142 break;
143 }
144
145 return 0;
146}
147
148/**
149 * ice_aq_get_phy_caps - returns PHY capabilities
150 * @pi: port information structure
151 * @qual_mods: report qualified modules
152 * @report_mode: report mode capabilities
153 * @pcaps: structure for PHY capabilities to be filled
154 * @cd: pointer to command details structure or NULL
155 *
156 * Returns the various PHY capabilities supported on the Port (0x0600)
157 */
158enum ice_status
159ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
160 struct ice_aqc_get_phy_caps_data *pcaps,
161 struct ice_sq_cd *cd)
162{
163 struct ice_aqc_get_phy_caps *cmd;
164 u16 pcaps_size = sizeof(*pcaps);
165 struct ice_aq_desc desc;
166 enum ice_status status;
167 struct ice_hw *hw;
168
169 cmd = &desc.params.get_phy;
170
171 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
172 return ICE_ERR_PARAM;
173 hw = pi->hw;
174
175 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
176 !ice_fw_supports_report_dflt_cfg(hw))
177 return ICE_ERR_PARAM;
178
179 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
180
181 if (qual_mods)
182 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
183
184 cmd->param0 |= cpu_to_le16(report_mode);
185 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
186
187 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
188 report_mode);
189 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
190 (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
191 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
192 (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
193 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
194 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
195 pcaps->low_power_ctrl_an);
196 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
197 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
198 pcaps->eeer_value);
199 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
200 pcaps->link_fec_options);
201 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
202 pcaps->module_compliance_enforcement);
203 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
204 pcaps->extended_compliance_code);
205 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
206 pcaps->module_type[0]);
207 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
208 pcaps->module_type[1]);
209 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
210 pcaps->module_type[2]);
211
212 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
213 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
214 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
215 memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
216 sizeof(pi->phy.link_info.module_type));
217 }
218
219 return status;
220}
221
222/**
223 * ice_aq_get_link_topo_handle - get link topology node return status
224 * @pi: port information structure
225 * @node_type: requested node type
226 * @cd: pointer to command details structure or NULL
227 *
228 * Get link topology node return status for specified node type (0x06E0)
229 *
230 * Node type cage can be used to determine if cage is present. If AQC
231 * returns error (ENOENT), then no cage present. If no cage present, then
232 * connection type is backplane or BASE-T.
233 */
234static enum ice_status
235ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
236 struct ice_sq_cd *cd)
237{
238 struct ice_aqc_get_link_topo *cmd;
239 struct ice_aq_desc desc;
240
241 cmd = &desc.params.get_link_topo;
242
243 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
244
245 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
246 ICE_AQC_LINK_TOPO_NODE_CTX_S);
247
248 /* set node type */
249 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
250
251 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
252}
253
254/**
255 * ice_is_media_cage_present
256 * @pi: port information structure
257 *
258 * Returns true if media cage is present, else false. If no cage, then
259 * media type is backplane or BASE-T.
260 */
261static bool ice_is_media_cage_present(struct ice_port_info *pi)
262{
263 /* Node type cage can be used to determine if cage is present. If AQC
264 * returns error (ENOENT), then no cage present. If no cage present then
265 * connection type is backplane or BASE-T.
266 */
267 return !ice_aq_get_link_topo_handle(pi,
268 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
269 NULL);
270}
271
272/**
273 * ice_get_media_type - Gets media type
274 * @pi: port information structure
275 */
276static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
277{
278 struct ice_link_status *hw_link_info;
279
280 if (!pi)
281 return ICE_MEDIA_UNKNOWN;
282
283 hw_link_info = &pi->phy.link_info;
284 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
285 /* If more than one media type is selected, report unknown */
286 return ICE_MEDIA_UNKNOWN;
287
288 if (hw_link_info->phy_type_low) {
289 /* 1G SGMII is a special case where some DA cable PHYs
290 * may show this as an option when it really shouldn't
291 * be since SGMII is meant to be between a MAC and a PHY
292 * in a backplane. Try to detect this case and handle it
293 */
294 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
295 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
296 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
297 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
298 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
299 return ICE_MEDIA_DA;
300
301 switch (hw_link_info->phy_type_low) {
302 case ICE_PHY_TYPE_LOW_1000BASE_SX:
303 case ICE_PHY_TYPE_LOW_1000BASE_LX:
304 case ICE_PHY_TYPE_LOW_10GBASE_SR:
305 case ICE_PHY_TYPE_LOW_10GBASE_LR:
306 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
307 case ICE_PHY_TYPE_LOW_25GBASE_SR:
308 case ICE_PHY_TYPE_LOW_25GBASE_LR:
309 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
310 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
311 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
312 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
313 case ICE_PHY_TYPE_LOW_50GBASE_SR:
314 case ICE_PHY_TYPE_LOW_50GBASE_FR:
315 case ICE_PHY_TYPE_LOW_50GBASE_LR:
316 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
317 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
318 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
319 case ICE_PHY_TYPE_LOW_100GBASE_DR:
320 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
321 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
322 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
323 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
324 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
325 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
326 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
327 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
328 return ICE_MEDIA_FIBER;
329 case ICE_PHY_TYPE_LOW_100BASE_TX:
330 case ICE_PHY_TYPE_LOW_1000BASE_T:
331 case ICE_PHY_TYPE_LOW_2500BASE_T:
332 case ICE_PHY_TYPE_LOW_5GBASE_T:
333 case ICE_PHY_TYPE_LOW_10GBASE_T:
334 case ICE_PHY_TYPE_LOW_25GBASE_T:
335 return ICE_MEDIA_BASET;
336 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
337 case ICE_PHY_TYPE_LOW_25GBASE_CR:
338 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
339 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
340 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
341 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
342 case ICE_PHY_TYPE_LOW_50GBASE_CP:
343 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
344 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
345 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
346 return ICE_MEDIA_DA;
347 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
348 case ICE_PHY_TYPE_LOW_40G_XLAUI:
349 case ICE_PHY_TYPE_LOW_50G_LAUI2:
350 case ICE_PHY_TYPE_LOW_50G_AUI2:
351 case ICE_PHY_TYPE_LOW_50G_AUI1:
352 case ICE_PHY_TYPE_LOW_100G_AUI4:
353 case ICE_PHY_TYPE_LOW_100G_CAUI4:
354 if (ice_is_media_cage_present(pi))
355 return ICE_MEDIA_DA;
356 fallthrough;
357 case ICE_PHY_TYPE_LOW_1000BASE_KX:
358 case ICE_PHY_TYPE_LOW_2500BASE_KX:
359 case ICE_PHY_TYPE_LOW_2500BASE_X:
360 case ICE_PHY_TYPE_LOW_5GBASE_KR:
361 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
362 case ICE_PHY_TYPE_LOW_25GBASE_KR:
363 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
364 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
365 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
366 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
367 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
368 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
369 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
370 return ICE_MEDIA_BACKPLANE;
371 }
372 } else {
373 switch (hw_link_info->phy_type_high) {
374 case ICE_PHY_TYPE_HIGH_100G_AUI2:
375 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
376 if (ice_is_media_cage_present(pi))
377 return ICE_MEDIA_DA;
378 fallthrough;
379 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
380 return ICE_MEDIA_BACKPLANE;
381 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
382 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
383 return ICE_MEDIA_FIBER;
384 }
385 }
386 return ICE_MEDIA_UNKNOWN;
387}
388
389/**
390 * ice_aq_get_link_info
391 * @pi: port information structure
392 * @ena_lse: enable/disable LinkStatusEvent reporting
393 * @link: pointer to link status structure - optional
394 * @cd: pointer to command details structure or NULL
395 *
396 * Get Link Status (0x607). Returns the link status of the adapter.
397 */
398enum ice_status
399ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
400 struct ice_link_status *link, struct ice_sq_cd *cd)
401{
402 struct ice_aqc_get_link_status_data link_data = { 0 };
403 struct ice_aqc_get_link_status *resp;
404 struct ice_link_status *li_old, *li;
405 enum ice_media_type *hw_media_type;
406 struct ice_fc_info *hw_fc_info;
407 bool tx_pause, rx_pause;
408 struct ice_aq_desc desc;
409 enum ice_status status;
410 struct ice_hw *hw;
411 u16 cmd_flags;
412
413 if (!pi)
414 return ICE_ERR_PARAM;
415 hw = pi->hw;
416 li_old = &pi->phy.link_info_old;
417 hw_media_type = &pi->phy.media_type;
418 li = &pi->phy.link_info;
419 hw_fc_info = &pi->fc;
420
421 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
422 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
423 resp = &desc.params.get_link_status;
424 resp->cmd_flags = cpu_to_le16(cmd_flags);
425 resp->lport_num = pi->lport;
426
427 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
428
429 if (status)
430 return status;
431
432 /* save off old link status information */
433 *li_old = *li;
434
435 /* update current link status information */
436 li->link_speed = le16_to_cpu(link_data.link_speed);
437 li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
438 li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
439 *hw_media_type = ice_get_media_type(pi);
440 li->link_info = link_data.link_info;
441 li->link_cfg_err = link_data.link_cfg_err;
442 li->an_info = link_data.an_info;
443 li->ext_info = link_data.ext_info;
444 li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
445 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
446 li->topo_media_conflict = link_data.topo_media_conflict;
447 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
448 ICE_AQ_CFG_PACING_TYPE_M);
449
450 /* update fc info */
451 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
452 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
453 if (tx_pause && rx_pause)
454 hw_fc_info->current_mode = ICE_FC_FULL;
455 else if (tx_pause)
456 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
457 else if (rx_pause)
458 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
459 else
460 hw_fc_info->current_mode = ICE_FC_NONE;
461
462 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
463
464 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
465 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
466 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
467 (unsigned long long)li->phy_type_low);
468 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
469 (unsigned long long)li->phy_type_high);
470 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
471 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
472 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
473 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
474 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
475 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
476 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
477 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
478 li->max_frame_size);
479 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
480
481 /* save link status information */
482 if (link)
483 *link = *li;
484
485 /* flag cleared so calling functions don't call AQ again */
486 pi->phy.get_link_info = false;
487
488 return 0;
489}
490
491/**
492 * ice_fill_tx_timer_and_fc_thresh
493 * @hw: pointer to the HW struct
494 * @cmd: pointer to MAC cfg structure
495 *
496 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
497 * descriptor
498 */
499static void
500ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
501 struct ice_aqc_set_mac_cfg *cmd)
502{
503 u16 fc_thres_val, tx_timer_val;
504 u32 val;
505
506 /* We read back the transmit timer and FC threshold value of
507 * LFC. Thus, we will use index =
508 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
509 *
510 * Also, because we are operating on transmit timer and FC
511 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
512 */
513#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
514
515 /* Retrieve the transmit timer */
516 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
517 tx_timer_val = val &
518 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
519 cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
520
521 /* Retrieve the FC threshold */
522 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
523 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
524
525 cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
526}
527
528/**
529 * ice_aq_set_mac_cfg
530 * @hw: pointer to the HW struct
531 * @max_frame_size: Maximum Frame Size to be supported
532 * @cd: pointer to command details structure or NULL
533 *
534 * Set MAC configuration (0x0603)
535 */
536enum ice_status
537ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
538{
539 struct ice_aqc_set_mac_cfg *cmd;
540 struct ice_aq_desc desc;
541
542 cmd = &desc.params.set_mac_cfg;
543
544 if (max_frame_size == 0)
545 return ICE_ERR_PARAM;
546
547 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
548
549 cmd->max_frame_size = cpu_to_le16(max_frame_size);
550
551 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
552
553 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
554}
555
556/**
557 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
558 * @hw: pointer to the HW struct
559 */
560static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
561{
562 struct ice_switch_info *sw;
563 enum ice_status status;
564
565 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
566 sizeof(*hw->switch_info), GFP_KERNEL);
567 sw = hw->switch_info;
568
569 if (!sw)
570 return ICE_ERR_NO_MEMORY;
571
572 INIT_LIST_HEAD(&sw->vsi_list_map_head);
573
574 status = ice_init_def_sw_recp(hw);
575 if (status) {
576 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
577 return status;
578 }
579 return 0;
580}
581
582/**
583 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
584 * @hw: pointer to the HW struct
585 */
586static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
587{
588 struct ice_switch_info *sw = hw->switch_info;
589 struct ice_vsi_list_map_info *v_pos_map;
590 struct ice_vsi_list_map_info *v_tmp_map;
591 struct ice_sw_recipe *recps;
592 u8 i;
593
594 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
595 list_entry) {
596 list_del(&v_pos_map->list_entry);
597 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
598 }
599 recps = hw->switch_info->recp_list;
600 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
601 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
602
603 recps[i].root_rid = i;
604 mutex_destroy(&recps[i].filt_rule_lock);
605 list_for_each_entry_safe(lst_itr, tmp_entry,
606 &recps[i].filt_rules, list_entry) {
607 list_del(&lst_itr->list_entry);
608 devm_kfree(ice_hw_to_dev(hw), lst_itr);
609 }
610 }
611 ice_rm_all_sw_replay_rule_info(hw);
612 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
613 devm_kfree(ice_hw_to_dev(hw), sw);
614}
615
616/**
617 * ice_get_fw_log_cfg - get FW logging configuration
618 * @hw: pointer to the HW struct
619 */
620static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
621{
622 struct ice_aq_desc desc;
623 enum ice_status status;
624 __le16 *config;
625 u16 size;
626
627 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
628 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
629 if (!config)
630 return ICE_ERR_NO_MEMORY;
631
632 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
633
634 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
635 if (!status) {
636 u16 i;
637
638 /* Save FW logging information into the HW structure */
639 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
640 u16 v, m, flgs;
641
642 v = le16_to_cpu(config[i]);
643 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
644 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
645
646 if (m < ICE_AQC_FW_LOG_ID_MAX)
647 hw->fw_log.evnts[m].cur = flgs;
648 }
649 }
650
651 devm_kfree(ice_hw_to_dev(hw), config);
652
653 return status;
654}
655
656/**
657 * ice_cfg_fw_log - configure FW logging
658 * @hw: pointer to the HW struct
659 * @enable: enable certain FW logging events if true, disable all if false
660 *
661 * This function enables/disables the FW logging via Rx CQ events and a UART
662 * port based on predetermined configurations. FW logging via the Rx CQ can be
663 * enabled/disabled for individual PF's. However, FW logging via the UART can
664 * only be enabled/disabled for all PFs on the same device.
665 *
666 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
667 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
668 * before initializing the device.
669 *
670 * When re/configuring FW logging, callers need to update the "cfg" elements of
671 * the hw->fw_log.evnts array with the desired logging event configurations for
672 * modules of interest. When disabling FW logging completely, the callers can
673 * just pass false in the "enable" parameter. On completion, the function will
674 * update the "cur" element of the hw->fw_log.evnts array with the resulting
675 * logging event configurations of the modules that are being re/configured. FW
676 * logging modules that are not part of a reconfiguration operation retain their
677 * previous states.
678 *
679 * Before resetting the device, it is recommended that the driver disables FW
680 * logging before shutting down the control queue. When disabling FW logging
681 * ("enable" = false), the latest configurations of FW logging events stored in
682 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
683 * a device reset.
684 *
685 * When enabling FW logging to emit log messages via the Rx CQ during the
686 * device's initialization phase, a mechanism alternative to interrupt handlers
687 * needs to be used to extract FW log messages from the Rx CQ periodically and
688 * to prevent the Rx CQ from being full and stalling other types of control
689 * messages from FW to SW. Interrupts are typically disabled during the device's
690 * initialization phase.
691 */
692static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
693{
694 struct ice_aqc_fw_logging *cmd;
695 enum ice_status status = 0;
696 u16 i, chgs = 0, len = 0;
697 struct ice_aq_desc desc;
698 __le16 *data = NULL;
699 u8 actv_evnts = 0;
700 void *buf = NULL;
701
702 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
703 return 0;
704
705 /* Disable FW logging only when the control queue is still responsive */
706 if (!enable &&
707 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
708 return 0;
709
710 /* Get current FW log settings */
711 status = ice_get_fw_log_cfg(hw);
712 if (status)
713 return status;
714
715 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
716 cmd = &desc.params.fw_logging;
717
718 /* Indicate which controls are valid */
719 if (hw->fw_log.cq_en)
720 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
721
722 if (hw->fw_log.uart_en)
723 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
724
725 if (enable) {
726 /* Fill in an array of entries with FW logging modules and
727 * logging events being reconfigured.
728 */
729 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
730 u16 val;
731
732 /* Keep track of enabled event types */
733 actv_evnts |= hw->fw_log.evnts[i].cfg;
734
735 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
736 continue;
737
738 if (!data) {
739 data = devm_kcalloc(ice_hw_to_dev(hw),
740 ICE_AQC_FW_LOG_ID_MAX,
741 sizeof(*data),
742 GFP_KERNEL);
743 if (!data)
744 return ICE_ERR_NO_MEMORY;
745 }
746
747 val = i << ICE_AQC_FW_LOG_ID_S;
748 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
749 data[chgs++] = cpu_to_le16(val);
750 }
751
752 /* Only enable FW logging if at least one module is specified.
753 * If FW logging is currently enabled but all modules are not
754 * enabled to emit log messages, disable FW logging altogether.
755 */
756 if (actv_evnts) {
757 /* Leave if there is effectively no change */
758 if (!chgs)
759 goto out;
760
761 if (hw->fw_log.cq_en)
762 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
763
764 if (hw->fw_log.uart_en)
765 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
766
767 buf = data;
768 len = sizeof(*data) * chgs;
769 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
770 }
771 }
772
773 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
774 if (!status) {
775 /* Update the current configuration to reflect events enabled.
776 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
777 * logging mode is enabled for the device. They do not reflect
778 * actual modules being enabled to emit log messages. So, their
779 * values remain unchanged even when all modules are disabled.
780 */
781 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
782
783 hw->fw_log.actv_evnts = actv_evnts;
784 for (i = 0; i < cnt; i++) {
785 u16 v, m;
786
787 if (!enable) {
788 /* When disabling all FW logging events as part
789 * of device's de-initialization, the original
790 * configurations are retained, and can be used
791 * to reconfigure FW logging later if the device
792 * is re-initialized.
793 */
794 hw->fw_log.evnts[i].cur = 0;
795 continue;
796 }
797
798 v = le16_to_cpu(data[i]);
799 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
800 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
801 }
802 }
803
804out:
805 if (data)
806 devm_kfree(ice_hw_to_dev(hw), data);
807
808 return status;
809}
810
811/**
812 * ice_output_fw_log
813 * @hw: pointer to the HW struct
814 * @desc: pointer to the AQ message descriptor
815 * @buf: pointer to the buffer accompanying the AQ message
816 *
817 * Formats a FW Log message and outputs it via the standard driver logs.
818 */
819void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
820{
821 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
822 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
823 le16_to_cpu(desc->datalen));
824 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
825}
826
827/**
828 * ice_get_itr_intrl_gran
829 * @hw: pointer to the HW struct
830 *
831 * Determines the ITR/INTRL granularities based on the maximum aggregate
832 * bandwidth according to the device's configuration during power-on.
833 */
834static void ice_get_itr_intrl_gran(struct ice_hw *hw)
835{
836 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
837 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
838 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
839
840 switch (max_agg_bw) {
841 case ICE_MAX_AGG_BW_200G:
842 case ICE_MAX_AGG_BW_100G:
843 case ICE_MAX_AGG_BW_50G:
844 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
845 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
846 break;
847 case ICE_MAX_AGG_BW_25G:
848 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
849 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
850 break;
851 }
852}
853
854/**
855 * ice_init_hw - main hardware initialization routine
856 * @hw: pointer to the hardware structure
857 */
858enum ice_status ice_init_hw(struct ice_hw *hw)
859{
860 struct ice_aqc_get_phy_caps_data *pcaps;
861 enum ice_status status;
862 u16 mac_buf_len;
863 void *mac_buf;
864
865 /* Set MAC type based on DeviceID */
866 status = ice_set_mac_type(hw);
867 if (status)
868 return status;
869
870 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
871 PF_FUNC_RID_FUNC_NUM_M) >>
872 PF_FUNC_RID_FUNC_NUM_S;
873
874 status = ice_reset(hw, ICE_RESET_PFR);
875 if (status)
876 return status;
877
878 ice_get_itr_intrl_gran(hw);
879
880 status = ice_create_all_ctrlq(hw);
881 if (status)
882 goto err_unroll_cqinit;
883
884 /* Enable FW logging. Not fatal if this fails. */
885 status = ice_cfg_fw_log(hw, true);
886 if (status)
887 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
888
889 status = ice_clear_pf_cfg(hw);
890 if (status)
891 goto err_unroll_cqinit;
892
893 /* Set bit to enable Flow Director filters */
894 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
895 INIT_LIST_HEAD(&hw->fdir_list_head);
896
897 ice_clear_pxe_mode(hw);
898
899 status = ice_init_nvm(hw);
900 if (status)
901 goto err_unroll_cqinit;
902
903 status = ice_get_caps(hw);
904 if (status)
905 goto err_unroll_cqinit;
906
907 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
908 sizeof(*hw->port_info), GFP_KERNEL);
909 if (!hw->port_info) {
910 status = ICE_ERR_NO_MEMORY;
911 goto err_unroll_cqinit;
912 }
913
914 /* set the back pointer to HW */
915 hw->port_info->hw = hw;
916
917 /* Initialize port_info struct with switch configuration data */
918 status = ice_get_initial_sw_cfg(hw);
919 if (status)
920 goto err_unroll_alloc;
921
922 hw->evb_veb = true;
923
924 /* Query the allocated resources for Tx scheduler */
925 status = ice_sched_query_res_alloc(hw);
926 if (status) {
927 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
928 goto err_unroll_alloc;
929 }
930 ice_sched_get_psm_clk_freq(hw);
931
932 /* Initialize port_info struct with scheduler data */
933 status = ice_sched_init_port(hw->port_info);
934 if (status)
935 goto err_unroll_sched;
936
937 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
938 if (!pcaps) {
939 status = ICE_ERR_NO_MEMORY;
940 goto err_unroll_sched;
941 }
942
943 /* Initialize port_info struct with PHY capabilities */
944 status = ice_aq_get_phy_caps(hw->port_info, false,
945 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
946 NULL);
947 devm_kfree(ice_hw_to_dev(hw), pcaps);
948 if (status)
949 dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
950 status);
951
952 /* Initialize port_info struct with link information */
953 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
954 if (status)
955 goto err_unroll_sched;
956
957 /* need a valid SW entry point to build a Tx tree */
958 if (!hw->sw_entry_point_layer) {
959 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
960 status = ICE_ERR_CFG;
961 goto err_unroll_sched;
962 }
963 INIT_LIST_HEAD(&hw->agg_list);
964 /* Initialize max burst size */
965 if (!hw->max_burst_size)
966 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
967
968 status = ice_init_fltr_mgmt_struct(hw);
969 if (status)
970 goto err_unroll_sched;
971
972 /* Get MAC information */
973 /* A single port can report up to two (LAN and WoL) addresses */
974 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
975 sizeof(struct ice_aqc_manage_mac_read_resp),
976 GFP_KERNEL);
977 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
978
979 if (!mac_buf) {
980 status = ICE_ERR_NO_MEMORY;
981 goto err_unroll_fltr_mgmt_struct;
982 }
983
984 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
985 devm_kfree(ice_hw_to_dev(hw), mac_buf);
986
987 if (status)
988 goto err_unroll_fltr_mgmt_struct;
989 /* enable jumbo frame support at MAC level */
990 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
991 if (status)
992 goto err_unroll_fltr_mgmt_struct;
993 /* Obtain counter base index which would be used by flow director */
994 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
995 if (status)
996 goto err_unroll_fltr_mgmt_struct;
997 status = ice_init_hw_tbls(hw);
998 if (status)
999 goto err_unroll_fltr_mgmt_struct;
1000 mutex_init(&hw->tnl_lock);
1001 return 0;
1002
1003err_unroll_fltr_mgmt_struct:
1004 ice_cleanup_fltr_mgmt_struct(hw);
1005err_unroll_sched:
1006 ice_sched_cleanup_all(hw);
1007err_unroll_alloc:
1008 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1009err_unroll_cqinit:
1010 ice_destroy_all_ctrlq(hw);
1011 return status;
1012}
1013
1014/**
1015 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1016 * @hw: pointer to the hardware structure
1017 *
1018 * This should be called only during nominal operation, not as a result of
1019 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
1020 * applicable initializations if it fails for any reason.
1021 */
1022void ice_deinit_hw(struct ice_hw *hw)
1023{
1024 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1025 ice_cleanup_fltr_mgmt_struct(hw);
1026
1027 ice_sched_cleanup_all(hw);
1028 ice_sched_clear_agg(hw);
1029 ice_free_seg(hw);
1030 ice_free_hw_tbls(hw);
1031 mutex_destroy(&hw->tnl_lock);
1032
1033 if (hw->port_info) {
1034 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1035 hw->port_info = NULL;
1036 }
1037
1038 /* Attempt to disable FW logging before shutting down control queues */
1039 ice_cfg_fw_log(hw, false);
1040 ice_destroy_all_ctrlq(hw);
1041
1042 /* Clear VSI contexts if not already cleared */
1043 ice_clear_all_vsi_ctx(hw);
1044}
1045
1046/**
1047 * ice_check_reset - Check to see if a global reset is complete
1048 * @hw: pointer to the hardware structure
1049 */
1050enum ice_status ice_check_reset(struct ice_hw *hw)
1051{
1052 u32 cnt, reg = 0, grst_timeout, uld_mask;
1053
1054 /* Poll for Device Active state in case a recent CORER, GLOBR,
1055 * or EMPR has occurred. The grst delay value is in 100ms units.
1056 * Add 1sec for outstanding AQ commands that can take a long time.
1057 */
1058 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1059 GLGEN_RSTCTL_GRSTDEL_S) + 10;
1060
1061 for (cnt = 0; cnt < grst_timeout; cnt++) {
1062 mdelay(100);
1063 reg = rd32(hw, GLGEN_RSTAT);
1064 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1065 break;
1066 }
1067
1068 if (cnt == grst_timeout) {
1069 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1070 return ICE_ERR_RESET_FAILED;
1071 }
1072
1073#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1074 GLNVM_ULD_PCIER_DONE_1_M |\
1075 GLNVM_ULD_CORER_DONE_M |\
1076 GLNVM_ULD_GLOBR_DONE_M |\
1077 GLNVM_ULD_POR_DONE_M |\
1078 GLNVM_ULD_POR_DONE_1_M |\
1079 GLNVM_ULD_PCIER_DONE_2_M)
1080
1081 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
1082 GLNVM_ULD_PE_DONE_M : 0);
1083
1084 /* Device is Active; check Global Reset processes are done */
1085 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1086 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1087 if (reg == uld_mask) {
1088 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1089 break;
1090 }
1091 mdelay(10);
1092 }
1093
1094 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1095 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1096 reg);
1097 return ICE_ERR_RESET_FAILED;
1098 }
1099
1100 return 0;
1101}
1102
1103/**
1104 * ice_pf_reset - Reset the PF
1105 * @hw: pointer to the hardware structure
1106 *
1107 * If a global reset has been triggered, this function checks
1108 * for its completion and then issues the PF reset
1109 */
1110static enum ice_status ice_pf_reset(struct ice_hw *hw)
1111{
1112 u32 cnt, reg;
1113
1114 /* If at function entry a global reset was already in progress, i.e.
1115 * state is not 'device active' or any of the reset done bits are not
1116 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1117 * global reset is done.
1118 */
1119 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1120 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1121 /* poll on global reset currently in progress until done */
1122 if (ice_check_reset(hw))
1123 return ICE_ERR_RESET_FAILED;
1124
1125 return 0;
1126 }
1127
1128 /* Reset the PF */
1129 reg = rd32(hw, PFGEN_CTRL);
1130
1131 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1132
1133 /* Wait for the PFR to complete. The wait time is the global config lock
1134 * timeout plus the PFR timeout which will account for a possible reset
1135 * that is occurring during a download package operation.
1136 */
1137 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1138 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1139 reg = rd32(hw, PFGEN_CTRL);
1140 if (!(reg & PFGEN_CTRL_PFSWR_M))
1141 break;
1142
1143 mdelay(1);
1144 }
1145
1146 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
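
/* Usage sketch (illustrative, not a caller in this file): a service path that
 * decides a PF-level reset is needed would invoke ice_reset() and unwind on
 * failure. Assumes an initialized struct ice_hw *hw.
 *
 *	enum ice_status status;
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 *	if (status)
 *		dev_err(ice_hw_to_dev(hw), "PF reset failed, err %d\n",
 *			status);
 */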

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
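
/* Field packing sketch: each ICE_CTX_STORE() entry above records a field's
 * width and LSB position inside the dense context image that ice_set_ctx()
 * builds. For example, qlen (width 13, LSB 89) occupies bits 89..101 of the
 * buffer, i.e. byte 11 starting at bit offset 1. A hand-rolled equivalent for
 * that single field (illustration only; ice_set_ctx() is the real mechanism)
 * could look like:
 *
 *	u64 qlen = rlan_ctx->qlen & GENMASK_ULL(12, 0);
 *
 *	put_unaligned_le64(get_unaligned_le64(&ctx_buf[89 / 8]) |
 *			   (qlen << (89 % 8)), &ctx_buf[89 / 8]);
 */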

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
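
/* Usage sketch (hypothetical values): programming Rx queue 0 with a DMA ring
 * base and 512 descriptors. ICE_RLAN_BASE_S is assumed to be the 128-byte
 * shift used elsewhere in the driver; the values themselves are illustrative,
 * not a recommended configuration.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> ICE_RLAN_BASE_S;	// base in 128-byte units
 *	rlan_ctx.qlen = 512;
 *
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, 0))
 *		goto err_setup_rx;	// hypothetical error label
 */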

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
						   (struct ice_aq_desc *)desc,
						   buf, buf_size, cd));
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
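
/* Usage sketch: reading a remote register over the sideband queue. A zero
 * opcode selects a read, so the result is returned in in.data; the
 * destination device and address below are placeholders.
 *
 *	struct ice_sbq_msg_input in = { 0 };
 *	int err;
 *
 *	in.dest_dev = rmn_0;	// placeholder destination device
 *	in.msg_addr_low = 0x0;	// placeholder register address
 *	in.opcode = 0;		// 0 == read; non-zero == write in.data
 *
 *	err = ice_sbq_rw_reg(hw, &in);
 *	if (!err)
 *		reg_val = in.data;
 */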

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	kfree(buf_cpy);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
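
/* Usage sketch: the expected acquire/use/release pattern, here for the Global
 * Config Lock. ICE_ERR_AQ_NO_WORK means another PF already downloaded the
 * package, so the caller skips its own download; the error handling shown is
 * illustrative.
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 *				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return 0;	// package already downloaded elsewhere
 *	if (status)
 *		return status;
 *
 *	// ... download the package ...
 *
 *	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 */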

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < flex_array_size(buf, elem, num_entries))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	memcpy(res, buf->elem, sizeof(*buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}
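
/* Usage sketch: allocating a single resource of some ICE_AQC_RES_TYPE_* and
 * freeing it again; 'type' is a placeholder for a concrete resource type.
 *
 *	u16 res_id;
 *
 *	if (!ice_alloc_hw_res(hw, type, 1, false, &res_id)) {
 *		// ... use res_id ...
 *		ice_free_hw_res(hw, type, 1, &res_id);
 *	}
 */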

/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(*buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
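
/* Worked example (illustrative figures): with valid_functions = 0xFF (8 PFs)
 * and max = 768, each PF is guaranteed 768 / 8 = 96 resources; with
 * valid_functions = 0x0F (4 PFs) the same split yields 192. This is the
 * division ice_parse_vsi_func_caps() relies on below.
 */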

/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
			  caps->sr_iov_1_1);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_RDMA:
		caps->rdma = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}

/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
 */
static void
ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
{
	/* This assumes device capabilities are always scanned before function
	 * capabilities during the initialization flow.
	 */
	if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
		caps->maxtc = 4;
		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
			  caps->maxtc);
		if (caps->rdma) {
			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
			caps->rdma = 0;
		}

		/* print message only when processing device capabilities
		 * during initialization.
		 */
		if (caps == &hw->dev_caps.common_cap)
			dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
	}
}

/**
 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VF.
 */
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 number = le32_to_cpu(cap->number);

	func_p->num_allocd_vfs = number;
	func_p->vf_base_id = logical_id;
	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
		  func_p->num_allocd_vfs);
	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
		  func_p->vf_base_id);
}

/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  le32_to_cpu(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}

/**
 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_1588.
 */
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			 struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_func_info *info = &func_p->ts_func_info;
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
	func_p->common_cap.ieee_1588 = info->ena;

	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);

	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, val;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
		GLQF_FD_SIZE_FD_GSIZE_S;
	func_p->fd_fltr_guar =
		ice_get_num_per_func(hw, val);
	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
		GLQF_FD_SIZE_FD_BSIZE_S;
	func_p->fd_fltr_best_effort = val;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
 */
static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		    void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(func_p, 0, sizeof(*func_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &func_p->common_cap,
					      &cap_resp[i], "func caps");

		switch (cap) {
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_1588:
			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_func_caps(hw, func_p);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
}

/**
 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
 */
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_funcs = hweight32(number);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
		  dev_p->num_funcs);
}

/**
 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VF for device capabilities.
 */
static void
ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_vfs_exposed = number;
	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
		  dev_p->num_vfs_exposed);
}

/**
 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VSI for device capabilities.
 */
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_vsi_allocd_to_host = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
		  dev_p->num_vsi_allocd_to_host);
}

/**
 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_1588 for device capabilities.
 */
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 phys_id = le32_to_cpu(cap->phys_id);
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
	dev_p->common_cap.ieee_1588 = info->ena;

	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);

	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);

	info->ena_ports = logical_id;
	info->tmr_own_map = phys_id;

	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
		  dev_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
		  info->tmr0_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
		  info->tmr0_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
		  info->tmr0_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
		  info->tmr1_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
		  info->tmr1_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
		  info->tmr1_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
		  info->ena_ports);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
		  info->tmr_own_map);
}

/**
 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_FD for device capabilities.
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse the device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
 */
static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		   void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(dev_p, 0, sizeof(*dev_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_1588:
			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}

/**
 * ice_aq_list_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a buffer to hold the capabilities
 * @buf_size: size of the buffer
 * @cap_count: if not NULL, set to the number of capabilities reported
 * @opc: capabilities type to discover, device or function
 * @cd: pointer to command details structure or NULL
 *
 * Get the function (0x000A) or device (0x000B) capabilities description from
 * firmware and store it in the buffer.
 *
 * If the cap_count pointer is not NULL, then it is set to the number of
 * capabilities firmware will report. Note that if the buffer size is too
 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
 * cap_count will still be updated in this case. It is recommended that the
 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
 * firmware could return) to avoid this.
 */
enum ice_status
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (cap_count)
		*cap_count = le32_to_cpu(cmd->count);

	return status;
}

/**
 * ice_discover_dev_caps - Read and extract device capabilities
 * @hw: pointer to the hardware structure
 * @dev_caps: pointer to device capabilities structure
 *
 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
 */
enum ice_status
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
	enum ice_status status;
	u32 cap_count = 0;
	void *cbuf;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return ICE_ERR_NO_MEMORY;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (!status)
		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps structure
 * for later use.
 */
static enum ice_status
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
	enum ice_status status;
	u32 cap_count = 0;
	void *cbuf;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return ICE_ERR_NO_MEMORY;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_func_caps, NULL);
	if (!status)
		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_dev_caps(hw, &hw->dev_caps);
	if (status)
		return status;

	return ice_discover_func_caps(hw, &hw->func_caps);
}
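
/* Usage sketch: ice_get_caps() is normally called once during HW init, after
 * which the parsed limits drive resource sizing. The unwind label is
 * illustrative of how a caller might handle failure.
 *
 *	status = ice_get_caps(hw);
 *	if (status)
 *		goto err_unroll_cqinit;	// hypothetical unwind label
 *
 *	// e.g. hw->func_caps.common_cap.num_rxq now bounds Rx queue setup
 */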

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;
	ether_addr_copy(cmd->mac_addr, mac_addr);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: Exactly one bit should be set across [phy_type_low, phy_type_high],
 * as this function converts a single PHY type to its speed.
 * If no bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
 * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}
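
/* Worked example: a single PHY type bit maps to its speed, while an ambiguous
 * mask collapses to unknown (the ICE_PHY_TYPE_LOW_* macros are already
 * single-bit masks):
 *
 *	// 25GBASE-CR alone -> ICE_AQ_LINK_SPEED_25GB
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_CR,
 *						     0);
 *
 *	// two bits set -> no single PHY type -> ICE_AQ_LINK_SPEED_UNKNOWN
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_CR |
 *						     ICE_PHY_TYPE_LOW_10GBASE_T,
 *						     0);
 */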

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap format, see the link_speed field of
 * struct ice_aqc_get_link_status. The caller may pass a link_speeds_bitmap
 * that includes multiple speeds.
 *
 * Each bit in [phy_type_low, phy_type_high] represents a certain link
 * speed. This helper function turns on bits in [phy_type_low, phy_type_high]
 * based on the value of the link_speeds_bitmap input parameter.
2816 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}
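
/* Usage sketch (illustrative, not part of the driver flow): build a PHY
 * type pair that covers every PHY backing 50G and 100G link speeds. The
 * speed flags come from the ICE_AQ_LINK_SPEED_* definitions used above.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_50GB | ICE_AQ_LINK_SPEED_100GB);
 *
 * Afterwards, every bit whose PHY type maps to 50G or 100G is set in
 * phy_low/phy_high, ready to be copied into ice_aqc_set_phy_cfg_data.
 */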

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = 0;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}
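
/* Usage sketch (illustrative): the typical read-modify-write flow for PHY
 * configuration. Current abilities are fetched, copied into a set-config
 * structure with ice_copy_phy_caps_to_cfg() (defined later in this file),
 * tweaked, and written back.
 *
 *	struct ice_aqc_get_phy_caps_data caps = { 0 };
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				 &caps, NULL)) {
 *		ice_copy_phy_caps_to_cfg(pi, &caps, &cfg);
 *		cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *		ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 *	}
 */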

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
enum ice_status
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_BAD_PTR;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if the PHY capabilities match the PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = ICE_ERR_PARAM;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv;

		if (ice_get_link_default_override(&tlv, pi))
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0 = topo default)
 * @mem_addr: I2C offset. Lower 8 bits for address, upper 8 bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 for read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || (mem_addr & 0xff00))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}
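
/* Usage sketch (illustrative): read the first two identifier bytes of an
 * SFP/QSFP module EEPROM at the default I2C address 0xA0, page 0. The
 * lport value 0x100 sets the "logical port valid" bit with port 0.
 *
 *	u8 id[2];
 *
 *	if (!ice_aq_sff_eeprom(hw, 0x100, 0xA0, 0x00, 0, 0, id,
 *			       sizeof(id), false, NULL))
 *		ice_debug(hw, ICE_DBG_PHY, "module id 0x%02x 0x%02x\n",
 *			  id[0], id[1]);
 */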

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 *lut;

	if (!params)
		return ICE_ERR_PARAM;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		fallthrough;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}
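
/* Usage sketch (illustrative): program a 512-entry PF LUT. The params
 * structure fields mirror those consumed by __ice_aq_get_set_rss_lut();
 * "lut_buf" stands in for a caller-owned buffer of lut_size bytes.
 *
 *	struct ice_aq_get_set_rss_lut_params params = {
 *		.vsi_handle = vsi_handle,
 *		.lut = lut_buf,
 *		.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512,
 *		.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *		.global_lut_id = 0,
 *	};
 *
 *	status = ice_aq_set_rss_lut(hw, &params);
 */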

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
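
/* Usage sketch (illustrative): program a fresh random RSS key for a VSI.
 * get_random_bytes() is the usual kernel source of key material; the
 * struct covers both the standard and extended hash key.
 *
 *	struct ice_aqc_get_set_rss_keys keys;
 *
 *	get_random_bytes(&keys, sizeof(keys));
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */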

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
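
/* Buffer sizing sketch (illustrative): the qg_list buffer handed to
 * ice_aq_add_lan_txq() must equal the summed flexible-array sizes of all
 * queue groups, which is what the sum_size check above enforces. For a
 * single group carrying one Tx queue:
 *
 *	struct ice_aqc_add_tx_qgrp *qg;
 *	u16 buf_size = struct_size(qg, txqs, 1);
 *
 *	qg = kzalloc(buf_size, GFP_KERNEL);
 *	if (qg) {
 *		qg->num_txqs = 1;
 *		// fill qg->txqs[0] and parent_teid, then:
 *		// status = ice_aq_add_lan_txq(hw, 1, qg, buf_size, NULL);
 *	}
 */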

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
						   buf_size, cd));
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}
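
/* Worked example (illustrative) of the bit arithmetic above: for a field
 * with width = 3 and lsb = 10, shift_width = 10 % 8 = 2 and the target is
 * byte 10 / 8 = 1 of dest_ctx. The mask starts as 0b00000111, becomes
 * 0b00011100 after the shift, so only bits 2-4 of destination byte 1 are
 * rewritten; the surrounding bits are preserved by the ~mask AND.
 */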

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 5 bits, so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 6 bits, so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
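
/* Usage sketch (illustrative, with a hypothetical field layout): pack a
 * 10-bit value starting at bit 3 of the destination buffer from a u16
 * source field. The names below (my_ctx, my_ctx_info, dest_buf) are
 * examples, not driver symbols.
 *
 *	struct my_ctx { u16 qlen; } ctx = { .qlen = 42 };
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, qlen),
 *		  .size_of = sizeof(u16), .width = 10, .lsb = 3 },
 *		{ 0 }	// zero width terminates the walk in ice_set_ctx()
 *	};
 *
 *	status = ice_set_ctx(hw, (u8 *)&ctx, dest_buf, my_ctx_info);
 *
 * ice_set_ctx() selects ice_write_word() from size_of and rewrites only
 * bits 3-12 of the packed buffer, leaving neighboring bits intact.
 */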

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - Priority 0 among siblings, indicated by Bits 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - Adjustment value 0 is used in the PSM credit update flow,
	 *   indicated by Bits 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queues are already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
						  max_rdmaqs,
						  ICE_SCHED_NODE_OWNER_RDMA));
}

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qsets
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					    &node);
		if (status) {
			ret = ice_status_to_errno(status);
			break;
		}
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	struct ice_aqc_dis_txq_item *qg_list;
	enum ice_status status = 0;
	struct ice_hw *hw;
	u16 qg_size;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	qg_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(qg_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return ice_status_to_errno(status);
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list.
	 * This allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
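
/* Worked example (illustrative) of the 40-bit rollover handling above:
 * if *prev_stat is 0xFFFFFFFFF0 (16 counts below the 2^40 wrap point)
 * and the new read returns 0x10, the counter wrapped, so the increment
 * is (0x10 + 2^40) - 0xFFFFFFFFF0 = 32 counts rather than a bogus
 * negative delta.
 */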

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}
4695
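/* Usage sketch (hypothetical, for illustration): callers typically pass a
 * scheduler tree node's TEID and inspect the returned element data on
 * success (zero return):
 *
 *	struct ice_aqc_txsched_elem_data elem;
 *
 *	if (!ice_sched_query_elem(hw, le32_to_cpu(node->info.node_teid),
 *				  &elem))
 *		ice_debug(hw, ICE_DBG_SCHED, "elem type %d\n",
 *			  elem.data.elem_type);
 */
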
/**
 * ice_aq_set_driver_param - Set driver parameter to share via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to set
 * @value: the value to set the parameter to
 * @cd: pointer to command details structure or NULL
 *
 * Set the value of one of the software defined parameters. All PFs connected
 * to this device can read the value using ice_aq_get_driver_param.
 *
 * Note that firmware provides no synchronization or locking, and will not
 * save the parameter value during a device reset. It is expected that
 * a single PF will write the parameter value, while all other PFs will only
 * read it.
 */
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
	cmd->param_indx = idx;
	cmd->param_val = cpu_to_le32(value);

	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
}

/**
 * ice_aq_get_driver_param - Get driver parameter shared via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to retrieve
 * @value: storage to return the shared parameter
 * @cd: pointer to command details structure or NULL
 *
 * Get the value of one of the software defined parameters.
 *
 * Note that firmware provides no synchronization or locking. It is expected
 * that only a single PF will write a given parameter.
 */
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
	cmd->param_indx = idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return ice_status_to_errno(status);

	*value = le32_to_cpu(cmd->param_val);

	return 0;
}

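/* Usage sketch (hypothetical): the intended pattern is one writer and many
 * readers, e.g. sharing a PTP clock index between PFs. The parameter index
 * name and use_clk_idx() below are illustrative, not a guaranteed API:
 *
 *	// On the owning PF:
 *	err = ice_aq_set_driver_param(hw, ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0,
 *				      clk_idx, NULL);
 *
 *	// On any other PF:
 *	if (!ice_aq_get_driver_param(hw, ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0,
 *				     &clk_idx, NULL))
 *		use_clk_idx(clk_idx);
 */
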
/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

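/* The major/minor/patch comparison above is repeated by several
 * ice_fw_supports_*() helpers in this file. A generic helper could express
 * it once; a minimal sketch (not currently part of the driver):
 *
 *	static bool ice_fw_api_ver_ge(struct ice_hw *hw, u8 maj, u8 min,
 *				      u8 patch)
 *	{
 *		if (hw->api_maj_ver != maj)
 *			return hw->api_maj_ver > maj;
 *		if (hw->api_min_ver != min)
 *			return hw->api_min_ver > min;
 *		return hw->api_patch >= patch;
 *	}
 *
 * ice_fw_supports_link_override() would then reduce to a single call:
 * ice_fw_api_ver_ge(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
 * ICE_FW_API_LINK_OVERRIDE_MIN, ICE_FW_API_LINK_OVERRIDE_PATCH).
 */
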
/**
 * ice_get_link_default_override - get the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

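/* Worked example (illustrative): each PHY type loop above assembles a 64 bit
 * mask from consecutive 16 bit shadow RAM words, least significant word
 * first. If the four words read 0x0004, 0x0000, 0x0100, 0x0000, then:
 *
 *	phy_type_low = (0x0004 << 0) | (0x0000 << 16) |
 *		       ((u64)0x0100 << 32) | ((u64)0x0000 << 48)
 *		     = 0x0000010000000004
 *
 * i.e. bits 2 and 40 of the resulting 64 bit PHY type mask are set.
 */
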
/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check if FW version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}
	return false;
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add a filter, false to remove it
 */
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

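/* Usage sketch (hypothetical): gating the filter on FW support and steering
 * LLDP frames to a VSI; "vsi" and "dev" are illustrative caller state:
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
 *		status = ice_lldp_fltr_add_remove(hw, vsi->vsi_num, true);
 *		if (status)
 *			dev_dbg(dev, "lldp filter add failed\n");
 *	}
 *
 * Passing false removes the filter again, e.g. on VSI teardown.
 */
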
/**
 * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}
	return false;
}