// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))

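/* Illustrative expansion (a reader's note, not part of the driver): for
 * idx = 0, the token pasting in ICE_NIC_FLX_ENTRY() resolves to a single
 * register write of the form
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(ICE_RXDID_FLEX_NIC),
 *	     ((ICE_RX_OPC_MDID << GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M) |
 *	     (((mdid) << GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M));
 *
 * i.e. the extraction opcode and the metadata ID are each shifted into
 * position and masked before being written into flex word "idx" of the
 * NIC flex descriptor profile.
 */
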
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_aq_discover_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
static enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}

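/* Illustrative usage sketch (an assumption for this write-up, not part of
 * the driver): with an already-initialized struct ice_port_info *pi, a
 * caller can snapshot the link state and enable LSE reporting in one call:
 *
 *	struct ice_link_status link = { 0 };
 *
 *	if (!ice_aq_get_link_info(pi, true, &link, NULL) &&
 *	    (link.link_info & ICE_AQ_LINK_UP))
 *		pr_info("link up, speed bit 0x%x\n", link.link_speed);
 *
 * The same data is also cached in pi->phy.link_info by the call above.
 */
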
/**
 * ice_init_flex_parser - initialize rx flex parser
 * @hw: pointer to the hardware structure
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_parser(struct ice_hw *hw)
{
	u8 idx = 0;

	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
			      ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
			      idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
			      ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	mutex_init(&sw->mac_list_lock);
	INIT_LIST_HEAD(&sw->mac_list_head);

	mutex_init(&sw->vlan_list_lock);
	INIT_LIST_HEAD(&sw->vlan_list_head);

	mutex_init(&sw->eth_m_list_lock);
	INIT_LIST_HEAD(&sw->eth_m_list_head);

	mutex_init(&sw->promisc_list_lock);
	INIT_LIST_HEAD(&sw->promisc_list_head);

	mutex_init(&sw->mac_vlan_list_lock);
	INIT_LIST_HEAD(&sw->mac_vlan_list_head);

	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}

	mutex_destroy(&sw->mac_list_lock);
	mutex_destroy(&sw->vlan_list_lock);
	mutex_destroy(&sw->eth_m_list_lock);
	mutex_destroy(&sw->promisc_list_lock);
	mutex_destroy(&sw->mac_vlan_list_lock);

	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	/* set these values to minimum allowed */
	hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
	hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
	hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
	hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_parser(hw);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_sched_cleanup_all(hw);
	ice_shutdown_all_ctrlq(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	ice_cleanup_fltr_mgmt_struct(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

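/* Worked example of the polling budget above (a reader's note, not driver
 * code): GRSTDEL is in 100 ms units, so a field value of 35 gives
 * grst_delay = 35 + 10 = 45 iterations of mdelay(100), i.e. 4.5 s total:
 * 3.5 s of hardware reset delay plus the extra 1 s margin for in-flight
 * admin queue commands.
 */
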
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

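/* Illustrative usage sketch (assumed values, not part of the driver): a
 * caller fills only the sparse struct ice_rlan_ctx fields it cares about
 * and lets ice_set_ctx() pack them into the dense buffer -- e.g. qlen is
 * a 13-bit field packed at bit offset 89 per ice_rlan_ctx_info[] above:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	assumed: base in 128-byte units
 *	rlan_ctx.qlen = ring_count;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 *
 * Any field left at zero is simply written to hardware as zero.
 */
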
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * requests common resource using the admin queue commands (0x0008)
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 * If the resource is held by someone else, the command completes with
	 * busy return value and the timeout field indicates the maximum time
	 * the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	enum ice_status status;
	u32 time_left = 0;
	u32 timeout;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
	 * driver has previously acquired the resource and performed any
	 * necessary updates; in this case the caller does not obtain the
	 * resource and has no further work to do.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
		status = ICE_ERR_AQ_NO_WORK;
		goto ice_acquire_res_exit;
	}

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);
	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
			/* lock free, but no work to do */
			status = ICE_ERR_AQ_NO_WORK;
			break;
		}

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

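/* Illustrative usage sketch (an assumption for this write-up, not part of
 * the driver): the acquire/release pair above brackets exclusive access to
 * a shared HW resource, e.g. the NVM, in a scoped fashion:
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ)) {
 *		... read from the NVM ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 *
 * An ICE_ERR_AQ_NO_WORK return means another function owned the lock and
 * already performed the update, so the caller must skip both the work and
 * the release.
 */
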
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	*data_size = le16_to_cpu(desc.datalen);

	return status;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;
	u16 data_size = 0;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
	 * The driver then allocates the buffer of this size and retries the
	 * operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cbuf_len = ICE_GET_CAP_BUF_COUNT *
		sizeof(struct ice_aqc_list_caps_elem);

	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
					      ice_aqc_opc_list_func_caps, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
		cbuf_len = data_size;
	} while (--retries);

	return status;
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = htons(*((u16 *)mac_addr));
	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

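/* Worked example of the sah/sal packing above (a reader's note with an
 * assumed address, not driver code): for mac_addr = 00:1b:21:aa:bb:cc,
 * htons()/htonl() keep the bytes in wire (big-endian) order, so sah ends
 * up carrying address bytes 0..1 (00:1b) and sal bytes 2..5 (21:aa:bb:cc),
 * splitting the station address across the two descriptor words.
 */
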
/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
static enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_phy_cfg *cmd;
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	cmd = &desc.params.set_phy;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	cmd->lport_num = lport;

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
static enum ice_status
ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	phy_info = &pi->phy;
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		goto out;

	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
					     pcaps, NULL);
		if (status)
			goto out;

		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
		       sizeof(phy_info->link_info.module_type));
	}
out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @atomic_restart: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current phy config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
	/* set the new capabilities */
	cfg.caps |= pause_mask;
	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (atomic_restart)
			cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
		/* Copy over all the old settings */
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

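/* Illustrative usage sketch (an assumption for this write-up, not part of
 * the driver): the requested mode is carried in pi->fc.req_mode, so a
 * caller enables symmetric pause roughly as follows:
 *
 *	u8 aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *
 * On failure, aq_failures narrows down which AQ step failed: the PHY caps
 * get, the PHY config set, or the subsequent link info update.
 */
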
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the hw struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
		   (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else {
 
 
 
 
1636		status = ICE_ERR_PARAM;
1637		goto ice_aq_get_set_rss_lut_exit;
1638	}
1639
1640ice_aq_get_set_rss_lut_send:
1641	cmd_resp->flags = cpu_to_le16(flags);
1642	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
1643
1644ice_aq_get_set_rss_lut_exit:
1645	return status;
1646}
1647
1648/**
1649 * ice_aq_get_rss_lut
1650 * @hw: pointer to the hardware structure
1651 * @vsi_id: VSI FW index
1652 * @lut_type: LUT table type
1653 * @lut: pointer to the LUT buffer provided by the caller
1654 * @lut_size: size of the LUT buffer
1655 *
1656 * get the RSS lookup table, PF or VSI type
1657 */
1658enum ice_status
1659ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1660		   u16 lut_size)
1661{
1662	return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
1663					false);
1664}
1665
1666/**
1667 * ice_aq_set_rss_lut
1668 * @hw: pointer to the hardware structure
1669 * @vsi_id: VSI FW index
1670 * @lut_type: LUT table type
1671 * @lut: pointer to the LUT buffer provided by the caller
1672 * @lut_size: size of the LUT buffer
1673 *
1674 * set the RSS lookup table, PF or VSI type
1675 */
1676enum ice_status
1677ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1678		   u16 lut_size)
1679{
1680	return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
1681					true);
1682}
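/* Illustrative sketch (not part of this file): filling a PF lookup table
 * that spreads flows round-robin across num_qs queues, then programming it
 * with ice_aq_set_rss_lut(). ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 is assumed
 * to be the byte size (512) from ice_adminq_cmd.h, matching the size check
 * in the helper above; the wrapper name is hypothetical.
 */
static enum ice_status
example_fill_pf_lut(struct ice_hw *hw, u16 vsi_id, u16 num_qs)
{
	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
	u16 i;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_qs;	/* round-robin queue assignment */

	return ice_aq_set_rss_lut(hw, vsi_id,
				  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				  lut, sizeof(lut));
}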
1683
1684/**
1685 * __ice_aq_get_set_rss_key
1686 * @hw: pointer to the hw struct
1687 * @vsi_id: VSI FW index
1688 * @key: pointer to key info struct
1689 * @set: set true to set the key, false to get the key
1690 *
1691 * get (0x0B04) or set (0x0B02) the RSS key per VSI
1692 */
1693static enum ice_status
1694__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
1695			 struct ice_aqc_get_set_rss_keys *key,
1696			 bool set)
1697{
1698	struct ice_aqc_get_set_rss_key *cmd_resp;
1699	u16 key_size = sizeof(*key);
1700	struct ice_aq_desc desc;
1701
1702	cmd_resp = &desc.params.get_set_rss_key;
1703
1704	if (set) {
1705		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
1706		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1707	} else {
1708		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
1709	}
1710
1711	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
1712					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
1713					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
1714				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
1715
1716	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
1717}
1718
1719/**
1720 * ice_aq_get_rss_key
1721 * @hw: pointer to the hw struct
1722 * @vsi_id: VSI FW index
1723 * @key: pointer to key info struct
1724 *
1725 * get the RSS key per VSI
1726 */
1727enum ice_status
1728ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
1729		   struct ice_aqc_get_set_rss_keys *key)
1730{
1731	return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
1732}
1733
1734/**
1735 * ice_aq_set_rss_key
1736 * @hw: pointer to the hw struct
1737 * @vsi_id: VSI FW index
1738 * @keys: pointer to key info struct
1739 *
1740 * set the RSS key per VSI
1741 */
1742enum ice_status
1743ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
1744		   struct ice_aqc_get_set_rss_keys *keys)
1745{
1746	return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
1747}
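/* Illustrative sketch (not part of this file): seeding a VSI's RSS key with
 * random bytes before enabling RSS. The field name follows struct
 * ice_aqc_get_set_rss_keys in ice_adminq_cmd.h; the wrapper name is
 * hypothetical.
 */
static enum ice_status example_seed_rss_key(struct ice_hw *hw, u16 vsi_id)
{
	struct ice_aqc_get_set_rss_keys keys = { 0 };

	get_random_bytes(keys.standard_rss_key,
			 sizeof(keys.standard_rss_key));

	return ice_aq_set_rss_key(hw, vsi_id, &keys);
}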
1748
1749/**
1750 * ice_aq_add_lan_txq
1751 * @hw: pointer to the hardware structure
1752 * @num_qgrps: Number of added queue groups
1753 * @qg_list: list of queue groups to be added
1754 * @buf_size: size of buffer for indirect command
1755 * @cd: pointer to command details structure or NULL
1756 *
1757 * Add Tx LAN queue (0x0C30)
1758 *
1759 * NOTE:
1760 * Prior to calling add Tx LAN queue:
1761 * Initialize the following as part of the Tx queue context:
1762 * Completion queue ID if the queue uses Completion queue, Quanta profile,
1763 * Cache profile and Packet shaper profile.
1764 *
1765 * After add Tx LAN queue AQ command is completed:
1766 * Interrupts should be associated with specific queues,
1767 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
1768 * flow.
1769 */
1770static enum ice_status
1771ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
1772		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
1773		   struct ice_sq_cd *cd)
1774{
1775	u16 i, sum_header_size, sum_q_size = 0;
1776	struct ice_aqc_add_tx_qgrp *list;
1777	struct ice_aqc_add_txqs *cmd;
1778	struct ice_aq_desc desc;
1779
1780	cmd = &desc.params.add_txqs;
1781
1782	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
1783
1784	if (!qg_list)
1785		return ICE_ERR_PARAM;
1786
1787	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
1788		return ICE_ERR_PARAM;
1789
1790	sum_header_size = num_qgrps *
1791		(sizeof(*qg_list) - sizeof(*qg_list->txqs));
1792
1793	list = qg_list;
1794	for (i = 0; i < num_qgrps; i++) {
1795		struct ice_aqc_add_txqs_perq *q = list->txqs;
1796
1797		sum_q_size += list->num_txqs * sizeof(*q);
1798		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
1799	}
1800
1801	if (buf_size != (sum_header_size + sum_q_size))
1802		return ICE_ERR_PARAM;
1803
1804	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1805
1806	cmd->num_qgrps = num_qgrps;
1807
1808	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
1809}
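/* Worked example (illustrative, not part of this file): the buf_size that
 * satisfies the check in ice_aq_add_lan_txq() above for a single queue
 * group. Each group contributes one header (the group struct minus its
 * trailing txqs member) plus one ice_aqc_add_txqs_perq entry per queue.
 */
static u16 example_add_txq_buf_size(u8 num_txqs)
{
	u16 sz = sizeof(struct ice_aqc_add_tx_qgrp) -
		 sizeof(struct ice_aqc_add_txqs_perq);	/* group header */

	sz += num_txqs * sizeof(struct ice_aqc_add_txqs_perq);
	return sz;
}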
1810
1811/**
1812 * ice_aq_dis_lan_txq
1813 * @hw: pointer to the hardware structure
1814 * @num_qgrps: number of groups in the list
1815 * @qg_list: the list of groups to disable
1816 * @buf_size: the total size of the qg_list buffer in bytes
1817 * @cd: pointer to command details structure or NULL
1818 *
1819 * Disable LAN Tx queue (0x0C31)
1820 */
1821static enum ice_status
1822ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
1823		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
1824		   struct ice_sq_cd *cd)
1825{
1826	struct ice_aqc_dis_txqs *cmd;
1827	struct ice_aq_desc desc;
1828	u16 i, sz = 0;
1829
1830	cmd = &desc.params.dis_txqs;
1831	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
1832
1833	if (!qg_list)
1834		return ICE_ERR_PARAM;
1835
1836	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
1837		return ICE_ERR_PARAM;
1838	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1839	cmd->num_entries = num_qgrps;
1840
1841	for (i = 0; i < num_qgrps; ++i) {
1842		/* Calculate the size taken up by the queue IDs in this group */
1843		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
1844
1845		/* Add the size of the group header */
1846		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
1847
1848		/* If the number of queues is even, add 2 bytes of padding */
1849		if ((qg_list[i].num_qs % 2) == 0)
1850			sz += 2;
1851	}
1852
1853	if (buf_size != sz)
1854		return ICE_ERR_PARAM;
1855
1856	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
1857}
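/* Worked example (illustrative, not part of this file): the size one group
 * contributes to buf_size in the loop above. The header is the item struct
 * minus its q_id member; queue IDs are __le16 (see the cpu_to_le16() use by
 * callers), and the 2 pad bytes after an even number of queue IDs
 * presumably keep the next group's parent_teid 4-byte aligned.
 */
static u16 example_dis_txq_group_size(u8 num_qs)
{
	u16 sz = sizeof(struct ice_aqc_dis_txq_item) -
		 sizeof(__le16);		/* group header */

	sz += num_qs * sizeof(__le16);		/* queue IDs */
	if ((num_qs % 2) == 0)
		sz += 2;			/* alignment padding */
	return sz;
}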
1858
1859/* End of FW Admin Queue command wrappers */
1860
1861/**
1862 * ice_write_byte - write a byte to a packed context structure
1863 * @src_ctx:  the context structure to read from
1864 * @dest_ctx: the context to be written to
1865 * @ce_info:  a description of the struct to be filled
1866 */
1867static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
1868			   const struct ice_ctx_ele *ce_info)
1869{
1870	u8 src_byte, dest_byte, mask;
1871	u8 *from, *dest;
1872	u16 shift_width;
1873
1874	/* copy from the next struct field */
1875	from = src_ctx + ce_info->offset;
1876
1877	/* prepare the bits and mask */
1878	shift_width = ce_info->lsb % 8;
1879	mask = (u8)(BIT(ce_info->width) - 1);
1880
1881	src_byte = *from;
1882	src_byte &= mask;
1883
1884	/* shift to correct alignment */
1885	mask <<= shift_width;
1886	src_byte <<= shift_width;
1887
1888	/* get the current bits from the target bit string */
1889	dest = dest_ctx + (ce_info->lsb / 8);
1890
1891	memcpy(&dest_byte, dest, sizeof(dest_byte));
1892
1893	dest_byte &= ~mask;	/* get the bits not changing */
1894	dest_byte |= src_byte;	/* add in the new bits */
1895
1896	/* put it all back */
1897	memcpy(dest, &dest_byte, sizeof(dest_byte));
1898}
1899
1900/**
1901 * ice_write_word - write a word to a packed context structure
1902 * @src_ctx:  the context structure to read from
1903 * @dest_ctx: the context to be written to
1904 * @ce_info:  a description of the struct to be filled
1905 */
1906static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
1907			   const struct ice_ctx_ele *ce_info)
1908{
1909	u16 src_word, mask;
1910	__le16 dest_word;
1911	u8 *from, *dest;
1912	u16 shift_width;
1913
1914	/* copy from the next struct field */
1915	from = src_ctx + ce_info->offset;
1916
1917	/* prepare the bits and mask */
1918	shift_width = ce_info->lsb % 8;
1919	mask = BIT(ce_info->width) - 1;
1920
1921	/* don't swizzle the bits until after the mask because the mask bits
1922	 * will be in a different bit position on big endian machines
1923	 */
1924	src_word = *(u16 *)from;
1925	src_word &= mask;
1926
1927	/* shift to correct alignment */
1928	mask <<= shift_width;
1929	src_word <<= shift_width;
1930
1931	/* get the current bits from the target bit string */
1932	dest = dest_ctx + (ce_info->lsb / 8);
1933
1934	memcpy(&dest_word, dest, sizeof(dest_word));
1935
1936	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
1937	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
1938
1939	/* put it all back */
1940	memcpy(dest, &dest_word, sizeof(dest_word));
1941}
1942
1943/**
1944 * ice_write_dword - write a dword to a packed context structure
1945 * @src_ctx:  the context structure to read from
1946 * @dest_ctx: the context to be written to
1947 * @ce_info:  a description of the struct to be filled
1948 */
1949static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
1950			    const struct ice_ctx_ele *ce_info)
1951{
1952	u32 src_dword, mask;
1953	__le32 dest_dword;
1954	u8 *from, *dest;
1955	u16 shift_width;
1956
1957	/* copy from the next struct field */
1958	from = src_ctx + ce_info->offset;
1959
1960	/* prepare the bits and mask */
1961	shift_width = ce_info->lsb % 8;
1962
1963	/* if the field width is exactly 32 on an x86 machine, then the shift
1964	 * operation will not work because the SHL instructions count is masked
1965	 * to 5 bits so the shift will do nothing
1966	 */
1967	if (ce_info->width < 32)
1968		mask = BIT(ce_info->width) - 1;
1969	else
1970		mask = (u32)~0;
1971
1972	/* don't swizzle the bits until after the mask because the mask bits
1973	 * will be in a different bit position on big endian machines
1974	 */
1975	src_dword = *(u32 *)from;
1976	src_dword &= mask;
1977
1978	/* shift to correct alignment */
1979	mask <<= shift_width;
1980	src_dword <<= shift_width;
1981
1982	/* get the current bits from the target bit string */
1983	dest = dest_ctx + (ce_info->lsb / 8);
1984
1985	memcpy(&dest_dword, dest, sizeof(dest_dword));
1986
1987	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
1988	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
1989
1990	/* put it all back */
1991	memcpy(dest, &dest_dword, sizeof(dest_dword));
1992}
1993
1994/**
1995 * ice_write_qword - write a qword to a packed context structure
1996 * @src_ctx:  the context structure to read from
1997 * @dest_ctx: the context to be written to
1998 * @ce_info:  a description of the struct to be filled
1999 */
2000static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
2001			    const struct ice_ctx_ele *ce_info)
2002{
2003	u64 src_qword, mask;
2004	__le64 dest_qword;
2005	u8 *from, *dest;
2006	u16 shift_width;
2007
2008	/* copy from the next struct field */
2009	from = src_ctx + ce_info->offset;
2010
2011	/* prepare the bits and mask */
2012	shift_width = ce_info->lsb % 8;
2013
2014	/* if the field width is exactly 64 on an x86 machine, then the shift
2015	 * operation will not work because the SHL instructions count is masked
2016	 * to 6 bits so the shift will do nothing
2017	 */
2018	if (ce_info->width < 64)
2019		mask = BIT_ULL(ce_info->width) - 1;
2020	else
2021		mask = (u64)~0;
2022
2023	/* don't swizzle the bits until after the mask because the mask bits
2024	 * will be in a different bit position on big endian machines
2025	 */
2026	src_qword = *(u64 *)from;
2027	src_qword &= mask;
2028
2029	/* shift to correct alignment */
2030	mask <<= shift_width;
2031	src_qword <<= shift_width;
2032
2033	/* get the current bits from the target bit string */
2034	dest = dest_ctx + (ce_info->lsb / 8);
2035
2036	memcpy(&dest_qword, dest, sizeof(dest_qword));
2037
2038	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
2039	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
2040
2041	/* put it all back */
2042	memcpy(dest, &dest_qword, sizeof(dest_qword));
2043}
2044
2045/**
2046 * ice_set_ctx - set context bits in packed structure
2047 * @src_ctx:  pointer to a generic non-packed context structure
2048 * @dest_ctx: pointer to memory for the packed structure
2049 * @ce_info:  a description of the structure to be transformed
2050 */
2051enum ice_status
2052ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2053{
2054	int f;
2055
2056	for (f = 0; ce_info[f].width; f++) {
2057		/* We have to deal with each element of the FW response
2058		 * using the correct size so that we are correct regardless
2059		 * of the endianness of the machine.
2060		 */
2061		switch (ce_info[f].size_of) {
2062		case sizeof(u8):
2063			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
2064			break;
2065		case sizeof(u16):
2066			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
2067			break;
2068		case sizeof(u32):
2069			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
2070			break;
2071		case sizeof(u64):
2072			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
2073			break;
2074		default:
2075			return ICE_ERR_INVAL_SIZE;
2076		}
2077	}
2078
2079	return 0;
2080}
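/* Illustrative example (not part of this file): a descriptor table in the
 * same style as ice_rlan_ctx_info elsewhere in this file, assuming the
 * ICE_CTX_STORE() helper from ice_common.h. For the qlen entry,
 * ice_write_word() masks the source to 13 bits, shifts it by 16 % 8 = 0,
 * and merges it into bytes 2-3 of the packed buffer. The struct and field
 * names here are hypothetical.
 */
struct example_ctx {
	u16 qlen;	/* 13 valid bits */
	u8 ena;		/* 1 valid bit */
};

static const struct ice_ctx_ele example_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(example_ctx,	qlen,	13,	16),
	ICE_CTX_STORE(example_ctx,	ena,	1,	32),
	{ 0 }
};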
2081
2082/**
2083 * ice_ena_vsi_txq
2084 * @pi: port information structure
2085 * @vsi_id: VSI id
2086 * @tc: TC number
2087 * @num_qgrps: Number of added queue groups
2088 * @buf: list of queue groups to be added
2089 * @buf_size: size of buffer for indirect command
2090 * @cd: pointer to command details structure or NULL
2091 *
2092 * This function adds one LAN queue
2093 */
2094enum ice_status
2095ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
2096		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
2097		struct ice_sq_cd *cd)
2098{
2099	struct ice_aqc_txsched_elem_data node = { 0 };
2100	struct ice_sched_node *parent;
2101	enum ice_status status;
2102	struct ice_hw *hw;
2103
2104	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2105		return ICE_ERR_CFG;
2106
2107	if (num_qgrps > 1 || buf->num_txqs > 1)
2108		return ICE_ERR_MAX_LIMIT;
2109
2110	hw = pi->hw;
2111
2112	mutex_lock(&pi->sched_lock);
2113
2114	/* find a parent node */
2115	parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
2116					    ICE_SCHED_NODE_OWNER_LAN);
2117	if (!parent) {
2118		status = ICE_ERR_PARAM;
2119		goto ena_txq_exit;
2120	}
2121	buf->parent_teid = parent->info.node_teid;
2122	node.parent_teid = parent->info.node_teid;
2123	/* Mark the values in the "generic" section as valid. The default
2124	 * value in the "generic" section is zero. This means that:
2125	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
2126	 * - 0 priority among siblings, indicated by Bit 1-3.
2127	 * - WFQ, indicated by Bit 4.
2128	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
2129	 * Bit 5-6.
2130	 * - Bit 7 is reserved.
2131	 * Without setting the generic section as valid in valid_sections, the
2132	 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
2133	 */
2134	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
2135
2136	/* add the LAN queue */
2137	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
2138	if (status)
2139		goto ena_txq_exit;
2140
2141	node.node_teid = buf->txqs[0].q_teid;
2142	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
2143
2144	/* add a leaf node into the scheduler tree queue layer */
2145	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
2146
2147ena_txq_exit:
2148	mutex_unlock(&pi->sched_lock);
2149	return status;
2150}
2151
2152/**
2153 * ice_dis_vsi_txq
2154 * @pi: port information structure
2155 * @num_queues: number of queues
2156 * @q_ids: pointer to the q_id array
2157 * @q_teids: pointer to queue node teids
2158 * @cd: pointer to command details structure or NULL
2159 *
2160 * This function removes queues and their corresponding nodes in SW DB
2161 */
2162enum ice_status
2163ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2164		u32 *q_teids, struct ice_sq_cd *cd)
2165{
2166	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2167	struct ice_aqc_dis_txq_item qg_list;
2168	u16 i;
2169
2170	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2171		return ICE_ERR_CFG;
2172
2173	mutex_lock(&pi->sched_lock);
2174
2175	for (i = 0; i < num_queues; i++) {
2176		struct ice_sched_node *node;
2177
2178		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
2179		if (!node)
2180			continue;
2181		qg_list.parent_teid = node->info.parent_teid;
2182		qg_list.num_qs = 1;
2183		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
2184		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
2185					    sizeof(qg_list), cd);
2186
2187		if (status)
2188			break;
2189		ice_free_sched_node(pi, node);
2190	}
2191	mutex_unlock(&pi->sched_lock);
2192	return status;
2193}
2194
2195/**
2196 * ice_cfg_vsi_qs - configure the new/existing VSI queues
2197 * @pi: port information structure
2198 * @vsi_id: VSI Id
2199 * @tc_bitmap: TC bitmap
2200 * @maxqs: max queues array per TC
2201 * @owner: LAN or RDMA
2202 *
2203 * This function adds/updates the VSI queues per TC.
2204 */
2205static enum ice_status
2206ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
2207	       u16 *maxqs, u8 owner)
2208{
2209	enum ice_status status = 0;
2210	u8 i;
2211
2212	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2213		return ICE_ERR_CFG;
2214
2215	mutex_lock(&pi->sched_lock);
2216
2217	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
2218		/* configuration is possible only if TC node is present */
2219		if (!ice_sched_get_tc_node(pi, i))
2220			continue;
2221
2222		status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
2223					   ice_is_tc_ena(tc_bitmap, i));
2224		if (status)
2225			break;
2226	}
2227
2228	mutex_unlock(&pi->sched_lock);
2229	return status;
2230}
2231
2232/**
2233 * ice_cfg_vsi_lan - configure VSI LAN queues
2234 * @pi: port information structure
2235 * @vsi_id: VSI Id
2236 * @tc_bitmap: TC bitmap
2237 * @max_lanqs: max LAN queues array per TC
2238 *
2239 * This function adds/updates the VSI LAN queues per TC.
2240 */
2241enum ice_status
2242ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
2243		u16 *max_lanqs)
2244{
2245	return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
2246			      ICE_SCHED_NODE_OWNER_LAN);
2247}
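/* Illustrative caller sketch (not part of this file): enabling LAN queues
 * on TC 0 only. The queue count is an arbitrary example value; the wrapper
 * name is hypothetical.
 */
static enum ice_status
example_cfg_lan_qs(struct ice_port_info *pi, u16 vsi_id)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };

	max_lanqs[0] = 16;	/* example: up to 16 LAN queues on TC 0 */

	return ice_cfg_vsi_lan(pi, vsi_id, BIT(0), max_lanqs);
}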
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice_common.h"
   5#include "ice_sched.h"
   6#include "ice_adminq_cmd.h"
   7#include "ice_flow.h"
   8
   9#define ICE_PF_RESET_WAIT_COUNT	300
  10
  11/**
  12 * ice_set_mac_type - Sets MAC type
  13 * @hw: pointer to the HW structure
  14 *
  15 * This function sets the MAC type of the adapter based on the
  16 * vendor ID and device ID stored in the HW structure.
  17 */
  18static enum ice_status ice_set_mac_type(struct ice_hw *hw)
  19{
  20	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
  21		return ICE_ERR_DEVICE_NOT_SUPPORTED;
  22
  23	switch (hw->device_id) {
  24	case ICE_DEV_ID_E810C_BACKPLANE:
  25	case ICE_DEV_ID_E810C_QSFP:
  26	case ICE_DEV_ID_E810C_SFP:
  27	case ICE_DEV_ID_E810_XXV_SFP:
  28		hw->mac_type = ICE_MAC_E810;
  29		break;
  30	case ICE_DEV_ID_E823C_10G_BASE_T:
  31	case ICE_DEV_ID_E823C_BACKPLANE:
  32	case ICE_DEV_ID_E823C_QSFP:
  33	case ICE_DEV_ID_E823C_SFP:
  34	case ICE_DEV_ID_E823C_SGMII:
  35	case ICE_DEV_ID_E822C_10G_BASE_T:
  36	case ICE_DEV_ID_E822C_BACKPLANE:
  37	case ICE_DEV_ID_E822C_QSFP:
  38	case ICE_DEV_ID_E822C_SFP:
  39	case ICE_DEV_ID_E822C_SGMII:
  40	case ICE_DEV_ID_E822L_10G_BASE_T:
  41	case ICE_DEV_ID_E822L_BACKPLANE:
  42	case ICE_DEV_ID_E822L_SFP:
  43	case ICE_DEV_ID_E822L_SGMII:
  44	case ICE_DEV_ID_E823L_10G_BASE_T:
  45	case ICE_DEV_ID_E823L_1GBE:
  46	case ICE_DEV_ID_E823L_BACKPLANE:
  47	case ICE_DEV_ID_E823L_QSFP:
  48	case ICE_DEV_ID_E823L_SFP:
  49		hw->mac_type = ICE_MAC_GENERIC;
  50		break;
  51	default:
  52		hw->mac_type = ICE_MAC_UNKNOWN;
  53		break;
  54	}
  55
  56	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
  57	return 0;
  58}
  59
  60/**
  61 * ice_clear_pf_cfg - Clear PF configuration
  62 * @hw: pointer to the hardware structure
  63 *
  64 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
  65 * configuration, flow director filters, etc.).
  66 */
  67enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
  68{
  69	struct ice_aq_desc desc;
  70
  71	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
  72
  73	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
  74}
  75
  76/**
  77 * ice_aq_manage_mac_read - manage MAC address read command
  78 * @hw: pointer to the HW struct
  79 * @buf: a virtual buffer to hold the manage MAC read response
  80 * @buf_size: Size of the virtual buffer
  81 * @cd: pointer to command details structure or NULL
  82 *
  83 * This function is used to return per PF station MAC address (0x0107).
  84 * NOTE: Upon successful completion of this command, MAC address information
  85 * is returned in user specified buffer. Please interpret user specified
  86 * buffer as "manage_mac_read" response.
  87 * Response such as various MAC addresses are stored in HW struct (port.mac)
  88 * ice_discover_dev_caps is expected to be called before this function is
  89 * called.
  90 */
  91static enum ice_status
  92ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
  93		       struct ice_sq_cd *cd)
  94{
  95	struct ice_aqc_manage_mac_read_resp *resp;
  96	struct ice_aqc_manage_mac_read *cmd;
  97	struct ice_aq_desc desc;
  98	enum ice_status status;
  99	u16 flags;
 100	u8 i;
 101
 102	cmd = &desc.params.mac_read;
 103
 104	if (buf_size < sizeof(*resp))
 105		return ICE_ERR_BUF_TOO_SHORT;
 106
 107	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
 108
 109	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 110	if (status)
 111		return status;
 112
 113	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
 114	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
 115
 116	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
 117		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
 118		return ICE_ERR_CFG;
 119	}
 120
 121	/* A single port can report up to two (LAN and WoL) addresses */
 122	for (i = 0; i < cmd->num_addr; i++)
 123		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
 124			ether_addr_copy(hw->port_info->mac.lan_addr,
 125					resp[i].mac_addr);
 126			ether_addr_copy(hw->port_info->mac.perm_addr,
 127					resp[i].mac_addr);
 128			break;
 129		}
 130
 131	return 0;
 132}
 133
 134/**
 135 * ice_aq_get_phy_caps - returns PHY capabilities
 136 * @pi: port information structure
 137 * @qual_mods: report qualified modules
 138 * @report_mode: report mode capabilities
 139 * @pcaps: structure for PHY capabilities to be filled
 140 * @cd: pointer to command details structure or NULL
 141 *
 142 * Returns the various PHY capabilities supported on the Port (0x0600)
 143 */
 144enum ice_status
 145ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
 146		    struct ice_aqc_get_phy_caps_data *pcaps,
 147		    struct ice_sq_cd *cd)
 148{
 149	struct ice_aqc_get_phy_caps *cmd;
 150	u16 pcaps_size = sizeof(*pcaps);
 151	struct ice_aq_desc desc;
 152	enum ice_status status;
 153	struct ice_hw *hw;
 154
 155	cmd = &desc.params.get_phy;
 156
 157	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
 158		return ICE_ERR_PARAM;
 159	hw = pi->hw;
 160
 161	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
 162
 163	if (qual_mods)
 164		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
 165
 166	cmd->param0 |= cpu_to_le16(report_mode);
 167	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
 168
 169	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
 170		  report_mode);
 171	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
 172		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
 173	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
 174		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
 175	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
 176	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
 177		  pcaps->low_power_ctrl_an);
 178	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
 179	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
 180		  pcaps->eeer_value);
 181	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
 182		  pcaps->link_fec_options);
 183	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
 184		  pcaps->module_compliance_enforcement);
 185	ice_debug(hw, ICE_DBG_LINK, "   extended_compliance_code = 0x%x\n",
 186		  pcaps->extended_compliance_code);
 187	ice_debug(hw, ICE_DBG_LINK, "   module_type[0] = 0x%x\n",
 188		  pcaps->module_type[0]);
 189	ice_debug(hw, ICE_DBG_LINK, "   module_type[1] = 0x%x\n",
 190		  pcaps->module_type[1]);
 191	ice_debug(hw, ICE_DBG_LINK, "   module_type[2] = 0x%x\n",
 192		  pcaps->module_type[2]);
 193
 194	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
 195		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
 196		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
 197		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
 198		       sizeof(pi->phy.link_info.module_type));
 199	}
 200
 201	return status;
 202}
 203
 204/**
 205 * ice_aq_get_link_topo_handle - get link topology node return status
 206 * @pi: port information structure
 207 * @node_type: requested node type
 208 * @cd: pointer to command details structure or NULL
 209 *
 210 * Get link topology node return status for specified node type (0x06E0)
 211 *
 212 * The cage node type can be used to determine whether a cage is
 213 * present. If the AQC returns an error (ENOENT), no cage is present,
 214 * and the connection type is backplane or BASE-T.
 215 */
 216static enum ice_status
 217ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
 218			    struct ice_sq_cd *cd)
 219{
 220	struct ice_aqc_get_link_topo *cmd;
 221	struct ice_aq_desc desc;
 222
 223	cmd = &desc.params.get_link_topo;
 224
 225	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
 226
 227	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
 228				   ICE_AQC_LINK_TOPO_NODE_CTX_S);
 229
 230	/* set node type */
 231	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
 232
 233	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
 234}
 235
 236/**
 237 * ice_is_media_cage_present
 238 * @pi: port information structure
 239 *
 240 * Returns true if media cage is present, else false. If no cage, then
 241 * media type is backplane or BASE-T.
 242 */
 243static bool ice_is_media_cage_present(struct ice_port_info *pi)
 244{
 245	/* The cage node type can be used to determine whether a cage is
 246	 * present. If the AQC returns an error (ENOENT), no cage is present
 247	 * and the connection type is backplane or BASE-T.
 248	 */
 249	return !ice_aq_get_link_topo_handle(pi,
 250					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
 251					    NULL);
 252}
 253
 254/**
 255 * ice_get_media_type - Gets media type
 256 * @pi: port information structure
 257 */
 258static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
 259{
 260	struct ice_link_status *hw_link_info;
 261
 262	if (!pi)
 263		return ICE_MEDIA_UNKNOWN;
 264
 265	hw_link_info = &pi->phy.link_info;
 266	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
 267		/* If more than one media type is selected, report unknown */
 268		return ICE_MEDIA_UNKNOWN;
 269
 270	if (hw_link_info->phy_type_low) {
 271		/* 1G SGMII is a special case where some DA cable PHYs
 272		 * may show this as an option when it really shouldn't
 273		 * be since SGMII is meant to be between a MAC and a PHY
 274		 * in a backplane. Try to detect this case and handle it
 275		 */
 276		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
 277		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
 278		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
 279		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
 280		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
 281			return ICE_MEDIA_DA;
 282
 283		switch (hw_link_info->phy_type_low) {
 284		case ICE_PHY_TYPE_LOW_1000BASE_SX:
 285		case ICE_PHY_TYPE_LOW_1000BASE_LX:
 286		case ICE_PHY_TYPE_LOW_10GBASE_SR:
 287		case ICE_PHY_TYPE_LOW_10GBASE_LR:
 288		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
 289		case ICE_PHY_TYPE_LOW_25GBASE_SR:
 290		case ICE_PHY_TYPE_LOW_25GBASE_LR:
 291		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
 292		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
 293		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
 294		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
 295		case ICE_PHY_TYPE_LOW_50GBASE_SR:
 296		case ICE_PHY_TYPE_LOW_50GBASE_FR:
 297		case ICE_PHY_TYPE_LOW_50GBASE_LR:
 298		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
 299		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
 300		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
 301		case ICE_PHY_TYPE_LOW_100GBASE_DR:
 302		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
 303		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
 304		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
 305		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
 306		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
 307		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
 308		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
 309		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
 310			return ICE_MEDIA_FIBER;
 311		case ICE_PHY_TYPE_LOW_100BASE_TX:
 312		case ICE_PHY_TYPE_LOW_1000BASE_T:
 313		case ICE_PHY_TYPE_LOW_2500BASE_T:
 314		case ICE_PHY_TYPE_LOW_5GBASE_T:
 315		case ICE_PHY_TYPE_LOW_10GBASE_T:
 316		case ICE_PHY_TYPE_LOW_25GBASE_T:
 317			return ICE_MEDIA_BASET;
 318		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
 319		case ICE_PHY_TYPE_LOW_25GBASE_CR:
 320		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
 321		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
 322		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
 323		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
 324		case ICE_PHY_TYPE_LOW_50GBASE_CP:
 325		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
 326		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
 327		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
 328			return ICE_MEDIA_DA;
 329		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
 330		case ICE_PHY_TYPE_LOW_40G_XLAUI:
 331		case ICE_PHY_TYPE_LOW_50G_LAUI2:
 332		case ICE_PHY_TYPE_LOW_50G_AUI2:
 333		case ICE_PHY_TYPE_LOW_50G_AUI1:
 334		case ICE_PHY_TYPE_LOW_100G_AUI4:
 335		case ICE_PHY_TYPE_LOW_100G_CAUI4:
 336			if (ice_is_media_cage_present(pi))
 337				return ICE_MEDIA_DA;
 338			fallthrough;
 339		case ICE_PHY_TYPE_LOW_1000BASE_KX:
 340		case ICE_PHY_TYPE_LOW_2500BASE_KX:
 341		case ICE_PHY_TYPE_LOW_2500BASE_X:
 342		case ICE_PHY_TYPE_LOW_5GBASE_KR:
 343		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
 344		case ICE_PHY_TYPE_LOW_25GBASE_KR:
 345		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
 346		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
 347		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
 348		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
 349		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
 350		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
 351		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
 352			return ICE_MEDIA_BACKPLANE;
 353		}
 354	} else {
 355		switch (hw_link_info->phy_type_high) {
 356		case ICE_PHY_TYPE_HIGH_100G_AUI2:
 357		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
 358			if (ice_is_media_cage_present(pi))
 359				return ICE_MEDIA_DA;
 360			fallthrough;
 361		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
 362			return ICE_MEDIA_BACKPLANE;
 363		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
 364		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
 365			return ICE_MEDIA_FIBER;
 366		}
 367	}
 368	return ICE_MEDIA_UNKNOWN;
 369}
 370
 371/**
 372 * ice_aq_get_link_info
 373 * @pi: port information structure
 374 * @ena_lse: enable/disable LinkStatusEvent reporting
 375 * @link: pointer to link status structure - optional
 376 * @cd: pointer to command details structure or NULL
 377 *
 378 * Get Link Status (0x0607). Returns the link status of the adapter.
 379 */
 380enum ice_status
 381ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 382		     struct ice_link_status *link, struct ice_sq_cd *cd)
 383{
 384	struct ice_aqc_get_link_status_data link_data = { 0 };
 385	struct ice_aqc_get_link_status *resp;
 386	struct ice_link_status *li_old, *li;
 387	enum ice_media_type *hw_media_type;
 388	struct ice_fc_info *hw_fc_info;
 389	bool tx_pause, rx_pause;
 390	struct ice_aq_desc desc;
 391	enum ice_status status;
 392	struct ice_hw *hw;
 393	u16 cmd_flags;
 394
 395	if (!pi)
 396		return ICE_ERR_PARAM;
 397	hw = pi->hw;
 398	li_old = &pi->phy.link_info_old;
 399	hw_media_type = &pi->phy.media_type;
 400	li = &pi->phy.link_info;
 401	hw_fc_info = &pi->fc;
 402
 403	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
 404	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
 405	resp = &desc.params.get_link_status;
 406	resp->cmd_flags = cpu_to_le16(cmd_flags);
 407	resp->lport_num = pi->lport;
 408
 409	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
 410
 411	if (status)
 412		return status;
 413
 414	/* save off old link status information */
 415	*li_old = *li;
 416
 417	/* update current link status information */
 418	li->link_speed = le16_to_cpu(link_data.link_speed);
 419	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
 420	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
 421	*hw_media_type = ice_get_media_type(pi);
 422	li->link_info = link_data.link_info;
 423	li->an_info = link_data.an_info;
 424	li->ext_info = link_data.ext_info;
 425	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
 426	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
 427	li->topo_media_conflict = link_data.topo_media_conflict;
 428	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
 429				      ICE_AQ_CFG_PACING_TYPE_M);
 430
 431	/* update fc info */
 432	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
 433	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
 434	if (tx_pause && rx_pause)
 435		hw_fc_info->current_mode = ICE_FC_FULL;
 436	else if (tx_pause)
 437		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
 438	else if (rx_pause)
 439		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
 440	else
 441		hw_fc_info->current_mode = ICE_FC_NONE;
 442
 443	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
 444
 445	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
 446	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
 447	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
 448		  (unsigned long long)li->phy_type_low);
 449	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
 450		  (unsigned long long)li->phy_type_high);
 451	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
 452	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
 453	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
 454	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
 455	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
 456	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
 457	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
 458		  li->max_frame_size);
 459	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);
 460
 461	/* save link status information */
 462	if (link)
 463		*link = *li;
 464
 465	/* flag cleared so calling functions don't call AQ again */
 466	pi->phy.get_link_info = false;
 467
 468	return 0;
 469}
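/* Illustrative sketch (not part of this file): refreshing the cached link
 * state only when the get_link_info flag marks it stale, mirroring how
 * ice_aq_get_link_info() clears pi->phy.get_link_info on success. The
 * wrapper name is hypothetical.
 */
static enum ice_status example_refresh_link(struct ice_port_info *pi)
{
	if (!pi->phy.get_link_info)
		return 0;	/* cached link info is still valid */

	return ice_aq_get_link_info(pi, true /* enable LSE */, NULL, NULL);
}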
 470
 471/**
 472 * ice_fill_tx_timer_and_fc_thresh
 473 * @hw: pointer to the HW struct
 474 * @cmd: pointer to MAC cfg structure
 475 *
 476 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 477 * descriptor
 478 */
 479static void
 480ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
 481				struct ice_aqc_set_mac_cfg *cmd)
 482{
 483	u16 fc_thres_val, tx_timer_val;
 484	u32 val;
 485
 486	/* We read back the transmit timer and FC threshold value of
 487	 * LFC. Thus, we will use index =
 488	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
 489	 *
 490	 * Also, because we are operating on transmit timer and FC
 491	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
 492	 */
 493#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
 494
 495	/* Retrieve the transmit timer */
 496	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
 497	tx_timer_val = val &
 498		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
 499	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
 500
 501	/* Retrieve the FC threshold */
 502	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
 503	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
 504
 505	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
 506}
 507
 508/**
 509 * ice_aq_set_mac_cfg
 510 * @hw: pointer to the HW struct
 511 * @max_frame_size: Maximum Frame Size to be supported
 512 * @cd: pointer to command details structure or NULL
 513 *
 514 * Set MAC configuration (0x0603)
 515 */
 516enum ice_status
 517ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
 518{
 519	struct ice_aqc_set_mac_cfg *cmd;
 520	struct ice_aq_desc desc;
 521
 522	cmd = &desc.params.set_mac_cfg;
 523
 524	if (max_frame_size == 0)
 525		return ICE_ERR_PARAM;
 526
 527	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
 528
 529	cmd->max_frame_size = cpu_to_le16(max_frame_size);
 530
 531	ice_fill_tx_timer_and_fc_thresh(hw, cmd);
 532
 533	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 534}
 535
 536/**
 537 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 538 * @hw: pointer to the HW struct
 539 */
 540static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 541{
 542	struct ice_switch_info *sw;
 543	enum ice_status status;
 544
 545	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
 546				       sizeof(*hw->switch_info), GFP_KERNEL);
 547	sw = hw->switch_info;
 548
 549	if (!sw)
 550		return ICE_ERR_NO_MEMORY;
 551
 552	INIT_LIST_HEAD(&sw->vsi_list_map_head);
 553
 554	status = ice_init_def_sw_recp(hw);
 555	if (status) {
 556		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
 557		return status;
 558	}
 559	return 0;
 560}
 561
 562/**
 563 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 564 * @hw: pointer to the HW struct
 565 */
 566static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
 567{
 568	struct ice_switch_info *sw = hw->switch_info;
 569	struct ice_vsi_list_map_info *v_pos_map;
 570	struct ice_vsi_list_map_info *v_tmp_map;
 571	struct ice_sw_recipe *recps;
 572	u8 i;
 573
 574	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
 575				 list_entry) {
 576		list_del(&v_pos_map->list_entry);
 577		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
 578	}
 579	recps = hw->switch_info->recp_list;
 580	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
 581		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
 582
 583		recps[i].root_rid = i;
 584		mutex_destroy(&recps[i].filt_rule_lock);
 585		list_for_each_entry_safe(lst_itr, tmp_entry,
 586					 &recps[i].filt_rules, list_entry) {
 587			list_del(&lst_itr->list_entry);
 588			devm_kfree(ice_hw_to_dev(hw), lst_itr);
 589		}
 590	}
 591	ice_rm_all_sw_replay_rule_info(hw);
 592	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
 593	devm_kfree(ice_hw_to_dev(hw), sw);
 594}
 595
 596/**
 597 * ice_get_fw_log_cfg - get FW logging configuration
 598 * @hw: pointer to the HW struct
 599 */
 600static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
 601{
 602	struct ice_aq_desc desc;
 603	enum ice_status status;
 604	__le16 *config;
 605	u16 size;
 606
 607	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
 608	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
 609	if (!config)
 610		return ICE_ERR_NO_MEMORY;
 611
 612	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
 613
 614	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
 615	if (!status) {
 616		u16 i;
 617
 618		/* Save FW logging information into the HW structure */
 619		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
 620			u16 v, m, flgs;
 621
 622			v = le16_to_cpu(config[i]);
 623			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
 624			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
 625
 626			if (m < ICE_AQC_FW_LOG_ID_MAX)
 627				hw->fw_log.evnts[m].cur = flgs;
 628		}
 629	}
 630
 631	devm_kfree(ice_hw_to_dev(hw), config);
 632
 633	return status;
 634}
 635
 636/**
 637 * ice_cfg_fw_log - configure FW logging
 638 * @hw: pointer to the HW struct
 639 * @enable: enable certain FW logging events if true, disable all if false
 640 *
 641 * This function enables/disables the FW logging via Rx CQ events and a UART
 642 * port based on predetermined configurations. FW logging via the Rx CQ can be
 643 * enabled/disabled for individual PFs. However, FW logging via the UART can
 644 * only be enabled/disabled for all PFs on the same device.
 645 *
 646 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 647 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 648 * before initializing the device.
 649 *
 650 * When re/configuring FW logging, callers need to update the "cfg" elements of
 651 * the hw->fw_log.evnts array with the desired logging event configurations for
 652 * modules of interest. When disabling FW logging completely, the callers can
 653 * just pass false in the "enable" parameter. On completion, the function will
 654 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 655 * logging event configurations of the modules that are being re/configured. FW
 656 * logging modules that are not part of a reconfiguration operation retain their
 657 * previous states.
 658 *
 659 * Before resetting the device, it is recommended that the driver disables FW
 660 * logging before shutting down the control queue. When disabling FW logging
 661 * ("enable" = false), the latest configurations of FW logging events stored in
 662 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 663 * a device reset.
 664 *
 665 * When enabling FW logging to emit log messages via the Rx CQ during the
 666 * device's initialization phase, a mechanism alternative to interrupt handlers
 667 * needs to be used to extract FW log messages from the Rx CQ periodically and
 668 * to prevent the Rx CQ from being full and stalling other types of control
 669 * messages from FW to SW. Interrupts are typically disabled during the device's
 670 * initialization phase.
 671 */
 672static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
 673{
 674	struct ice_aqc_fw_logging *cmd;
 675	enum ice_status status = 0;
 676	u16 i, chgs = 0, len = 0;
 677	struct ice_aq_desc desc;
 678	__le16 *data = NULL;
 679	u8 actv_evnts = 0;
 680	void *buf = NULL;
 681
 682	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
 683		return 0;
 684
 685	/* Disable FW logging only when the control queue is still responsive */
 686	if (!enable &&
 687	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
 688		return 0;
 689
 690	/* Get current FW log settings */
 691	status = ice_get_fw_log_cfg(hw);
 692	if (status)
 693		return status;
 694
 695	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
 696	cmd = &desc.params.fw_logging;
 697
 698	/* Indicate which controls are valid */
 699	if (hw->fw_log.cq_en)
 700		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
 701
 702	if (hw->fw_log.uart_en)
 703		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
 704
 705	if (enable) {
 706		/* Fill in an array of entries with FW logging modules and
 707		 * logging events being reconfigured.
 708		 */
 709		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
 710			u16 val;
 711
 712			/* Keep track of enabled event types */
 713			actv_evnts |= hw->fw_log.evnts[i].cfg;
 714
 715			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
 716				continue;
 717
 718			if (!data) {
 719				data = devm_kcalloc(ice_hw_to_dev(hw),
 720						    sizeof(*data),
 721						    ICE_AQC_FW_LOG_ID_MAX,
 722						    GFP_KERNEL);
 723				if (!data)
 724					return ICE_ERR_NO_MEMORY;
 725			}
 726
 727			val = i << ICE_AQC_FW_LOG_ID_S;
 728			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
 729			data[chgs++] = cpu_to_le16(val);
 730		}
 731
 732		/* Only enable FW logging if at least one module is specified.
 733		 * If FW logging is currently enabled but all modules are not
 734		 * enabled to emit log messages, disable FW logging altogether.
 735		 */
 736		if (actv_evnts) {
 737			/* Leave if there is effectively no change */
 738			if (!chgs)
 739				goto out;
 740
 741			if (hw->fw_log.cq_en)
 742				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
 743
 744			if (hw->fw_log.uart_en)
 745				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
 746
 747			buf = data;
 748			len = sizeof(*data) * chgs;
 749			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 750		}
 751	}
 752
 753	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
 754	if (!status) {
 755		/* Update the current configuration to reflect events enabled.
 756		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
 757		 * logging mode is enabled for the device. They do not reflect
 758		 * actual modules being enabled to emit log messages. So, their
 759		 * values remain unchanged even when all modules are disabled.
 760		 */
 761		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
 762
 763		hw->fw_log.actv_evnts = actv_evnts;
 764		for (i = 0; i < cnt; i++) {
 765			u16 v, m;
 766
 767			if (!enable) {
 768				/* When disabling all FW logging events as part
 769				 * of device's de-initialization, the original
 770				 * configurations are retained, and can be used
 771				 * to reconfigure FW logging later if the device
 772				 * is re-initialized.
 773				 */
 774				hw->fw_log.evnts[i].cur = 0;
 775				continue;
 776			}
 777
 778			v = le16_to_cpu(data[i]);
 779			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
 780			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
 781		}
 782	}
 783
 784out:
 785	if (data)
 786		devm_kfree(ice_hw_to_dev(hw), data);
 787
 788	return status;
 789}
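/* Illustrative sketch (not part of this file): opting in to CQ-based FW
 * logging for one module before device init, following the caller protocol
 * described above ice_cfg_fw_log(). The module index (0) and event bits are
 * placeholder values; see ice_adminq_cmd.h for the real module IDs and
 * ICE_AQC_FW_LOG_* flags.
 */
static void example_request_fw_log(struct ice_hw *hw)
{
	hw->fw_log.cq_en = true;	/* emit FW logs via the Rx CQ */

	/* request events for one module; placeholder index and bits */
	hw->fw_log.evnts[0].cfg = 0x1;

	if (ice_cfg_fw_log(hw, true))
		ice_debug(hw, ICE_DBG_INIT, "FW logging not enabled\n");
}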
 790
 791/**
 792 * ice_output_fw_log
 793 * @hw: pointer to the HW struct
 794 * @desc: pointer to the AQ message descriptor
 795 * @buf: pointer to the buffer accompanying the AQ message
 796 *
 797 * Formats a FW Log message and outputs it via the standard driver logs.
 798 */
 799void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
 800{
 801	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
 802	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
 803			le16_to_cpu(desc->datalen));
 804	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
 805}
 806
 807/**
 808 * ice_get_itr_intrl_gran
 809 * @hw: pointer to the HW struct
 810 *
 811 * Determines the ITR/INTRL granularities based on the maximum aggregate
 812 * bandwidth according to the device's configuration during power-on.
 813 */
 814static void ice_get_itr_intrl_gran(struct ice_hw *hw)
 815{
 816	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
 817			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
 818			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
 819
 820	switch (max_agg_bw) {
 821	case ICE_MAX_AGG_BW_200G:
 822	case ICE_MAX_AGG_BW_100G:
 823	case ICE_MAX_AGG_BW_50G:
 824		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
 825		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
 826		break;
 827	case ICE_MAX_AGG_BW_25G:
 828		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
 829		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
 830		break;
 831	}
 832}
 833
 834/**
 835 * ice_init_hw - main hardware initialization routine
 836 * @hw: pointer to the hardware structure
 837 */
 838enum ice_status ice_init_hw(struct ice_hw *hw)
 839{
 840	struct ice_aqc_get_phy_caps_data *pcaps;
 841	enum ice_status status;
 842	u16 mac_buf_len;
 843	void *mac_buf;
 844
 845	/* Set MAC type based on DeviceID */
 846	status = ice_set_mac_type(hw);
 847	if (status)
 848		return status;
 849
 850	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
 851			 PF_FUNC_RID_FUNC_NUM_M) >>
 852		PF_FUNC_RID_FUNC_NUM_S;
 853
 854	status = ice_reset(hw, ICE_RESET_PFR);
 855	if (status)
 856		return status;
 857
 858	ice_get_itr_intrl_gran(hw);
 859
 860	status = ice_create_all_ctrlq(hw);
 861	if (status)
 862		goto err_unroll_cqinit;
 863
 864	/* Enable FW logging. Not fatal if this fails. */
 865	status = ice_cfg_fw_log(hw, true);
 866	if (status)
 867		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
 868
 869	status = ice_clear_pf_cfg(hw);
 870	if (status)
 871		goto err_unroll_cqinit;
 872
 873	/* Set bit to enable Flow Director filters */
 874	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
 875	INIT_LIST_HEAD(&hw->fdir_list_head);
 876
 877	ice_clear_pxe_mode(hw);
 878
 879	status = ice_init_nvm(hw);
 880	if (status)
 881		goto err_unroll_cqinit;
 882
 883	status = ice_get_caps(hw);
 884	if (status)
 885		goto err_unroll_cqinit;
 886
 887	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
 888				     sizeof(*hw->port_info), GFP_KERNEL);
 889	if (!hw->port_info) {
 890		status = ICE_ERR_NO_MEMORY;
 891		goto err_unroll_cqinit;
 892	}
 893
 894	/* set the back pointer to HW */
 895	hw->port_info->hw = hw;
 896
 897	/* Initialize port_info struct with switch configuration data */
 898	status = ice_get_initial_sw_cfg(hw);
 899	if (status)
 900		goto err_unroll_alloc;
 901
 902	hw->evb_veb = true;
 903
 904	/* Query the allocated resources for Tx scheduler */
 905	status = ice_sched_query_res_alloc(hw);
 906	if (status) {
 907		ice_debug(hw, ICE_DBG_SCHED,
 908			  "Failed to get scheduler allocated resources\n");
 909		goto err_unroll_alloc;
 910	}
 911
 912	/* Initialize port_info struct with scheduler data */
 913	status = ice_sched_init_port(hw->port_info);
 914	if (status)
 915		goto err_unroll_sched;
 916
 917	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 918	if (!pcaps) {
 919		status = ICE_ERR_NO_MEMORY;
 920		goto err_unroll_sched;
 921	}
 922
 923	/* Initialize port_info struct with PHY capabilities */
 924	status = ice_aq_get_phy_caps(hw->port_info, false,
 925				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
 926	devm_kfree(ice_hw_to_dev(hw), pcaps);
 927	if (status)
 928		goto err_unroll_sched;
 929
 930	/* Initialize port_info struct with link information */
 931	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
 932	if (status)
 933		goto err_unroll_sched;
 934
 935	/* need a valid SW entry point to build a Tx tree */
 936	if (!hw->sw_entry_point_layer) {
 937		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
 938		status = ICE_ERR_CFG;
 939		goto err_unroll_sched;
 940	}
 941	INIT_LIST_HEAD(&hw->agg_list);
 942	/* Initialize max burst size */
 943	if (!hw->max_burst_size)
 944		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
 945
 946	status = ice_init_fltr_mgmt_struct(hw);
 947	if (status)
 948		goto err_unroll_sched;
 949
 950	/* Get MAC information */
 951	/* A single port can report up to two (LAN and WoL) addresses */
 952	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
 953			       sizeof(struct ice_aqc_manage_mac_read_resp),
 954			       GFP_KERNEL);
 955	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 956
 957	if (!mac_buf) {
 958		status = ICE_ERR_NO_MEMORY;
 959		goto err_unroll_fltr_mgmt_struct;
 960	}
 961
 962	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
 963	devm_kfree(ice_hw_to_dev(hw), mac_buf);
 964
 965	if (status)
 966		goto err_unroll_fltr_mgmt_struct;
 967	/* enable jumbo frame support at MAC level */
 968	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 969	if (status)
 970		goto err_unroll_fltr_mgmt_struct;
 971	/* Obtain counter base index which would be used by flow director */
 972	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
 973	if (status)
 974		goto err_unroll_fltr_mgmt_struct;
 975	status = ice_init_hw_tbls(hw);
 976	if (status)
 977		goto err_unroll_fltr_mgmt_struct;
 978	mutex_init(&hw->tnl_lock);
 979	return 0;
 980
 981err_unroll_fltr_mgmt_struct:
 982	ice_cleanup_fltr_mgmt_struct(hw);
 983err_unroll_sched:
 984	ice_sched_cleanup_all(hw);
 985err_unroll_alloc:
 986	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
 987err_unroll_cqinit:
 988	ice_destroy_all_ctrlq(hw);
 989	return status;
 990}
 991
 992/**
 993 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 994 * @hw: pointer to the hardware structure
 995 *
 996 * This should be called only during nominal operation, not as a result of
 997 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 998 * applicable initializations if it fails for any reason.
 999 */
1000void ice_deinit_hw(struct ice_hw *hw)
1001{
1002	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1003	ice_cleanup_fltr_mgmt_struct(hw);
1004
1005	ice_sched_cleanup_all(hw);
1006	ice_sched_clear_agg(hw);
1007	ice_free_seg(hw);
1008	ice_free_hw_tbls(hw);
1009	mutex_destroy(&hw->tnl_lock);
1010
1011	if (hw->port_info) {
1012		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1013		hw->port_info = NULL;
1014	}
1015
1016	/* Attempt to disable FW logging before shutting down control queues */
1017	ice_cfg_fw_log(hw, false);
1018	ice_destroy_all_ctrlq(hw);
1019
1020	/* Clear VSI contexts if not already cleared */
1021	ice_clear_all_vsi_ctx(hw);
1022}
1023
1024/**
1025 * ice_check_reset - Check to see if a global reset is complete
1026 * @hw: pointer to the hardware structure
1027 */
1028enum ice_status ice_check_reset(struct ice_hw *hw)
1029{
1030	u32 cnt, reg = 0, grst_timeout, uld_mask;
1031
1032	/* Poll for Device Active state in case a recent CORER, GLOBR,
1033	 * or EMPR has occurred. The grst delay value is in 100ms units.
1034	 * Add 1sec for outstanding AQ commands that can take a long time.
1035	 */
1036	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1037			GLGEN_RSTCTL_GRSTDEL_S) + 10;
1038
1039	for (cnt = 0; cnt < grst_timeout; cnt++) {
1040		mdelay(100);
1041		reg = rd32(hw, GLGEN_RSTAT);
1042		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1043			break;
1044	}
1045
1046	if (cnt == grst_timeout) {
1047		ice_debug(hw, ICE_DBG_INIT,
1048			  "Global reset polling failed to complete.\n");
1049		return ICE_ERR_RESET_FAILED;
1050	}
1051
1052#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
1053				 GLNVM_ULD_PCIER_DONE_1_M |\
1054				 GLNVM_ULD_CORER_DONE_M |\
1055				 GLNVM_ULD_GLOBR_DONE_M |\
1056				 GLNVM_ULD_POR_DONE_M |\
1057				 GLNVM_ULD_POR_DONE_1_M |\
1058				 GLNVM_ULD_PCIER_DONE_2_M)
1059
1060	uld_mask = ICE_RESET_DONE_MASK;
1061
1062	/* Device is Active; check Global Reset processes are done */
1063	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1064		reg = rd32(hw, GLNVM_ULD) & uld_mask;
1065		if (reg == uld_mask) {
1066			ice_debug(hw, ICE_DBG_INIT,
1067				  "Global reset processes done. %d\n", cnt);
1068			break;
1069		}
1070		mdelay(10);
1071	}
1072
1073	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1074		ice_debug(hw, ICE_DBG_INIT,
1075			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1076			  reg);
1077		return ICE_ERR_RESET_FAILED;
1078	}
1079
1080	return 0;
1081}
1082
1083/**
1084 * ice_pf_reset - Reset the PF
1085 * @hw: pointer to the hardware structure
1086 *
1087 * If a global reset has been triggered, this function checks
1088 * for its completion and then issues the PF reset
1089 */
1090static enum ice_status ice_pf_reset(struct ice_hw *hw)
1091{
1092	u32 cnt, reg;
1093
1094	/* If at function entry a global reset was already in progress, i.e.
1095	 * state is not 'device active' or any of the reset done bits are not
1096	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1097	 * global reset is done.
1098	 */
1099	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1100	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1101		/* poll on global reset currently in progress until done */
1102		if (ice_check_reset(hw))
1103			return ICE_ERR_RESET_FAILED;
1104
1105		return 0;
1106	}
1107
1108	/* Reset the PF */
1109	reg = rd32(hw, PFGEN_CTRL);
1110
1111	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1112
1113	/* Wait for the PFR to complete. The wait time is the global config lock
1114	 * timeout plus the PFR timeout which will account for a possible reset
1115	 * that is occurring during a download package operation.
1116	 */
1117	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1118	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
1119		reg = rd32(hw, PFGEN_CTRL);
1120		if (!(reg & PFGEN_CTRL_PFSWR_M))
1121			break;
1122
1123		mdelay(1);
1124	}
1125
1126	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
1127		ice_debug(hw, ICE_DBG_INIT,
1128			  "PF reset polling failed to complete.\n");
1129		return ICE_ERR_RESET_FAILED;
1130	}
1131
1132	return 0;
1133}
1134
1135/**
1136 * ice_reset - Perform different types of reset
1137 * @hw: pointer to the hardware structure
1138 * @req: reset request
1139 *
1140 * This function triggers a reset as specified by the req parameter.
1141 *
1142 * Note:
1143 * If anything other than a PF reset is triggered, PXE mode is restored.
1144 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1145 * interface has been restored in the rebuild flow.
1146 */
1147enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1148{
1149	u32 val = 0;
1150
1151	switch (req) {
1152	case ICE_RESET_PFR:
1153		return ice_pf_reset(hw);
1154	case ICE_RESET_CORER:
1155		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1156		val = GLGEN_RTRIG_CORER_M;
1157		break;
1158	case ICE_RESET_GLOBR:
1159		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1160		val = GLGEN_RTRIG_GLOBR_M;
1161		break;
1162	default:
1163		return ICE_ERR_PARAM;
1164	}
1165
1166	val |= rd32(hw, GLGEN_RTRIG);
1167	wr32(hw, GLGEN_RTRIG, val);
1168	ice_flush(hw);
1169
1170	/* wait for the FW to be ready */
1171	return ice_check_reset(hw);
1172}
1173
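/* Illustrative only: a hedged sketch of how a rebuild path might use
 * ice_reset() per the Note above -- trigger a CoreR, then clear PXE mode
 * once the AQ interface is back up. ice_example_corer_rebuild() and the
 * elided control queue reinit are hypothetical, not driver code.
 */
static enum ice_status __maybe_unused
ice_example_corer_rebuild(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_reset(hw, ICE_RESET_CORER);
	if (status)
		return status;

	/* ... reinitialize control queues here ... */

	/* non-PF resets restore PXE mode; it must be cleared again */
	ice_clear_pxe_mode(hw);
	return 0;
}
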
1174/**
1175 * ice_copy_rxq_ctx_to_hw
1176 * @hw: pointer to the hardware structure
1177 * @ice_rxq_ctx: pointer to the rxq context
1178 * @rxq_index: the index of the Rx queue
1179 *
1180 * Copies rxq context from dense structure to HW register space
1181 */
1182static enum ice_status
1183ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1184{
1185	u8 i;
1186
1187	if (!ice_rxq_ctx)
1188		return ICE_ERR_BAD_PTR;
1189
1190	if (rxq_index > QRX_CTRL_MAX_INDEX)
1191		return ICE_ERR_PARAM;
1192
1193	/* Copy each dword separately to HW */
1194	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1195		wr32(hw, QRX_CONTEXT(i, rxq_index),
1196		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1197
1198		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1199			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1200	}
1201
1202	return 0;
1203}
1204
1205/* LAN Rx Queue Context */
1206static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1207	/* Field		Width	LSB */
1208	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
1209	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
1210	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
1211	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
1212	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
1213	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
1214	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
1215	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
1216	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1217	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1218	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1219	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1220	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1221	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1222	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1223	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1224	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1225	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1226	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1227	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
1228	{ 0 }
1229};
1230
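/* The table above drives ice_set_ctx(): each ICE_CTX_STORE entry names a
 * struct field, its width in bits, and its LSB position within the dense
 * context buffer. A minimal standalone sketch of that bit packing follows;
 * ice_example_pack_field() is hypothetical and far simpler than the real
 * ice_set_ctx(), which also handles byte copies and endianness.
 */
static void __maybe_unused
ice_example_pack_field(u8 *dest, u64 val, u16 width, u16 lsb)
{
	u16 bit;

	/* copy 'width' bits of 'val' into 'dest' starting at bit 'lsb' */
	for (bit = 0; bit < width; bit++)
		if (val & BIT_ULL(bit))
			dest[(lsb + bit) / 8] |= BIT((lsb + bit) % 8);
}
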
1231/**
1232 * ice_write_rxq_ctx
1233 * @hw: pointer to the hardware structure
1234 * @rlan_ctx: pointer to the rxq context
1235 * @rxq_index: the index of the Rx queue
1236 *
1237 * Converts rxq context from sparse to dense structure and then writes
1238 * it to HW register space and enables the hardware to prefetch descriptors
1239 * instead of only fetching them on demand
1240 */
1241enum ice_status
1242ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1243		  u32 rxq_index)
1244{
1245	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1246
1247	if (!rlan_ctx)
1248		return ICE_ERR_BAD_PTR;
1249
1250	rlan_ctx->prefena = 1;
1251
1252	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1253	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1254}
1255
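/* Hedged usage sketch for ice_write_rxq_ctx(): program Rx queue 0 with an
 * illustrative 512-descriptor ring. The field values and 128-byte units
 * are assumptions for the example; real callers derive them from ring and
 * VSI state in the driver's Rx setup path.
 */
static enum ice_status __maybe_unused
ice_example_setup_rxq(struct ice_hw *hw, u64 ring_dma)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	rlan_ctx.base = ring_dma >> 7;	/* base address in 128-byte units */
	rlan_ctx.qlen = 512;		/* number of descriptors in the ring */
	rlan_ctx.dbuf = 2048 >> 7;	/* Rx buffer size in 128-byte units */
	rlan_ctx.dsize = 1;		/* 32-byte descriptor format */

	return ice_write_rxq_ctx(hw, &rlan_ctx, 0);
}
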
1256/* LAN Tx Queue Context */
1257const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1258				    /* Field			Width	LSB */
1259	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1260	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1261	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1262	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1263	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1264	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1265	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1266	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1267	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1268	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1269	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1270	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1271	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1272	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1273	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1274	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1275	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1276	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1277	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1278	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1279	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1280	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1281	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1282	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1283	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1284	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1285	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1286	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1287	{ 0 }
1288};
1289
1290/* FW Admin Queue command wrappers */
1291
1292/* Software lock/mutex that is meant to be held while the Global Config Lock
1293 * in firmware is acquired by the software to prevent most (but not all) types
1294 * of AQ commands from being sent to FW
1295 */
1296DEFINE_MUTEX(ice_global_cfg_lock_sw);
1297
1298/**
1299 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1300 * @hw: pointer to the HW struct
1301 * @desc: descriptor describing the command
1302 * @buf: buffer to use for indirect commands (NULL for direct commands)
1303 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1304 * @cd: pointer to command details structure
1305 *
1306 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1307 */
1308enum ice_status
1309ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1310		u16 buf_size, struct ice_sq_cd *cd)
1311{
1312	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1313	bool lock_acquired = false;
1314	enum ice_status status;
1315
1316	/* When a package download is in process (i.e. when the firmware's
1317	 * Global Configuration Lock resource is held), only the Download
1318	 * Package, Get Version, Get Package Info List and Release Resource
1319	 * (with resource ID set to Global Config Lock) AdminQ commands are
1320	 * allowed; all others must block until the package download completes
1321	 * and the Global Config Lock is released.  See also
1322	 * ice_acquire_global_cfg_lock().
1323	 */
1324	switch (le16_to_cpu(desc->opcode)) {
1325	case ice_aqc_opc_download_pkg:
1326	case ice_aqc_opc_get_pkg_info_list:
1327	case ice_aqc_opc_get_ver:
1328		break;
1329	case ice_aqc_opc_release_res:
1330		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1331			break;
1332		fallthrough;
1333	default:
1334		mutex_lock(&ice_global_cfg_lock_sw);
1335		lock_acquired = true;
1336		break;
1337	}
1338
1339	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1340	if (lock_acquired)
1341		mutex_unlock(&ice_global_cfg_lock_sw);
1342
1343	return status;
1344}
1345
1346/**
1347 * ice_aq_get_fw_ver
1348 * @hw: pointer to the HW struct
1349 * @cd: pointer to command details structure or NULL
1350 *
1351 * Get the firmware version (0x0001) from the admin queue commands
1352 */
1353enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1354{
1355	struct ice_aqc_get_ver *resp;
1356	struct ice_aq_desc desc;
1357	enum ice_status status;
1358
1359	resp = &desc.params.get_ver;
1360
1361	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1362
1363	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1364
1365	if (!status) {
1366		hw->fw_branch = resp->fw_branch;
1367		hw->fw_maj_ver = resp->fw_major;
1368		hw->fw_min_ver = resp->fw_minor;
1369		hw->fw_patch = resp->fw_patch;
1370		hw->fw_build = le32_to_cpu(resp->fw_build);
1371		hw->api_branch = resp->api_branch;
1372		hw->api_maj_ver = resp->api_major;
1373		hw->api_min_ver = resp->api_minor;
1374		hw->api_patch = resp->api_patch;
1375	}
1376
1377	return status;
1378}
1379
1380/**
1381 * ice_aq_send_driver_ver
1382 * @hw: pointer to the HW struct
1383 * @dv: driver's major, minor version
1384 * @cd: pointer to command details structure or NULL
1385 *
1386 * Send the driver version (0x0002) to the firmware
1387 */
1388enum ice_status
1389ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1390		       struct ice_sq_cd *cd)
1391{
1392	struct ice_aqc_driver_ver *cmd;
1393	struct ice_aq_desc desc;
1394	u16 len;
1395
1396	cmd = &desc.params.driver_ver;
1397
1398	if (!dv)
1399		return ICE_ERR_PARAM;
1400
1401	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1402
1403	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1404	cmd->major_ver = dv->major_ver;
1405	cmd->minor_ver = dv->minor_ver;
1406	cmd->build_ver = dv->build_ver;
1407	cmd->subbuild_ver = dv->subbuild_ver;
1408
1409	len = 0;
1410	while (len < sizeof(dv->driver_string) &&
1411	       isascii(dv->driver_string[len]) && dv->driver_string[len])
1412		len++;
1413
1414	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1415}
1416
1417/**
1418 * ice_aq_q_shutdown
1419 * @hw: pointer to the HW struct
1420 * @unloading: is the driver unloading itself
1421 *
1422 * Tell the Firmware that we're shutting down the AdminQ and whether
1423 * or not the driver is unloading as well (0x0003).
1424 */
1425enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1426{
1427	struct ice_aqc_q_shutdown *cmd;
1428	struct ice_aq_desc desc;
1429
1430	cmd = &desc.params.q_shutdown;
1431
1432	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1433
1434	if (unloading)
1435		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1436
1437	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1438}
1439
1440/**
1441 * ice_aq_req_res
1442 * @hw: pointer to the HW struct
1443 * @res: resource ID
1444 * @access: access type
1445 * @sdp_number: resource number
1446 * @timeout: the maximum time in ms that the driver may hold the resource
1447 * @cd: pointer to command details structure or NULL
1448 *
1449 * Requests common resource using the admin queue commands (0x0008).
1450 * When attempting to acquire the Global Config Lock, the driver can
1451 * learn of three states:
1452 *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1453 *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1454 *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1455 *                          successfully downloaded the package; the driver does
1456 *                          not have to download the package and can continue
1457 *                          loading
1458 *
1459 * Note that if the caller is in an acquire-lock, perform-action, release-lock
1460 * phase of operation, it is possible that the FW may detect a timeout and issue
1461 * a CORER. In this case, the driver will receive a CORER interrupt and will
1462 * have to determine its cause. The calling thread that is handling this flow
1463 * will likely get an error propagated back to it indicating the Download
1464 * Package, Update Package or the Release Resource AQ commands timed out.
1465 */
1466static enum ice_status
1467ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1468	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1469	       struct ice_sq_cd *cd)
1470{
1471	struct ice_aqc_req_res *cmd_resp;
1472	struct ice_aq_desc desc;
1473	enum ice_status status;
1474
1475	cmd_resp = &desc.params.res_owner;
1476
1477	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1478
1479	cmd_resp->res_id = cpu_to_le16(res);
1480	cmd_resp->access_type = cpu_to_le16(access);
1481	cmd_resp->res_number = cpu_to_le32(sdp_number);
1482	cmd_resp->timeout = cpu_to_le32(*timeout);
1483	*timeout = 0;
1484
1485	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1486
1487	/* The completion specifies the maximum time in ms that the driver
1488	 * may hold the resource in the Timeout field.
1489	 */
1490
1491	/* Global config lock response utilizes an additional status field.
1492	 *
1493	 * If the Global config lock resource is held by some other driver, the
1494	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1495	 * and the timeout field indicates the maximum time the current owner
1496	 * of the resource has to free it.
1497	 */
1498	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1499		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1500			*timeout = le32_to_cpu(cmd_resp->timeout);
1501			return 0;
1502		} else if (le16_to_cpu(cmd_resp->status) ==
1503			   ICE_AQ_RES_GLBL_IN_PROG) {
1504			*timeout = le32_to_cpu(cmd_resp->timeout);
1505			return ICE_ERR_AQ_ERROR;
1506		} else if (le16_to_cpu(cmd_resp->status) ==
1507			   ICE_AQ_RES_GLBL_DONE) {
1508			return ICE_ERR_AQ_NO_WORK;
1509		}
1510
1511		/* invalid FW response, force a timeout immediately */
1512		*timeout = 0;
1513		return ICE_ERR_AQ_ERROR;
1514	}
1515
1516	/* If the resource is held by some other driver, the command completes
1517	 * with a busy return value and the timeout field indicates the maximum
1518	 * time the current owner of the resource has to free it.
1519	 */
1520	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1521		*timeout = le32_to_cpu(cmd_resp->timeout);
1522
1523	return status;
1524}
1525
1526/**
1527 * ice_aq_release_res
1528 * @hw: pointer to the HW struct
1529 * @res: resource ID
1530 * @sdp_number: resource number
1531 * @cd: pointer to command details structure or NULL
1532 *
1533 * release common resource using the admin queue commands (0x0009)
1534 */
1535static enum ice_status
1536ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1537		   struct ice_sq_cd *cd)
1538{
1539	struct ice_aqc_req_res *cmd;
1540	struct ice_aq_desc desc;
1541
1542	cmd = &desc.params.res_owner;
1543
1544	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1545
1546	cmd->res_id = cpu_to_le16(res);
1547	cmd->res_number = cpu_to_le32(sdp_number);
1548
1549	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1550}
1551
1552/**
1553 * ice_acquire_res
1554 * @hw: pointer to the HW structure
1555 * @res: resource ID
1556 * @access: access type (read or write)
1557 * @timeout: timeout in milliseconds
1558 *
1559 * This function will attempt to acquire the ownership of a resource.
1560 */
1561enum ice_status
1562ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1563		enum ice_aq_res_access_type access, u32 timeout)
1564{
1565#define ICE_RES_POLLING_DELAY_MS	10
1566	u32 delay = ICE_RES_POLLING_DELAY_MS;
1567	u32 time_left = timeout;
1568	enum ice_status status;
1569
1570	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1571
1572	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1573	 * previously acquired the resource and performed any necessary updates;
1574	 * in this case the caller does not obtain the resource and has no
1575	 * further work to do.
1576	 */
1577	if (status == ICE_ERR_AQ_NO_WORK)
1578		goto ice_acquire_res_exit;
1579
1580	if (status)
1581		ice_debug(hw, ICE_DBG_RES,
1582			  "resource %d acquire type %d failed.\n", res, access);
1583
1584	/* If necessary, poll until the current lock owner times out */
1585	timeout = time_left;
1586	while (status && timeout && time_left) {
1587		mdelay(delay);
1588		timeout = (timeout > delay) ? timeout - delay : 0;
1589		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1590
1591		if (status == ICE_ERR_AQ_NO_WORK)
1592			/* lock free, but no work to do */
1593			break;
1594
1595		if (!status)
1596			/* lock acquired */
1597			break;
1598	}
1599	if (status && status != ICE_ERR_AQ_NO_WORK)
1600		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1601
1602ice_acquire_res_exit:
1603	if (status == ICE_ERR_AQ_NO_WORK) {
1604		if (access == ICE_RES_WRITE)
1605			ice_debug(hw, ICE_DBG_RES,
1606				  "resource indicates no work to do.\n");
1607		else
1608			ice_debug(hw, ICE_DBG_RES,
1609				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1610	}
1611	return status;
1612}
1613
1614/**
1615 * ice_release_res
1616 * @hw: pointer to the HW structure
1617 * @res: resource ID
1618 *
1619 * This function will release a resource using the proper Admin Command.
1620 */
1621void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1622{
1623	enum ice_status status;
1624	u32 total_delay = 0;
1625
1626	status = ice_aq_release_res(hw, res, 0, NULL);
1627
1628	/* there are some rare cases when trying to release the resource
1629	 * results in an admin queue timeout, so handle them correctly
1630	 */
1631	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1632	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1633		mdelay(1);
1634		status = ice_aq_release_res(hw, res, 0, NULL);
1635		total_delay++;
1636	}
1637}
1638
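/* Typical acquire/use/release pairing for the two helpers above, sketched
 * with hedged values: take the NVM resource for read, do the protected
 * work, and always release. The 3000ms timeout and the elided body are
 * illustrative assumptions.
 */
static enum ice_status __maybe_unused
ice_example_read_under_res(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
	if (status)
		return status;

	/* ... perform accesses that require ownership of the resource ... */

	ice_release_res(hw, ICE_NVM_RES_ID);
	return 0;
}
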
1639/**
1640 * ice_aq_alloc_free_res - command to allocate/free resources
1641 * @hw: pointer to the HW struct
1642 * @num_entries: number of resource entries in buffer
1643 * @buf: Indirect buffer to hold data parameters and response
1644 * @buf_size: size of buffer for indirect commands
1645 * @opc: pass in the command opcode
1646 * @cd: pointer to command details structure or NULL
1647 *
1648 * Helper function to allocate/free resources using the admin queue commands
1649 */
1650enum ice_status
1651ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1652		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1653		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1654{
1655	struct ice_aqc_alloc_free_res_cmd *cmd;
1656	struct ice_aq_desc desc;
1657
1658	cmd = &desc.params.sw_res_ctrl;
1659
1660	if (!buf)
1661		return ICE_ERR_PARAM;
1662
1663	if (buf_size < (num_entries * sizeof(buf->elem[0])))
1664		return ICE_ERR_PARAM;
1665
1666	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1667
1668	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1669
1670	cmd->num_entries = cpu_to_le16(num_entries);
1671
1672	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1673}
1674
1675/**
1676 * ice_alloc_hw_res - allocate resource
1677 * @hw: pointer to the HW struct
1678 * @type: type of resource
1679 * @num: number of resources to allocate
1680 * @btm: allocate from bottom
1681 * @res: pointer to array that will receive the resources
1682 */
1683enum ice_status
1684ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1685{
1686	struct ice_aqc_alloc_free_res_elem *buf;
1687	enum ice_status status;
1688	u16 buf_len;
1689
1690	buf_len = struct_size(buf, elem, num);
1691	buf = kzalloc(buf_len, GFP_KERNEL);
1692	if (!buf)
1693		return ICE_ERR_NO_MEMORY;
1694
1695	/* Prepare buffer to allocate resource. */
1696	buf->num_elems = cpu_to_le16(num);
1697	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1698				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1699	if (btm)
1700		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1701
1702	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1703				       ice_aqc_opc_alloc_res, NULL);
1704	if (status)
1705		goto ice_alloc_res_exit;
1706
1707	memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1708
1709ice_alloc_res_exit:
1710	kfree(buf);
1711	return status;
1712}
1713
1714/**
1715 * ice_free_hw_res - free allocated HW resource
1716 * @hw: pointer to the HW struct
1717 * @type: type of resource to free
1718 * @num: number of resources
1719 * @res: pointer to array that contains the resources to free
1720 */
1721enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1722{
1723	struct ice_aqc_alloc_free_res_elem *buf;
1724	enum ice_status status;
1725	u16 buf_len;
1726
1727	buf_len = struct_size(buf, elem, num);
1728	buf = kzalloc(buf_len, GFP_KERNEL);
1729	if (!buf)
1730		return ICE_ERR_NO_MEMORY;
1731
1732	/* Prepare buffer to free resource. */
1733	buf->num_elems = cpu_to_le16(num);
1734	buf->res_type = cpu_to_le16(type);
1735	memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1736
1737	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1738				       ice_aqc_opc_free_res, NULL);
1739	if (status)
1740		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1741
1742	kfree(buf);
1743	return status;
1744}
1745
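/* Illustrative pairing of ice_alloc_hw_res()/ice_free_hw_res(): allocate
 * one dedicated resource scanning from the bottom of the pool, then hand
 * it back. The VLAN counter resource type is just an example choice.
 */
static enum ice_status __maybe_unused
ice_example_res_roundtrip(struct ice_hw *hw)
{
	enum ice_status status;
	u16 res_id;

	status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, 1, true,
				  &res_id);
	if (status)
		return status;

	return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, 1, &res_id);
}
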
1746/**
1747 * ice_get_num_per_func - determine number of resources per PF
1748 * @hw: pointer to the HW structure
1749 * @max: value to be evenly split between each PF
1750 *
1751 * Determine the number of valid functions by going through the bitmap returned
1752 * from parsing capabilities and use this to calculate the number of resources
1753 * per PF based on the max value passed in.
1754 */
1755static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1756{
1757	u8 funcs;
1758
1759#define ICE_CAPS_VALID_FUNCS_M	0xFF
1760	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1761			 ICE_CAPS_VALID_FUNCS_M);
1762
1763	if (!funcs)
1764		return 0;
1765
1766	return max / funcs;
1767}
1768
1769/**
1770 * ice_parse_common_caps - parse common device/function capabilities
1771 * @hw: pointer to the HW struct
1772 * @caps: pointer to common capabilities structure
1773 * @elem: the capability element to parse
1774 * @prefix: message prefix for tracing capabilities
1775 *
1776 * Given a capability element, extract relevant details into the common
1777 * capability structure.
1778 *
1779 * Returns: true if the capability matches one of the common capability ids,
1780 * false otherwise.
1781 */
1782static bool
1783ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1784		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
1785{
1786	u32 logical_id = le32_to_cpu(elem->logical_id);
1787	u32 phys_id = le32_to_cpu(elem->phys_id);
1788	u32 number = le32_to_cpu(elem->number);
1789	u16 cap = le16_to_cpu(elem->cap);
1790	bool found = true;
1791
1792	switch (cap) {
1793	case ICE_AQC_CAPS_VALID_FUNCTIONS:
1794		caps->valid_functions = number;
1795		ice_debug(hw, ICE_DBG_INIT,
1796			  "%s: valid_functions (bitmap) = %d\n", prefix,
1797			  caps->valid_functions);
1798		break;
1799	case ICE_AQC_CAPS_SRIOV:
1800		caps->sr_iov_1_1 = (number == 1);
1801		ice_debug(hw, ICE_DBG_INIT,
1802			  "%s: sr_iov_1_1 = %d\n", prefix,
1803			  caps->sr_iov_1_1);
1804		break;
1805	case ICE_AQC_CAPS_DCB:
1806		caps->dcb = (number == 1);
1807		caps->active_tc_bitmap = logical_id;
1808		caps->maxtc = phys_id;
1809		ice_debug(hw, ICE_DBG_INIT,
1810			  "%s: dcb = %d\n", prefix, caps->dcb);
1811		ice_debug(hw, ICE_DBG_INIT,
1812			  "%s: active_tc_bitmap = %d\n", prefix,
1813			  caps->active_tc_bitmap);
1814		ice_debug(hw, ICE_DBG_INIT,
1815			  "%s: maxtc = %d\n", prefix, caps->maxtc);
1816		break;
1817	case ICE_AQC_CAPS_RSS:
1818		caps->rss_table_size = number;
1819		caps->rss_table_entry_width = logical_id;
1820		ice_debug(hw, ICE_DBG_INIT,
1821			  "%s: rss_table_size = %d\n", prefix,
1822			  caps->rss_table_size);
1823		ice_debug(hw, ICE_DBG_INIT,
1824			  "%s: rss_table_entry_width = %d\n", prefix,
1825			  caps->rss_table_entry_width);
1826		break;
1827	case ICE_AQC_CAPS_RXQS:
1828		caps->num_rxq = number;
1829		caps->rxq_first_id = phys_id;
1830		ice_debug(hw, ICE_DBG_INIT,
1831			  "%s: num_rxq = %d\n", prefix,
1832			  caps->num_rxq);
1833		ice_debug(hw, ICE_DBG_INIT,
1834			  "%s: rxq_first_id = %d\n", prefix,
1835			  caps->rxq_first_id);
1836		break;
1837	case ICE_AQC_CAPS_TXQS:
1838		caps->num_txq = number;
1839		caps->txq_first_id = phys_id;
1840		ice_debug(hw, ICE_DBG_INIT,
1841			  "%s: num_txq = %d\n", prefix,
1842			  caps->num_txq);
1843		ice_debug(hw, ICE_DBG_INIT,
1844			  "%s: txq_first_id = %d\n", prefix,
1845			  caps->txq_first_id);
1846		break;
1847	case ICE_AQC_CAPS_MSIX:
1848		caps->num_msix_vectors = number;
1849		caps->msix_vector_first_id = phys_id;
1850		ice_debug(hw, ICE_DBG_INIT,
1851			  "%s: num_msix_vectors = %d\n", prefix,
1852			  caps->num_msix_vectors);
1853		ice_debug(hw, ICE_DBG_INIT,
1854			  "%s: msix_vector_first_id = %d\n", prefix,
1855			  caps->msix_vector_first_id);
1856		break;
1857	case ICE_AQC_CAPS_PENDING_NVM_VER:
1858		caps->nvm_update_pending_nvm = true;
1859		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
1860		break;
1861	case ICE_AQC_CAPS_PENDING_OROM_VER:
1862		caps->nvm_update_pending_orom = true;
1863		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
1864		break;
1865	case ICE_AQC_CAPS_PENDING_NET_VER:
1866		caps->nvm_update_pending_netlist = true;
1867		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
1868		break;
1869	case ICE_AQC_CAPS_NVM_MGMT:
1870		caps->nvm_unified_update =
1871			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1872			true : false;
1873		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1874			  caps->nvm_unified_update);
1875		break;
1876	case ICE_AQC_CAPS_MAX_MTU:
1877		caps->max_mtu = number;
1878		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1879			  prefix, caps->max_mtu);
1880		break;
1881	default:
1882		/* Not one of the recognized common capabilities */
1883		found = false;
1884	}
1885
1886	return found;
1887}
1888
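/* Minimal sketch of feeding one capability element through
 * ice_parse_common_caps(): a hand-built ICE_AQC_CAPS_MAX_MTU record sets
 * caps.max_mtu and emits the usual debug trace. The 9728 byte value and
 * ice_example_parse_one_cap() itself are illustrative.
 */
static void __maybe_unused ice_example_parse_one_cap(struct ice_hw *hw)
{
	struct ice_aqc_list_caps_elem elem = { 0 };
	struct ice_hw_common_caps caps = { 0 };

	elem.cap = cpu_to_le16(ICE_AQC_CAPS_MAX_MTU);
	elem.number = cpu_to_le32(9728);

	if (ice_parse_common_caps(hw, &caps, &elem, "example"))
		ice_debug(hw, ICE_DBG_INIT, "example: max_mtu = %d\n",
			  caps.max_mtu);
}
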
1889/**
1890 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
1891 * @hw: pointer to the HW structure
1892 * @caps: pointer to capabilities structure to fix
1893 *
1894 * Re-calculate the capabilities that are dependent on the number of physical
1895 * ports; i.e. some features are not supported or function differently on
1896 * devices with more than 4 ports.
1897 */
1898static void
1899ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
1900{
1901	/* This assumes device capabilities are always scanned before function
1902	 * capabilities during the initialization flow.
1903	 */
1904	if (hw->dev_caps.num_funcs > 4) {
1905		/* Max 4 TCs per port */
1906		caps->maxtc = 4;
1907		ice_debug(hw, ICE_DBG_INIT,
1908			  "reducing maxtc to %d (based on #ports)\n",
1909			  caps->maxtc);
1910	}
1911}
1912
1913/**
1914 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
1915 * @hw: pointer to the HW struct
1916 * @func_p: pointer to function capabilities structure
1917 * @cap: pointer to the capability element to parse
1918 *
1919 * Extract function capabilities for ICE_AQC_CAPS_VF.
1920 */
1921static void
1922ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1923		       struct ice_aqc_list_caps_elem *cap)
1924{
1925	u32 logical_id = le32_to_cpu(cap->logical_id);
1926	u32 number = le32_to_cpu(cap->number);
1927
1928	func_p->num_allocd_vfs = number;
1929	func_p->vf_base_id = logical_id;
1930	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
1931		  func_p->num_allocd_vfs);
1932	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
1933		  func_p->vf_base_id);
1934}
1935
1936/**
1937 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
1938 * @hw: pointer to the HW struct
1939 * @func_p: pointer to function capabilities structure
1940 * @cap: pointer to the capability element to parse
1941 *
1942 * Extract function capabilities for ICE_AQC_CAPS_VSI.
1943 */
1944static void
1945ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1946			struct ice_aqc_list_caps_elem *cap)
1947{
1948	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
1949	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
1950		  le32_to_cpu(cap->number));
1951	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
1952		  func_p->guar_num_vsi);
1953}
1954
1955/**
1956 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
1957 * @hw: pointer to the HW struct
1958 * @func_p: pointer to function capabilities structure
1959 *
1960 * Extract function capabilities for ICE_AQC_CAPS_FD.
1961 */
1962static void
1963ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
1964{
1965	u32 reg_val, val;
1966
1967	reg_val = rd32(hw, GLQF_FD_SIZE);
1968	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1969		GLQF_FD_SIZE_FD_GSIZE_S;
1970	func_p->fd_fltr_guar =
1971		ice_get_num_per_func(hw, val);
1972	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1973		GLQF_FD_SIZE_FD_BSIZE_S;
1974	func_p->fd_fltr_best_effort = val;
1975
1976	ice_debug(hw, ICE_DBG_INIT,
1977		  "func caps: fd_fltr_guar = %d\n",
1978		  func_p->fd_fltr_guar);
1979	ice_debug(hw, ICE_DBG_INIT,
1980		  "func caps: fd_fltr_best_effort = %d\n",
1981		  func_p->fd_fltr_best_effort);
1982}
1983
1984/**
1985 * ice_parse_func_caps - Parse function capabilities
1986 * @hw: pointer to the HW struct
1987 * @func_p: pointer to function capabilities structure
1988 * @buf: buffer containing the function capability records
1989 * @cap_count: the number of capabilities
1990 *
1991 * Helper function to parse function (0x000A) capabilities list. For
1992 * capabilities shared between device and function, this relies on
1993 * ice_parse_common_caps.
1994 *
1995 * Loop through the list of provided capabilities and extract the relevant
1996 * data into the function capabilities structure.
1997 */
1998static void
1999ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2000		    void *buf, u32 cap_count)
2001{
2002	struct ice_aqc_list_caps_elem *cap_resp;
2003	u32 i;
2004
2005	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2006
2007	memset(func_p, 0, sizeof(*func_p));
2008
2009	for (i = 0; i < cap_count; i++) {
2010		u16 cap = le16_to_cpu(cap_resp[i].cap);
2011		bool found;
2012
2013		found = ice_parse_common_caps(hw, &func_p->common_cap,
2014					      &cap_resp[i], "func caps");
2015
2016		switch (cap) {
2017		case ICE_AQC_CAPS_VF:
2018			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2019			break;
2020		case ICE_AQC_CAPS_VSI:
2021			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2022			break;
2023		case ICE_AQC_CAPS_FD:
2024			ice_parse_fdir_func_caps(hw, func_p);
2025			break;
2026		default:
2027			/* Don't list common capabilities as unknown */
2028			if (!found)
2029				ice_debug(hw, ICE_DBG_INIT,
2030					  "func caps: unknown capability[%d]: 0x%x\n",
2031					  i, cap);
2032			break;
2033		}
2034	}
2035
2036	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2037}
2038
2039/**
2040 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2041 * @hw: pointer to the HW struct
2042 * @dev_p: pointer to device capabilities structure
2043 * @cap: capability element to parse
2044 *
2045 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2046 */
2047static void
2048ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2049			      struct ice_aqc_list_caps_elem *cap)
2050{
2051	u32 number = le32_to_cpu(cap->number);
2052
2053	dev_p->num_funcs = hweight32(number);
2054	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2055		  dev_p->num_funcs);
2056}
2057
2058/**
2059 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2060 * @hw: pointer to the HW struct
2061 * @dev_p: pointer to device capabilities structure
2062 * @cap: capability element to parse
2063 *
2064 * Parse ICE_AQC_CAPS_VF for device capabilities.
2065 */
2066static void
2067ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2068		      struct ice_aqc_list_caps_elem *cap)
2069{
2070	u32 number = le32_to_cpu(cap->number);
2071
2072	dev_p->num_vfs_exposed = number;
2073	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vfs_exposed = %d\n",
2074		  dev_p->num_vfs_exposed);
2075}
2076
2077/**
2078 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2079 * @hw: pointer to the HW struct
2080 * @dev_p: pointer to device capabilities structure
2081 * @cap: capability element to parse
2082 *
2083 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2084 */
2085static void
2086ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2087		       struct ice_aqc_list_caps_elem *cap)
2088{
2089	u32 number = le32_to_cpu(cap->number);
2090
2091	dev_p->num_vsi_allocd_to_host = number;
2092	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2093		  dev_p->num_vsi_allocd_to_host);
2094}
2095
2096/**
2097 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2098 * @hw: pointer to the HW struct
2099 * @dev_p: pointer to device capabilities structure
2100 * @cap: capability element to parse
2101 *
2102 * Parse ICE_AQC_CAPS_FD for device capabilities.
2103 */
2104static void
2105ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2106			struct ice_aqc_list_caps_elem *cap)
2107{
2108	u32 number = le32_to_cpu(cap->number);
2109
2110	dev_p->num_flow_director_fltr = number;
2111	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2112		  dev_p->num_flow_director_fltr);
2113}
2114
2115/**
2116 * ice_parse_dev_caps - Parse device capabilities
2117 * @hw: pointer to the HW struct
2118 * @dev_p: pointer to device capabilities structure
2119 * @buf: buffer containing the device capability records
2120 * @cap_count: the number of capabilities
2121 *
2122 * Helper function to parse device (0x000B) capabilities list. For
2123 * capabilities shared between device and function, this relies on
2124 * ice_parse_common_caps.
2125 *
2126 * Loop through the list of provided capabilities and extract the relevant
2127 * data into the device capabilities structure.
2128 */
2129static void
2130ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2131		   void *buf, u32 cap_count)
2132{
2133	struct ice_aqc_list_caps_elem *cap_resp;
2134	u32 i;
2135
2136	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2137
2138	memset(dev_p, 0, sizeof(*dev_p));
2139
2140	for (i = 0; i < cap_count; i++) {
2141		u16 cap = le16_to_cpu(cap_resp[i].cap);
2142		bool found;
2143
2144		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2145					      &cap_resp[i], "dev caps");
2146
2147		switch (cap) {
2148		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2149			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2150			break;
2151		case ICE_AQC_CAPS_VF:
2152			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2153			break;
2154		case ICE_AQC_CAPS_VSI:
2155			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2156			break;
2157		case ICE_AQC_CAPS_FD:
2158			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2159			break;
2160		default:
2161			/* Don't list common capabilities as unknown */
2162			if (!found)
2163				ice_debug(hw, ICE_DBG_INIT,
2164					  "dev caps: unknown capability[%d]: 0x%x\n",
2165					  i, cap);
2166			break;
2167		}
2168	}
2169
2170	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2171}
2172
2173/**
2174 * ice_aq_list_caps - query function/device capabilities
2175 * @hw: pointer to the HW struct
2176 * @buf: a buffer to hold the capabilities
2177 * @buf_size: size of the buffer
2178 * @cap_count: if not NULL, set to the number of capabilities reported
2179 * @opc: capabilities type to discover, device or function
2180 * @cd: pointer to command details structure or NULL
2181 *
2182 * Get the function (0x000A) or device (0x000B) capabilities description from
2183 * firmware and store it in the buffer.
2184 *
2185 * If the cap_count pointer is not NULL, then it is set to the number of
2186 * capabilities firmware will report. Note that if the buffer size is too
2187 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2188 * cap_count will still be updated in this case. It is recommended that the
2189 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2190 * firmware could return) to avoid this.
2191 */
2192enum ice_status
2193ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2194		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2195{
2196	struct ice_aqc_list_caps *cmd;
2197	struct ice_aq_desc desc;
2198	enum ice_status status;
2199
2200	cmd = &desc.params.get_cap;
2201
2202	if (opc != ice_aqc_opc_list_func_caps &&
2203	    opc != ice_aqc_opc_list_dev_caps)
2204		return ICE_ERR_PARAM;
2205
2206	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2207	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2208
2209	if (cap_count)
2210		*cap_count = le32_to_cpu(cmd->count);
2211
2212	return status;
2213}
2214
2215/**
2216 * ice_discover_dev_caps - Read and extract device capabilities
2217 * @hw: pointer to the hardware structure
2218 * @dev_caps: pointer to device capabilities structure
2219 *
2220 * Read the device capabilities and extract them into the dev_caps structure
2221 * for later use.
2222 */
2223enum ice_status
2224ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2225{
2226	enum ice_status status;
2227	u32 cap_count = 0;
2228	void *cbuf;
2229
2230	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2231	if (!cbuf)
2232		return ICE_ERR_NO_MEMORY;
2233
2234	/* Although the driver doesn't know the number of capabilities the
2235	 * device will return, we can simply send a 4KB buffer, the maximum
2236	 * possible size that firmware can return.
2237	 */
2238	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2239
2240	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2241				  ice_aqc_opc_list_dev_caps, NULL);
2242	if (!status)
2243		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2244	kfree(cbuf);
2245
2246	return status;
2247}
2248
2249/**
2250 * ice_discover_func_caps - Read and extract function capabilities
2251 * @hw: pointer to the hardware structure
2252 * @func_caps: pointer to function capabilities structure
2253 *
2254 * Read the function capabilities and extract them into the func_caps structure
2255 * for later use.
2256 */
2257static enum ice_status
2258ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2259{
2260	enum ice_status status;
2261	u32 cap_count = 0;
2262	void *cbuf;
2263
2264	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2265	if (!cbuf)
2266		return ICE_ERR_NO_MEMORY;
2267
2268	/* Although the driver doesn't know the number of capabilities the
2269	 * device will return, we can simply send a 4KB buffer, the maximum
2270	 * possible size that firmware can return.
2271	 */
2272	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2273
2274	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2275				  ice_aqc_opc_list_func_caps, NULL);
2276	if (!status)
2277		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2278	kfree(cbuf);
2279
2280	return status;
2281}
2282
2283/**
2284 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2285 * @hw: pointer to the hardware structure
2286 */
2287void ice_set_safe_mode_caps(struct ice_hw *hw)
2288{
2289	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2290	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2291	struct ice_hw_common_caps cached_caps;
2292	u32 num_funcs;
2293
2294	/* cache some func_caps values that should be restored after memset */
2295	cached_caps = func_caps->common_cap;
2296
2297	/* unset func capabilities */
2298	memset(func_caps, 0, sizeof(*func_caps));
2299
2300#define ICE_RESTORE_FUNC_CAP(name) \
2301	func_caps->common_cap.name = cached_caps.name
2302
2303	/* restore cached values */
2304	ICE_RESTORE_FUNC_CAP(valid_functions);
2305	ICE_RESTORE_FUNC_CAP(txq_first_id);
2306	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2307	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2308	ICE_RESTORE_FUNC_CAP(max_mtu);
2309	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2310	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2311	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2312	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2313
2314	/* one Tx and one Rx queue in safe mode */
2315	func_caps->common_cap.num_rxq = 1;
2316	func_caps->common_cap.num_txq = 1;
2317
2318	/* two MSIX vectors, one for traffic and one for misc causes */
2319	func_caps->common_cap.num_msix_vectors = 2;
2320	func_caps->guar_num_vsi = 1;
2321
2322	/* cache some dev_caps values that should be restored after memset */
2323	cached_caps = dev_caps->common_cap;
2324	num_funcs = dev_caps->num_funcs;
2325
2326	/* unset dev capabilities */
2327	memset(dev_caps, 0, sizeof(*dev_caps));
2328
2329#define ICE_RESTORE_DEV_CAP(name) \
2330	dev_caps->common_cap.name = cached_caps.name
2331
2332	/* restore cached values */
2333	ICE_RESTORE_DEV_CAP(valid_functions);
2334	ICE_RESTORE_DEV_CAP(txq_first_id);
2335	ICE_RESTORE_DEV_CAP(rxq_first_id);
2336	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2337	ICE_RESTORE_DEV_CAP(max_mtu);
2338	ICE_RESTORE_DEV_CAP(nvm_unified_update);
2339	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2340	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2341	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2342	dev_caps->num_funcs = num_funcs;
2343
2344	/* one Tx and one Rx queue per function in safe mode */
2345	dev_caps->common_cap.num_rxq = num_funcs;
2346	dev_caps->common_cap.num_txq = num_funcs;
2347
2348	/* two MSIX vectors per function */
2349	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2350}
2351
2352/**
2353 * ice_get_caps - get info about the HW
2354 * @hw: pointer to the hardware structure
2355 */
2356enum ice_status ice_get_caps(struct ice_hw *hw)
2357{
2358	enum ice_status status;
2359
2360	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2361	if (status)
2362		return status;
2363
2364	return ice_discover_func_caps(hw, &hw->func_caps);
2365}
2366
2367/**
2368 * ice_aq_manage_mac_write - manage MAC address write command
2369 * @hw: pointer to the HW struct
2370 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2371 * @flags: flags to control write behavior
2372 * @cd: pointer to command details structure or NULL
2373 *
2374 * This function is used to write MAC address to the NVM (0x0108).
2375 */
2376enum ice_status
2377ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2378			struct ice_sq_cd *cd)
2379{
2380	struct ice_aqc_manage_mac_write *cmd;
2381	struct ice_aq_desc desc;
2382
2383	cmd = &desc.params.mac_write;
2384	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2385
2386	cmd->flags = flags;
2387	ether_addr_copy(cmd->mac_addr, mac_addr);
2388
2389	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2390}
2391
2392/**
2393 * ice_aq_clear_pxe_mode
2394 * @hw: pointer to the HW struct
2395 *
2396 * Tell the firmware that the driver is taking over from PXE (0x0110).
2397 */
2398static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2399{
2400	struct ice_aq_desc desc;
2401
2402	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2403	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2404
2405	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2406}
2407
2408/**
2409 * ice_clear_pxe_mode - clear pxe operations mode
2410 * @hw: pointer to the HW struct
2411 *
2412 * Make sure all PXE mode settings are cleared, including things
2413 * like descriptor fetch/write-back mode.
2414 */
2415void ice_clear_pxe_mode(struct ice_hw *hw)
2416{
2417	if (ice_check_sq_alive(hw, &hw->adminq))
2418		ice_aq_clear_pxe_mode(hw);
2419}
2420
2421/**
2422 * ice_get_link_speed_based_on_phy_type - returns link speed
2423 * @phy_type_low: lower part of phy_type
2424 * @phy_type_high: higher part of phy_type
2425 *
2426 * This helper function will convert an entry in PHY type structure
2427 * [phy_type_low, phy_type_high] to its corresponding link speed.
2428 * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
2429 * should be set, as this function converts a single PHY type to its
2430 * speed.
2431 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2432 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2433 */
2434static u16
2435ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2436{
2437	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2438	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2439
2440	switch (phy_type_low) {
2441	case ICE_PHY_TYPE_LOW_100BASE_TX:
2442	case ICE_PHY_TYPE_LOW_100M_SGMII:
2443		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2444		break;
2445	case ICE_PHY_TYPE_LOW_1000BASE_T:
2446	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2447	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2448	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2449	case ICE_PHY_TYPE_LOW_1G_SGMII:
2450		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2451		break;
2452	case ICE_PHY_TYPE_LOW_2500BASE_T:
2453	case ICE_PHY_TYPE_LOW_2500BASE_X:
2454	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2455		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2456		break;
2457	case ICE_PHY_TYPE_LOW_5GBASE_T:
2458	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2459		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2460		break;
2461	case ICE_PHY_TYPE_LOW_10GBASE_T:
2462	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2463	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2464	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2465	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2466	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2467	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2468		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2469		break;
2470	case ICE_PHY_TYPE_LOW_25GBASE_T:
2471	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2472	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2473	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2474	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2475	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2476	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2477	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2478	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2479	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2480	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2481		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2482		break;
2483	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2484	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2485	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2486	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2487	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2488	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2489		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2490		break;
2491	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2492	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2493	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2494	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2495	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2496	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2497	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2498	case ICE_PHY_TYPE_LOW_50G_AUI2:
2499	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2500	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2501	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2502	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2503	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2504	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2505	case ICE_PHY_TYPE_LOW_50G_AUI1:
2506		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2507		break;
2508	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2509	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2510	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2511	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2512	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2513	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2514	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2515	case ICE_PHY_TYPE_LOW_100G_AUI4:
2516	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2517	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2518	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2519	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2520	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2521		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2522		break;
2523	default:
2524		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2525		break;
2526	}
2527
2528	switch (phy_type_high) {
2529	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2530	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2531	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2532	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2533	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2534		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2535		break;
2536	default:
2537		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2538		break;
2539	}
2540
2541	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2542	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2543		return ICE_AQ_LINK_SPEED_UNKNOWN;
2544	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2545		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2546		return ICE_AQ_LINK_SPEED_UNKNOWN;
2547	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2548		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2549		return speed_phy_type_low;
2550	else
2551		return speed_phy_type_high;
2552}
2553
2554/**
2555 * ice_update_phy_type
2556 * @phy_type_low: pointer to the lower part of phy_type
2557 * @phy_type_high: pointer to the higher part of phy_type
2558 * @link_speeds_bitmap: targeted link speeds bitmap
2559 *
2560 * Note: For the link_speeds_bitmap structure, see
2561 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2562 * link_speeds_bitmap that includes multiple speeds.
2563 *
2564 * Each entry in the [phy_type_low, phy_type_high] structure represents
2565 * a certain link speed. This helper function turns on bits in
2566 * [phy_type_low, phy_type_high] based on the value of the
2567 * link_speeds_bitmap input parameter.
2568 */
2569void
2570ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2571		    u16 link_speeds_bitmap)
2572{
2573	u64 pt_high;
2574	u64 pt_low;
2575	int index;
2576	u16 speed;
2577
2578	/* We first check with low part of phy_type */
2579	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2580		pt_low = BIT_ULL(index);
2581		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2582
2583		if (link_speeds_bitmap & speed)
2584			*phy_type_low |= BIT_ULL(index);
2585	}
2586
2587	/* We then check with high part of phy_type */
2588	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2589		pt_high = BIT_ULL(index);
2590		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2591
2592		if (link_speeds_bitmap & speed)
2593			*phy_type_high |= BIT_ULL(index);
2594	}
2595}
2596
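/* Brief usage sketch: translate a user request for 10G or 25G link speeds
 * into PHY type masks suitable for a Set PHY config payload. Purely
 * illustrative; ice_example_speeds_to_phy_types() is not driver code.
 */
static void __maybe_unused
ice_example_speeds_to_phy_types(u64 *phy_type_low, u64 *phy_type_high)
{
	*phy_type_low = 0;
	*phy_type_high = 0;

	ice_update_phy_type(phy_type_low, phy_type_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
}
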
2597/**
2598 * ice_aq_set_phy_cfg
2599 * @hw: pointer to the HW struct
2600 * @pi: port info structure of the interested logical port
2601 * @cfg: structure with PHY configuration data to be set
2602 * @cd: pointer to command details structure or NULL
2603 *
2604 * Set the various PHY configuration parameters supported on the Port.
2605 * One or more of the Set PHY config parameters may be ignored in an MFP
2606 * mode as the PF may not have the privilege to set some of the PHY Config
2607 * parameters. This status will be indicated by the command response (0x0601).
2608 */
2609enum ice_status
2610ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2611		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2612{
2613	struct ice_aq_desc desc;
2614	enum ice_status status;
2615
2616	if (!cfg)
2617		return ICE_ERR_PARAM;
2618
2619	/* Ensure that only valid bits of cfg->caps can be turned on. */
2620	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2621		ice_debug(hw, ICE_DBG_PHY,
2622			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2623			  cfg->caps);
2624
2625		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2626	}
2627
2628	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2629	desc.params.set_phy.lport_num = pi->lport;
2630	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2631
2632	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2633	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
2634		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2635	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
2636		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2637	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
2638	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
2639		  cfg->low_power_ctrl_an);
2640	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
2641	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
2642	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
2643		  cfg->link_fec_opt);
2644
2645	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2646	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2647		status = 0;
2648
2649	if (!status)
2650		pi->phy.curr_user_phy_cfg = *cfg;
2651
2652	return status;
2653}
2654
2655/**
2656 * ice_update_link_info - update status of the HW network link
2657 * @pi: port info structure of the interested logical port
2658 */
2659enum ice_status ice_update_link_info(struct ice_port_info *pi)
2660{
2661	struct ice_link_status *li;
2662	enum ice_status status;
2663
2664	if (!pi)
2665		return ICE_ERR_PARAM;
2666
2667	li = &pi->phy.link_info;
2668
2669	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2670	if (status)
2671		return status;
2672
2673	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2674		struct ice_aqc_get_phy_caps_data *pcaps;
2675		struct ice_hw *hw;
2676
2677		hw = pi->hw;
2678		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2679				     GFP_KERNEL);
2680		if (!pcaps)
2681			return ICE_ERR_NO_MEMORY;
2682
2683		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2684					     pcaps, NULL);
2685
2686		devm_kfree(ice_hw_to_dev(hw), pcaps);
2687	}
2688
2689	return status;
2690}
2691
2692/**
2693 * ice_cache_phy_user_req
2694 * @pi: port information structure
2695 * @cache_data: PHY logging data
2696 * @cache_mode: PHY logging mode
2697 *
2698 * Log the user request on (FC, FEC, SPEED) for later use.
2699 */
2700static void
2701ice_cache_phy_user_req(struct ice_port_info *pi,
2702		       struct ice_phy_cache_mode_data cache_data,
2703		       enum ice_phy_cache_mode cache_mode)
2704{
2705	if (!pi)
2706		return;
2707
2708	switch (cache_mode) {
2709	case ICE_FC_MODE:
2710		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2711		break;
2712	case ICE_SPEED_MODE:
2713		pi->phy.curr_user_speed_req =
2714			cache_data.data.curr_user_speed_req;
2715		break;
2716	case ICE_FEC_MODE:
2717		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2718		break;
2719	default:
2720		break;
2721	}
2722}
2723
2724/**
2725 * ice_caps_to_fc_mode
2726 * @caps: PHY capabilities
2727 *
2728 * Convert PHY FC capabilities to ice FC mode
2729 */
2730enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2731{
2732	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2733	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2734		return ICE_FC_FULL;
2735
2736	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2737		return ICE_FC_TX_PAUSE;
2738
2739	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2740		return ICE_FC_RX_PAUSE;
2741
2742	return ICE_FC_NONE;
2743}
2744
2745/**
2746 * ice_caps_to_fec_mode
2747 * @caps: PHY capabilities
2748 * @fec_options: Link FEC options
2749 *
2750 * Convert PHY FEC capabilities to ice FEC mode
2751 */
2752enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2753{
2754	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2755		return ICE_FEC_AUTO;
2756
2757	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2758			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2759			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2760			   ICE_AQC_PHY_FEC_25G_KR_REQ))
2761		return ICE_FEC_BASER;
2762
2763	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2764			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2765			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2766		return ICE_FEC_RS;
2767
2768	return ICE_FEC_NONE;
2769}
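
/* Illustrative sketch: given an ICE_AQC_REPORT_TOPO_CAP response, report
 * whether the PHY is currently resolving to RS FEC. The example_* name is
 * hypothetical; the field and enum names are the ones used above.
 */
static bool example_phy_uses_rs_fec(struct ice_aqc_get_phy_caps_data *pcaps)
{
	return ice_caps_to_fec_mode(pcaps->caps, pcaps->link_fec_options) ==
	       ICE_FEC_RS;
}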
2770
2771/**
2772 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2773 * @pi: port information structure
2774 * @cfg: PHY configuration data to set FC mode
2775 * @req_mode: FC mode to configure
2776 */
2777enum ice_status
2778ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2779	       enum ice_fc_mode req_mode)
2780{
2781	struct ice_phy_cache_mode_data cache_data;
2782	u8 pause_mask = 0x0;
2783
2784	if (!pi || !cfg)
2785		return ICE_ERR_BAD_PTR;
2786
2787	switch (req_mode) {
2788	case ICE_FC_FULL:
2789		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2790		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2791		break;
2792	case ICE_FC_RX_PAUSE:
2793		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2794		break;
2795	case ICE_FC_TX_PAUSE:
2796		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2797		break;
2798	default:
2799		break;
2800	}
2801
2802	/* clear the old pause settings */
2803	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2804		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2805
2806	/* set the new capabilities */
2807	cfg->caps |= pause_mask;
2808
2809	/* Cache user FC request */
2810	cache_data.data.curr_user_fc_req = req_mode;
2811	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2812
2813	return 0;
2814}
2815
2816/**
2817 * ice_set_fc
2818 * @pi: port information structure
2819 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2820 * @ena_auto_link_update: enable automatic link update
2821 *
2822 * Set the requested flow control mode.
2823 */
2824enum ice_status
2825ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2826{
2827	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2828	struct ice_aqc_get_phy_caps_data *pcaps;
2829	enum ice_status status;
2830	struct ice_hw *hw;
2831
2832	if (!pi || !aq_failures)
2833		return ICE_ERR_BAD_PTR;
2834
2835	*aq_failures = 0;
2836	hw = pi->hw;
2837
2838	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2839	if (!pcaps)
2840		return ICE_ERR_NO_MEMORY;
2841
2842	/* Get the current PHY config */
2843	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2844				     NULL);
2845	if (status) {
2846		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2847		goto out;
2848	}
2849
2850	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2851
2852	/* Configure the set PHY data */
2853	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2854	if (status)
2855		goto out;
2856
2857	/* If the capabilities have changed, then set the new config */
2858	if (cfg.caps != pcaps->caps) {
2859		int retry_count, retry_max = 10;
2860
2861		/* Auto restart link so settings take effect */
2862		if (ena_auto_link_update)
2863			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2864
2865		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2866		if (status) {
2867			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2868			goto out;
2869		}
2870
2871		/* Update the link info
2872		 * It sometimes takes a really long time for link to
2873		 * come back from the atomic reset. Thus, we wait a
2874		 * little bit.
2875		 */
2876		for (retry_count = 0; retry_count < retry_max; retry_count++) {
2877			status = ice_update_link_info(pi);
2878
2879			if (!status)
2880				break;
2881
2882			mdelay(100);
2883		}
2884
2885		if (status)
2886			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2887	}
2888
2889out:
2890	devm_kfree(ice_hw_to_dev(hw), pcaps);
2891	return status;
2892}
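
/* Illustrative caller sketch, assuming an ethtool-style pause request:
 * stash the requested mode in pi->fc.req_mode (which ice_set_fc() reads)
 * and let the helper restart the link. The example_* name is
 * hypothetical.
 */
static enum ice_status example_request_rx_pause(struct ice_port_info *pi)
{
	u8 aq_failures = 0;
	enum ice_status status;

	pi->fc.req_mode = ICE_FC_RX_PAUSE;
	status = ice_set_fc(pi, &aq_failures, true);
	if (status)
		ice_debug(pi->hw, ICE_DBG_LINK,
			  "set_fc failed, aq_failures = 0x%x\n", aq_failures);
	return status;
}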
2893
2894/**
2895 * ice_phy_caps_equals_cfg
2896 * @phy_caps: PHY capabilities
2897 * @phy_cfg: PHY configuration
2898 *
2899 * Helper function to determine if the PHY capabilities match the PHY
2900 * configuration
2901 */
2902bool
2903ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2904			struct ice_aqc_set_phy_cfg_data *phy_cfg)
2905{
2906	u8 caps_mask, cfg_mask;
2907
2908	if (!phy_caps || !phy_cfg)
2909		return false;
2910
2911	/* These bits are not common between capabilities and configuration.
2912	 * Do not use them to determine equality.
2913	 */
2914	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2915					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
2916	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2917
2918	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2919	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2920	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2921	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2922	    phy_caps->eee_cap != phy_cfg->eee_cap ||
2923	    phy_caps->eeer_value != phy_cfg->eeer_value ||
2924	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2925		return false;
2926
2927	return true;
2928}
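
/* Illustrative sketch: use ice_phy_caps_equals_cfg() to skip a redundant
 * set-PHY-config AQ call (and the link flap it causes) when the active
 * configuration already matches. The example_* name is hypothetical; the
 * AQ helpers and report mode are the ones used elsewhere in this file.
 */
static enum ice_status
example_apply_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		      struct ice_aqc_set_phy_cfg_data *cfg)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				     pcaps, NULL);
	if (status)
		goto out;

	/* nothing to change; avoid a needless link flap */
	if (ice_phy_caps_equals_cfg(pcaps, cfg))
		goto out;

	status = ice_aq_set_phy_cfg(hw, pi, cfg, NULL);
out:
	kfree(pcaps);
	return status;
}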
2929
2930/**
2931 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2932 * @pi: port information structure
2933 * @caps: PHY ability structure to copy data from
2934 * @cfg: PHY configuration structure to copy data to
2935 *
2936 * Helper function to copy AQC PHY get ability data to PHY set configuration
2937 * data structure
2938 */
2939void
2940ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2941			 struct ice_aqc_get_phy_caps_data *caps,
2942			 struct ice_aqc_set_phy_cfg_data *cfg)
2943{
2944	if (!pi || !caps || !cfg)
2945		return;
2946
2947	memset(cfg, 0, sizeof(*cfg));
2948	cfg->phy_type_low = caps->phy_type_low;
2949	cfg->phy_type_high = caps->phy_type_high;
2950	cfg->caps = caps->caps;
2951	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2952	cfg->eee_cap = caps->eee_cap;
2953	cfg->eeer_value = caps->eeer_value;
2954	cfg->link_fec_opt = caps->link_fec_options;
2955	cfg->module_compliance_enforcement =
2956		caps->module_compliance_enforcement;
2957
2958	if (ice_fw_supports_link_override(pi->hw)) {
2959		struct ice_link_default_override_tlv tlv;
2960
2961		if (ice_get_link_default_override(&tlv, pi))
2962			return;
2963
2964		if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2965			cfg->module_compliance_enforcement |=
2966				ICE_LINK_OVERRIDE_STRICT_MODE;
2967	}
2968}
2969
2970/**
2971 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2972 * @pi: port information structure
2973 * @cfg: PHY configuration data to set FEC mode
2974 * @fec: FEC mode to configure
2975 */
2976enum ice_status
2977ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2978		enum ice_fec_mode fec)
2979{
2980	struct ice_aqc_get_phy_caps_data *pcaps;
2981	enum ice_status status;
2982
2983	if (!pi || !cfg)
2984		return ICE_ERR_BAD_PTR;
2985
2986	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2987	if (!pcaps)
2988		return ICE_ERR_NO_MEMORY;
2989
2990	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
2991				     NULL);
2992	if (status)
2993		goto out;
2994
2995	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2996	cfg->link_fec_opt = pcaps->link_fec_options;
2997
2998	switch (fec) {
2999	case ICE_FEC_BASER:
3000		/* Keep only the BASE-R ability bits (this clears the
3001		 * RS bits), then OR in the BASE-R request bits.
3002		 */
3003		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3004			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3005		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3006			ICE_AQC_PHY_FEC_25G_KR_REQ;
3007		break;
3008	case ICE_FEC_RS:
3009		/* Keep only the RS ability bit (this clears the
3010		 * BASE-R bits), then OR in the RS request bits.
3011		 */
3012		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3013		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3014			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3015		break;
3016	case ICE_FEC_NONE:
3017		/* Clear all FEC option bits. */
3018		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3019		break;
3020	case ICE_FEC_AUTO:
3021		/* Keep the auto FEC bit and all caps bits; OR in all FEC options. */
3022		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3023		cfg->link_fec_opt |= pcaps->link_fec_options;
3024		break;
3025	default:
3026		status = ICE_ERR_PARAM;
3027		break;
3028	}
3029
3030	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3031		struct ice_link_default_override_tlv tlv;
3032
3033		if (ice_get_link_default_override(&tlv, pi))
3034			goto out;
3035
3036		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3037		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3038			cfg->link_fec_opt = tlv.fec_options;
3039	}
3040
3041out:
3042	kfree(pcaps);
3043
3044	return status;
3045}
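
/* Illustrative sketch: request BASE-R FEC on a port by seeding the config
 * from the current caps, applying ice_cfg_phy_fec(), then pushing it with
 * the set-PHY-config AQ command, following the same pattern as
 * ice_set_fc() above. The example_* name is hypothetical.
 */
static enum ice_status example_request_baser_fec(struct ice_port_info *pi)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				     pcaps, NULL);
	if (status)
		goto out;

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_BASER);
	if (status)
		goto out;

	status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
out:
	kfree(pcaps);
	return status;
}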
3046
3047/**
3048 * ice_get_link_status - get status of the HW network link
3049 * @pi: port information structure
3050 * @link_up: pointer to bool (true/false = linkup/linkdown)
3051 *
3052 * Sets *link_up to true if the link is up, false if it is down. The
3053 * value of *link_up is valid only when the return status is zero. As
3054 * a side effect of this call, link status reporting becomes enabled.
3055 */
3056enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3057{
3058	struct ice_phy_info *phy_info;
3059	enum ice_status status = 0;
3060
3061	if (!pi || !link_up)
3062		return ICE_ERR_PARAM;
3063
3064	phy_info = &pi->phy;
3065
3066	if (phy_info->get_link_info) {
3067		status = ice_update_link_info(pi);
3068
3069		if (status)
3070			ice_debug(pi->hw, ICE_DBG_LINK,
3071				  "get link status error, status = %d\n",
3072				  status);
3073	}
3074
3075	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3076
3077	return status;
3078}
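
/* Illustrative sketch: poll ice_get_link_status() until the link comes up
 * or a retry budget is exhausted. The 100 ms step and 10-try budget are
 * arbitrary example values, not driver policy; example_* is hypothetical.
 */
static bool example_wait_for_link(struct ice_port_info *pi)
{
	bool link_up = false;
	int tries;

	for (tries = 0; tries < 10; tries++) {
		if (!ice_get_link_status(pi, &link_up) && link_up)
			return true;
		msleep(100);
	}
	return false;
}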
3079
3080/**
3081 * ice_aq_set_link_restart_an
3082 * @pi: pointer to the port information structure
3083 * @ena_link: if true: enable link, if false: disable link
3084 * @cd: pointer to command details structure or NULL
3085 *
3086 * Sets up the link and restarts the Auto-Negotiation over the link.
3087 */
3088enum ice_status
3089ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3090			   struct ice_sq_cd *cd)
3091{
3092	struct ice_aqc_restart_an *cmd;
3093	struct ice_aq_desc desc;
3094
3095	cmd = &desc.params.restart_an;
3096
3097	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3098
3099	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3100	cmd->lport_num = pi->lport;
3101	if (ena_link)
3102		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3103	else
3104		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3105
3106	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3107}
3108
3109/**
3110 * ice_aq_set_event_mask
3111 * @hw: pointer to the HW struct
3112 * @port_num: port number of the physical function
3113 * @mask: event mask to be set
3114 * @cd: pointer to command details structure or NULL
3115 *
3116 * Set event mask (0x0613)
3117 */
3118enum ice_status
3119ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3120		      struct ice_sq_cd *cd)
3121{
3122	struct ice_aqc_set_event_mask *cmd;
3123	struct ice_aq_desc desc;
3124
3125	cmd = &desc.params.set_event_mask;
3126
3127	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3128
3129	cmd->lport_num = port_num;
3130
3131	cmd->event_mask = cpu_to_le16(mask);
3132	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3133}
3134
3135/**
3136 * ice_aq_set_mac_loopback
3137 * @hw: pointer to the HW struct
3138 * @ena_lpbk: Enable or Disable loopback
3139 * @cd: pointer to command details structure or NULL
3140 *
3141 * Enable/disable loopback on a given port
3142 */
3143enum ice_status
3144ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3145{
3146	struct ice_aqc_set_mac_lb *cmd;
3147	struct ice_aq_desc desc;
3148
3149	cmd = &desc.params.set_mac_lb;
3150
3151	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3152	if (ena_lpbk)
3153		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3154
3155	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3156}
3157
3158/**
3159 * ice_aq_set_port_id_led
3160 * @pi: pointer to the port information
3161 * @is_orig_mode: is this LED set to original mode (by the net-list)
3162 * @cd: pointer to command details structure or NULL
3163 *
3164 * Set LED value for the given port (0x06e9)
3165 */
3166enum ice_status
3167ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3168		       struct ice_sq_cd *cd)
3169{
3170	struct ice_aqc_set_port_id_led *cmd;
3171	struct ice_hw *hw = pi->hw;
3172	struct ice_aq_desc desc;
3173
3174	cmd = &desc.params.set_port_id_led;
3175
3176	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3177
3178	if (is_orig_mode)
3179		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3180	else
3181		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3182
3183	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3184}
3185
3186/**
3187 * ice_aq_sff_eeprom
3188 * @hw: pointer to the HW struct
3189 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3190 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3191 * @mem_addr: I2C offset. Lower 8 bits hold the address; upper 8 bits must be zero.
3192 * @page: QSFP page
3193 * @set_page: set or ignore the page
3194 * @data: pointer to data buffer to be read/written to the I2C device.
3195 * @length: 1-16 for read, 1 for write.
3196 * @write: false for read, true for write.
3197 * @cd: pointer to command details structure or NULL
3198 *
3199 * Read/Write SFF EEPROM (0x06EE)
3200 */
3201enum ice_status
3202ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3203		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3204		  bool write, struct ice_sq_cd *cd)
3205{
3206	struct ice_aqc_sff_eeprom *cmd;
3207	struct ice_aq_desc desc;
3208	enum ice_status status;
3209
3210	if (!data || (mem_addr & 0xff00))
3211		return ICE_ERR_PARAM;
3212
3213	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3214	cmd = &desc.params.read_write_sff_param;
3215	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3216	cmd->lport_num = (u8)(lport & 0xff);
3217	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3218	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3219					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3220					((set_page <<
3221					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3222					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3223	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3224	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3225	if (write)
3226		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3227
3228	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3229	return status;
3230}
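
/* Illustrative sketch: read the single SFF-8024 identifier byte at offset
 * 0 of a module's EEPROM over the conventional 0xA0 I2C address. The
 * 0xA0/offset-0 constants come from the SFF specs, not from this file;
 * the example_* name is hypothetical.
 */
static enum ice_status example_read_module_id(struct ice_hw *hw, u8 *id)
{
	/* lport = 0 with the valid bit (bit 8) clear lets firmware use the
	 * default port mapping; page 0 with set_page = 0 leaves the page
	 * register untouched.
	 */
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, id, 1, false, NULL);
}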
3231
3232/**
3233 * __ice_aq_get_set_rss_lut
3234 * @hw: pointer to the hardware structure
3235 * @vsi_id: VSI FW index
3236 * @lut_type: LUT table type
3237 * @lut: pointer to the LUT buffer provided by the caller
3238 * @lut_size: size of the LUT buffer
3239 * @glob_lut_idx: global LUT index
3240 * @set: set true to set the table, false to get the table
3241 *
3242 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3243 */
3244static enum ice_status
3245__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3246			 u16 lut_size, u8 glob_lut_idx, bool set)
3247{
3248	struct ice_aqc_get_set_rss_lut *cmd_resp;
3249	struct ice_aq_desc desc;
3250	enum ice_status status;
3251	u16 flags = 0;
3252
3253	cmd_resp = &desc.params.get_set_rss_lut;
3254
3255	if (set) {
3256		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3257		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3258	} else {
3259		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3260	}
3261
3262	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3263					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3264					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3265				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3266
3267	switch (lut_type) {
3268	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3269	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3270	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3271		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3272			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3273		break;
3274	default:
3275		status = ICE_ERR_PARAM;
3276		goto ice_aq_get_set_rss_lut_exit;
3277	}
3278
3279	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3280		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3281			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3282
3283		if (!set)
3284			goto ice_aq_get_set_rss_lut_send;
3285	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3286		if (!set)
3287			goto ice_aq_get_set_rss_lut_send;
3288	} else {
3289		goto ice_aq_get_set_rss_lut_send;
3290	}
3291
3292	/* LUT size is only valid for Global and PF table types */
3293	switch (lut_size) {
3294	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3295		break;
3296	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3297		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3298			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3299			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3300		break;
3301	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3302		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3303			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3304				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3305				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3306			break;
3307		}
3308		fallthrough;
3309	default:
3310		status = ICE_ERR_PARAM;
3311		goto ice_aq_get_set_rss_lut_exit;
3312	}
3313
3314ice_aq_get_set_rss_lut_send:
3315	cmd_resp->flags = cpu_to_le16(flags);
3316	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3317
3318ice_aq_get_set_rss_lut_exit:
3319	return status;
3320}
3321
3322/**
3323 * ice_aq_get_rss_lut
3324 * @hw: pointer to the hardware structure
3325 * @vsi_handle: software VSI handle
3326 * @lut_type: LUT table type
3327 * @lut: pointer to the LUT buffer provided by the caller
3328 * @lut_size: size of the LUT buffer
3329 *
3330 * get the RSS lookup table, PF or VSI type
3331 */
3332enum ice_status
3333ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3334		   u8 *lut, u16 lut_size)
3335{
3336	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3337		return ICE_ERR_PARAM;
3338
3339	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3340					lut_type, lut, lut_size, 0, false);
3341}
3342
3343/**
3344 * ice_aq_set_rss_lut
3345 * @hw: pointer to the hardware structure
3346 * @vsi_handle: software VSI handle
3347 * @lut_type: LUT table type
3348 * @lut: pointer to the LUT buffer provided by the caller
3349 * @lut_size: size of the LUT buffer
3350 *
3351 * set the RSS lookup table, PF or VSI type
3352 */
3353enum ice_status
3354ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3355		   u8 *lut, u16 lut_size)
3356{
3357	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3358		return ICE_ERR_PARAM;
3359
3360	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3361					lut_type, lut, lut_size, 0, true);
3362}
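
/* Illustrative sketch: program a 128-entry VSI RSS LUT that spreads hash
 * buckets round-robin across num_qs queues. It assumes
 * ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 is the byte size accepted by
 * __ice_aq_get_set_rss_lut() above, and that num_qs fits in a LUT entry;
 * the example_* name is hypothetical.
 */
static enum ice_status
example_fill_rss_lut(struct ice_hw *hw, u16 vsi_handle, u16 num_qs)
{
	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128];
	u16 i;

	if (!num_qs)
		return ICE_ERR_PARAM;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_qs;

	return ice_aq_set_rss_lut(hw, vsi_handle,
				  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
				  lut, sizeof(lut));
}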
3363
3364/**
3365 * __ice_aq_get_set_rss_key
3366 * @hw: pointer to the HW struct
3367 * @vsi_id: VSI FW index
3368 * @key: pointer to key info struct
3369 * @set: set true to set the key, false to get the key
3370 *
3371 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3372 */
3373static enum ice_status
3374__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3375			 struct ice_aqc_get_set_rss_keys *key,
3376			 bool set)
3377{
3378	struct ice_aqc_get_set_rss_key *cmd_resp;
3379	u16 key_size = sizeof(*key);
3380	struct ice_aq_desc desc;
3381
3382	cmd_resp = &desc.params.get_set_rss_key;
3383
3384	if (set) {
3385		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3386		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3387	} else {
3388		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3389	}
3390
3391	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3392					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3393					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3394				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3395
3396	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3397}
3398
3399/**
3400 * ice_aq_get_rss_key
3401 * @hw: pointer to the HW struct
3402 * @vsi_handle: software VSI handle
3403 * @key: pointer to key info struct
3404 *
3405 * get the RSS key per VSI
3406 */
3407enum ice_status
3408ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3409		   struct ice_aqc_get_set_rss_keys *key)
3410{
3411	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3412		return ICE_ERR_PARAM;
3413
3414	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3415					key, false);
3416}
3417
3418/**
3419 * ice_aq_set_rss_key
3420 * @hw: pointer to the HW struct
3421 * @vsi_handle: software VSI handle
3422 * @keys: pointer to key info struct
3423 *
3424 * set the RSS key per VSI
3425 */
3426enum ice_status
3427ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3428		   struct ice_aqc_get_set_rss_keys *keys)
3429{
3430	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3431		return ICE_ERR_PARAM;
3432
3433	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3434					keys, true);
3435}
3436
3437/**
3438 * ice_aq_add_lan_txq
3439 * @hw: pointer to the hardware structure
3440 * @num_qgrps: Number of added queue groups
3441 * @qg_list: list of queue groups to be added
3442 * @buf_size: size of buffer for indirect command
3443 * @cd: pointer to command details structure or NULL
3444 *
3445 * Add Tx LAN queue (0x0C30)
3446 *
3447 * NOTE:
3448 * Prior to calling add Tx LAN queue, initialize the following as part
3449 * of the Tx queue context:
3450 * the Completion queue ID (if the queue uses a Completion queue), the
3451 * Quanta profile, the Cache profile and the Packet shaper profile.
3452 *
3453 * After the add Tx LAN queue AQ command completes, interrupts should
3454 * be associated with the specific queues;
3455 * association of a Tx queue to a Doorbell queue is not part of the Add
3456 * LAN Tx queue flow.
3457 */
3458static enum ice_status
3459ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3460		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3461		   struct ice_sq_cd *cd)
3462{
3463	struct ice_aqc_add_tx_qgrp *list;
3464	struct ice_aqc_add_txqs *cmd;
3465	struct ice_aq_desc desc;
3466	u16 i, sum_size = 0;
3467
3468	cmd = &desc.params.add_txqs;
3469
3470	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3471
3472	if (!qg_list)
3473		return ICE_ERR_PARAM;
3474
3475	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3476		return ICE_ERR_PARAM;
3477
3478	for (i = 0, list = qg_list; i < num_qgrps; i++) {
3479		sum_size += struct_size(list, txqs, list->num_txqs);
3480		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3481						      list->num_txqs);
3482	}
3483
3484	if (buf_size != sum_size)
3485		return ICE_ERR_PARAM;
3486
3487	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3488
3489	cmd->num_qgrps = num_qgrps;
3490
3491	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3492}
3493
3494/**
3495 * ice_aq_dis_lan_txq
3496 * @hw: pointer to the hardware structure
3497 * @num_qgrps: number of groups in the list
3498 * @qg_list: the list of groups to disable
3499 * @buf_size: the total size of the qg_list buffer in bytes
3500 * @rst_src: if called due to reset, specifies the reset source
3501 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3502 * @cd: pointer to command details structure or NULL
3503 *
3504 * Disable LAN Tx queue (0x0C31)
3505 */
3506static enum ice_status
3507ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3508		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3509		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
3510		   struct ice_sq_cd *cd)
3511{
3512	struct ice_aqc_dis_txq_item *item;
3513	struct ice_aqc_dis_txqs *cmd;
3514	struct ice_aq_desc desc;
3515	enum ice_status status;
3516	u16 i, sz = 0;
3517
3518	cmd = &desc.params.dis_txqs;
3519	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3520
3521	/* qg_list can be NULL only in VM/VF reset flow */
3522	if (!qg_list && !rst_src)
3523		return ICE_ERR_PARAM;
3524
3525	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3526		return ICE_ERR_PARAM;
3527
3528	cmd->num_entries = num_qgrps;
3529
3530	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3531					    ICE_AQC_Q_DIS_TIMEOUT_M);
3532
3533	switch (rst_src) {
3534	case ICE_VM_RESET:
3535		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3536		cmd->vmvf_and_timeout |=
3537			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3538		break;
3539	case ICE_VF_RESET:
3540		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3541		/* In this case, FW expects vmvf_num to be absolute VF ID */
3542		cmd->vmvf_and_timeout |=
3543			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3544				    ICE_AQC_Q_DIS_VMVF_NUM_M);
3545		break;
3546	case ICE_NO_RESET:
3547	default:
3548		break;
3549	}
3550
3551	/* flush pipe on timeout */
3552	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3553	/* If no queue group info, we are in a reset flow. Issue the AQ */
3554	if (!qg_list)
3555		goto do_aq;
3556
3557	/* set RD bit to indicate that command buffer is provided by the driver
3558	 * and it needs to be read by the firmware
3559	 */
3560	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3561
3562	for (i = 0, item = qg_list; i < num_qgrps; i++) {
3563		u16 item_size = struct_size(item, q_id, item->num_qs);
3564
3565		/* If the num of queues is even, add 2 bytes of padding */
3566		if ((item->num_qs % 2) == 0)
3567			item_size += 2;
3568
3569		sz += item_size;
3570
3571		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3572	}
3573
3574	if (buf_size != sz)
3575		return ICE_ERR_PARAM;
3576
3577do_aq:
3578	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3579	if (status) {
3580		if (!qg_list)
3581			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3582				  vmvf_num, hw->adminq.sq_last_status);
3583		else
3584			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3585				  le16_to_cpu(qg_list[0].q_id[0]),
3586				  hw->adminq.sq_last_status);
3587	}
3588	return status;
3589}
3590
3591/* End of FW Admin Queue command wrappers */
3592
3593/**
3594 * ice_write_byte - write a byte to a packed context structure
3595 * @src_ctx:  the context structure to read from
3596 * @dest_ctx: the context to be written to
3597 * @ce_info:  a description of the struct to be filled
3598 */
3599static void
3600ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3601{
3602	u8 src_byte, dest_byte, mask;
3603	u8 *from, *dest;
3604	u16 shift_width;
3605
3606	/* copy from the next struct field */
3607	from = src_ctx + ce_info->offset;
3608
3609	/* prepare the bits and mask */
3610	shift_width = ce_info->lsb % 8;
3611	mask = (u8)(BIT(ce_info->width) - 1);
3612
3613	src_byte = *from;
3614	src_byte &= mask;
3615
3616	/* shift to correct alignment */
3617	mask <<= shift_width;
3618	src_byte <<= shift_width;
3619
3620	/* get the current bits from the target bit string */
3621	dest = dest_ctx + (ce_info->lsb / 8);
3622
3623	memcpy(&dest_byte, dest, sizeof(dest_byte));
3624
3625	dest_byte &= ~mask;	/* get the bits not changing */
3626	dest_byte |= src_byte;	/* add in the new bits */
3627
3628	/* put it all back */
3629	memcpy(dest, &dest_byte, sizeof(dest_byte));
3630}
3631
3632/**
3633 * ice_write_word - write a word to a packed context structure
3634 * @src_ctx:  the context structure to read from
3635 * @dest_ctx: the context to be written to
3636 * @ce_info:  a description of the struct to be filled
3637 */
3638static void
3639ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3640{
3641	u16 src_word, mask;
3642	__le16 dest_word;
3643	u8 *from, *dest;
3644	u16 shift_width;
3645
3646	/* copy from the next struct field */
3647	from = src_ctx + ce_info->offset;
3648
3649	/* prepare the bits and mask */
3650	shift_width = ce_info->lsb % 8;
3651	mask = BIT(ce_info->width) - 1;
3652
3653	/* don't swizzle the bits until after the mask because the mask bits
3654	 * will be in a different bit position on big endian machines
3655	 */
3656	src_word = *(u16 *)from;
3657	src_word &= mask;
3658
3659	/* shift to correct alignment */
3660	mask <<= shift_width;
3661	src_word <<= shift_width;
3662
3663	/* get the current bits from the target bit string */
3664	dest = dest_ctx + (ce_info->lsb / 8);
3665
3666	memcpy(&dest_word, dest, sizeof(dest_word));
3667
3668	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
3669	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */
3670
3671	/* put it all back */
3672	memcpy(dest, &dest_word, sizeof(dest_word));
3673}
3674
3675/**
3676 * ice_write_dword - write a dword to a packed context structure
3677 * @src_ctx:  the context structure to read from
3678 * @dest_ctx: the context to be written to
3679 * @ce_info:  a description of the struct to be filled
3680 */
3681static void
3682ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3683{
3684	u32 src_dword, mask;
3685	__le32 dest_dword;
3686	u8 *from, *dest;
3687	u16 shift_width;
3688
3689	/* copy from the next struct field */
3690	from = src_ctx + ce_info->offset;
3691
3692	/* prepare the bits and mask */
3693	shift_width = ce_info->lsb % 8;
3694
3695	/* if the field width is exactly 32 on an x86 machine, then the shift
3696	 * operation will not work because the SHL instructions count is masked
3697	 * to 5 bits so the shift will do nothing
3698	 */
3699	if (ce_info->width < 32)
3700		mask = BIT(ce_info->width) - 1;
3701	else
3702		mask = (u32)~0;
3703
3704	/* don't swizzle the bits until after the mask because the mask bits
3705	 * will be in a different bit position on big endian machines
3706	 */
3707	src_dword = *(u32 *)from;
3708	src_dword &= mask;
3709
3710	/* shift to correct alignment */
3711	mask <<= shift_width;
3712	src_dword <<= shift_width;
3713
3714	/* get the current bits from the target bit string */
3715	dest = dest_ctx + (ce_info->lsb / 8);
3716
3717	memcpy(&dest_dword, dest, sizeof(dest_dword));
3718
3719	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
3720	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */
3721
3722	/* put it all back */
3723	memcpy(dest, &dest_dword, sizeof(dest_dword));
3724}
3725
3726/**
3727 * ice_write_qword - write a qword to a packed context structure
3728 * @src_ctx:  the context structure to read from
3729 * @dest_ctx: the context to be written to
3730 * @ce_info:  a description of the struct to be filled
3731 */
3732static void
3733ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3734{
3735	u64 src_qword, mask;
3736	__le64 dest_qword;
3737	u8 *from, *dest;
3738	u16 shift_width;
3739
3740	/* copy from the next struct field */
3741	from = src_ctx + ce_info->offset;
3742
3743	/* prepare the bits and mask */
3744	shift_width = ce_info->lsb % 8;
3745
3746	/* if the field width is exactly 64 on an x86 machine, then the shift
3747	 * operation will not work because the SHL instructions count is masked
3748	 * to 6 bits so the shift will do nothing
3749	 */
3750	if (ce_info->width < 64)
3751		mask = BIT_ULL(ce_info->width) - 1;
3752	else
3753		mask = (u64)~0;
3754
3755	/* don't swizzle the bits until after the mask because the mask bits
3756	 * will be in a different bit position on big endian machines
3757	 */
3758	src_qword = *(u64 *)from;
3759	src_qword &= mask;
3760
3761	/* shift to correct alignment */
3762	mask <<= shift_width;
3763	src_qword <<= shift_width;
3764
3765	/* get the current bits from the target bit string */
3766	dest = dest_ctx + (ce_info->lsb / 8);
3767
3768	memcpy(&dest_qword, dest, sizeof(dest_qword));
3769
3770	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
3771	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */
3772
3773	/* put it all back */
3774	memcpy(dest, &dest_qword, sizeof(dest_qword));
3775}
3776
3777/**
3778 * ice_set_ctx - set context bits in packed structure
3779 * @hw: pointer to the hardware structure
3780 * @src_ctx:  pointer to a generic non-packed context structure
3781 * @dest_ctx: pointer to memory for the packed structure
3782 * @ce_info:  a description of the structure to be transformed
3783 */
3784enum ice_status
3785ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3786	    const struct ice_ctx_ele *ce_info)
3787{
3788	int f;
3789
3790	for (f = 0; ce_info[f].width; f++) {
3791		/* We have to deal with each element of the FW response
3792		 * using the correct size so that we are correct regardless
3793		 * of the endianness of the machine.
3794		 */
3795		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3796			ice_debug(hw, ICE_DBG_QCTX,
3797				  "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3798				  f, ce_info[f].width, ce_info[f].size_of);
3799			continue;
3800		}
3801		switch (ce_info[f].size_of) {
3802		case sizeof(u8):
3803			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3804			break;
3805		case sizeof(u16):
3806			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3807			break;
3808		case sizeof(u32):
3809			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3810			break;
3811		case sizeof(u64):
3812			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3813			break;
3814		default:
3815			return ICE_ERR_INVAL_SIZE;
3816		}
3817	}
3818
3819	return 0;
3820}
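
/* Illustrative sketch: a descriptor table for ice_set_ctx(). The
 * .offset/.size_of/.width/.lsb members are the ones dereferenced by the
 * write helpers above; struct example_ctx itself is hypothetical. A zero
 * .width entry terminates the loop in ice_set_ctx().
 */
struct example_ctx {
	u16 head;	/* packed into bits 0-12 */
	u8 ena;		/* packed into bit 13 */
};

static const struct ice_ctx_ele example_ctx_info[] = {
	{ .offset = offsetof(struct example_ctx, head),
	  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
	{ .offset = offsetof(struct example_ctx, ena),
	  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
	{ 0 },
};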
3821
3822/**
3823 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3824 * @hw: pointer to the HW struct
3825 * @vsi_handle: software VSI handle
3826 * @tc: TC number
3827 * @q_handle: software queue handle
3828 */
3829struct ice_q_ctx *
3830ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3831{
3832	struct ice_vsi_ctx *vsi;
3833	struct ice_q_ctx *q_ctx;
3834
3835	vsi = ice_get_vsi_ctx(hw, vsi_handle);
3836	if (!vsi)
3837		return NULL;
3838	if (q_handle >= vsi->num_lan_q_entries[tc])
3839		return NULL;
3840	if (!vsi->lan_q_ctx[tc])
3841		return NULL;
3842	q_ctx = vsi->lan_q_ctx[tc];
3843	return &q_ctx[q_handle];
3844}
3845
3846/**
3847 * ice_ena_vsi_txq
3848 * @pi: port information structure
3849 * @vsi_handle: software VSI handle
3850 * @tc: TC number
3851 * @q_handle: software queue handle
3852 * @num_qgrps: Number of added queue groups
3853 * @buf: list of queue groups to be added
3854 * @buf_size: size of buffer for indirect command
3855 * @cd: pointer to command details structure or NULL
3856 *
3857 * This function adds one LAN queue
3858 */
3859enum ice_status
3860ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3861		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3862		struct ice_sq_cd *cd)
3863{
3864	struct ice_aqc_txsched_elem_data node = { 0 };
3865	struct ice_sched_node *parent;
3866	struct ice_q_ctx *q_ctx;
3867	enum ice_status status;
3868	struct ice_hw *hw;
3869
3870	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3871		return ICE_ERR_CFG;
3872
3873	if (num_qgrps > 1 || buf->num_txqs > 1)
3874		return ICE_ERR_MAX_LIMIT;
3875
3876	hw = pi->hw;
3877
3878	if (!ice_is_vsi_valid(hw, vsi_handle))
3879		return ICE_ERR_PARAM;
3880
3881	mutex_lock(&pi->sched_lock);
3882
3883	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3884	if (!q_ctx) {
3885		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3886			  q_handle);
3887		status = ICE_ERR_PARAM;
3888		goto ena_txq_exit;
3889	}
3890
3891	/* find a parent node */
3892	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3893					    ICE_SCHED_NODE_OWNER_LAN);
3894	if (!parent) {
3895		status = ICE_ERR_PARAM;
3896		goto ena_txq_exit;
3897	}
3898
3899	buf->parent_teid = parent->info.node_teid;
3900	node.parent_teid = parent->info.node_teid;
3901	/* Mark the values in the "generic" section as valid. The default
3902	 * value in the "generic" section is zero. This means that:
3903	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3904	 * - 0 priority among siblings, indicated by Bit 1-3.
3905	 * - WFQ, indicated by Bit 4.
3906	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3907	 * Bit 5-6.
3908	 * - Bit 7 is reserved.
3909	 * Without setting the generic section as valid in valid_sections, the
3910	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3911	 */
3912	buf->txqs[0].info.valid_sections =
3913		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
3914		ICE_AQC_ELEM_VALID_EIR;
3915	buf->txqs[0].info.generic = 0;
3916	buf->txqs[0].info.cir_bw.bw_profile_idx =
3917		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3918	buf->txqs[0].info.cir_bw.bw_alloc =
3919		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3920	buf->txqs[0].info.eir_bw.bw_profile_idx =
3921		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3922	buf->txqs[0].info.eir_bw.bw_alloc =
3923		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3924
3925	/* add the LAN queue */
3926	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3927	if (status) {
3928		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3929			  le16_to_cpu(buf->txqs[0].txq_id),
3930			  hw->adminq.sq_last_status);
3931		goto ena_txq_exit;
3932	}
3933
3934	node.node_teid = buf->txqs[0].q_teid;
3935	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3936	q_ctx->q_handle = q_handle;
3937	q_ctx->q_teid = le32_to_cpu(node.node_teid);
3938
3939	/* add a leaf node into scheduler tree queue layer */
3940	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3941	if (!status)
3942		status = ice_sched_replay_q_bw(pi, q_ctx);
3943
3944ena_txq_exit:
3945	mutex_unlock(&pi->sched_lock);
3946	return status;
3947}
3948
3949/**
3950 * ice_dis_vsi_txq
3951 * @pi: port information structure
3952 * @vsi_handle: software VSI handle
3953 * @tc: TC number
3954 * @num_queues: number of queues
3955 * @q_handles: pointer to software queue handle array
3956 * @q_ids: pointer to the q_id array
3957 * @q_teids: pointer to queue node teids
3958 * @rst_src: if called due to reset, specifies the reset source
3959 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3960 * @cd: pointer to command details structure or NULL
3961 *
3962 * This function removes queues and their corresponding nodes in SW DB
3963 */
3964enum ice_status
3965ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3966		u16 *q_handles, u16 *q_ids, u32 *q_teids,
3967		enum ice_disq_rst_src rst_src, u16 vmvf_num,
3968		struct ice_sq_cd *cd)
3969{
3970	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3971	struct ice_aqc_dis_txq_item *qg_list;
3972	struct ice_q_ctx *q_ctx;
3973	struct ice_hw *hw;
3974	u16 i, buf_size;
3975
3976	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3977		return ICE_ERR_CFG;
3978
3979	hw = pi->hw;
3980
3981	if (!num_queues) {
3982		/* if the queues are already disabled but the disable queue
3983		 * command still has to be sent to complete the VF reset, then
3984		 * call ice_aq_dis_lan_txq without any queue information
3985		 */
3986		if (rst_src)
3987			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
3988						  vmvf_num, NULL);
3989		return ICE_ERR_CFG;
3990	}
3991
3992	buf_size = struct_size(qg_list, q_id, 1);
3993	qg_list = kzalloc(buf_size, GFP_KERNEL);
3994	if (!qg_list)
3995		return ICE_ERR_NO_MEMORY;
3996
3997	mutex_lock(&pi->sched_lock);
3998
3999	for (i = 0; i < num_queues; i++) {
4000		struct ice_sched_node *node;
4001
4002		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4003		if (!node)
4004			continue;
4005		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4006		if (!q_ctx) {
4007			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4008				  q_handles[i]);
4009			continue;
4010		}
4011		if (q_ctx->q_handle != q_handles[i]) {
4012			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
4013				  q_ctx->q_handle, q_handles[i]);
4014			continue;
4015		}
4016		qg_list->parent_teid = node->info.parent_teid;
4017		qg_list->num_qs = 1;
4018		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4019		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4020					    vmvf_num, cd);
4021
4022		if (status)
4023			break;
4024		ice_free_sched_node(pi, node);
4025		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4026	}
4027	mutex_unlock(&pi->sched_lock);
4028	kfree(qg_list);
4029	return status;
4030}
4031
4032/**
4033 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4034 * @pi: port information structure
4035 * @vsi_handle: software VSI handle
4036 * @tc_bitmap: TC bitmap
4037 * @maxqs: max queues array per TC
4038 * @owner: LAN or RDMA
4039 *
4040 * This function adds/updates the VSI queues per TC.
4041 */
4042static enum ice_status
4043ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4044	       u16 *maxqs, u8 owner)
4045{
4046	enum ice_status status = 0;
4047	u8 i;
4048
4049	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4050		return ICE_ERR_CFG;
4051
4052	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4053		return ICE_ERR_PARAM;
4054
4055	mutex_lock(&pi->sched_lock);
4056
4057	ice_for_each_traffic_class(i) {
4058		/* configuration is possible only if TC node is present */
4059		if (!ice_sched_get_tc_node(pi, i))
4060			continue;
4061
4062		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4063					   ice_is_tc_ena(tc_bitmap, i));
4064		if (status)
4065			break;
4066	}
4067
4068	mutex_unlock(&pi->sched_lock);
4069	return status;
4070}
4071
4072/**
4073 * ice_cfg_vsi_lan - configure VSI LAN queues
4074 * @pi: port information structure
4075 * @vsi_handle: software VSI handle
4076 * @tc_bitmap: TC bitmap
4077 * @max_lanqs: max LAN queues array per TC
4078 *
4079 * This function adds/updates the VSI LAN queues per TC.
4080 */
4081enum ice_status
4082ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4083		u16 *max_lanqs)
4084{
4085	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4086			      ICE_SCHED_NODE_OWNER_LAN);
4087}
4088
4089/**
4090 * ice_replay_pre_init - replay pre initialization
4091 * @hw: pointer to the HW struct
4092 *
4093 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4094 */
4095static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4096{
4097	struct ice_switch_info *sw = hw->switch_info;
4098	u8 i;
4099
4100	/* Delete old entries from replay filter list head if there is any */
4101	ice_rm_all_sw_replay_rule_info(hw);
4102	/* In start of replay, move entries into replay_rules list, it
4103	 * will allow adding rules entries back to filt_rules list,
4104	 * which is operational list.
4105	 */
4106	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4107		list_replace_init(&sw->recp_list[i].filt_rules,
4108				  &sw->recp_list[i].filt_replay_rules);
4109
4110	return 0;
4111}
4112
4113/**
4114 * ice_replay_vsi - replay VSI configuration
4115 * @hw: pointer to the HW struct
4116 * @vsi_handle: driver VSI handle
4117 *
4118 * Restore all VSI configuration after reset. It is required to call this
4119 * function with main VSI first.
4120 */
4121enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4122{
4123	enum ice_status status;
4124
4125	if (!ice_is_vsi_valid(hw, vsi_handle))
4126		return ICE_ERR_PARAM;
4127
4128	/* Replay pre-initialization if there is any */
4129	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4130		status = ice_replay_pre_init(hw);
4131		if (status)
4132			return status;
4133	}
4134	/* Replay per VSI all RSS configurations */
4135	status = ice_replay_rss_cfg(hw, vsi_handle);
4136	if (status)
4137		return status;
4138	/* Replay per VSI all filters */
4139	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4140	return status;
4141}
4142
4143/**
4144 * ice_replay_post - post replay configuration cleanup
4145 * @hw: pointer to the HW struct
4146 *
4147 * Post replay cleanup.
4148 */
4149void ice_replay_post(struct ice_hw *hw)
4150{
4151	/* Delete old entries from replay filter list head */
4152	ice_rm_all_sw_replay_rule_info(hw);
4153}
4154
4155/**
4156 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4157 * @hw: ptr to the hardware info
4158 * @reg: offset of 64 bit HW register to read from
4159 * @prev_stat_loaded: bool to specify if previous stats are loaded
4160 * @prev_stat: ptr to previous loaded stat value
4161 * @cur_stat: ptr to current stat value
4162 */
4163void
4164ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4165		  u64 *prev_stat, u64 *cur_stat)
4166{
4167	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4168
4169	/* device stats are not reset at PFR; they likely will not be zeroed
4170	 * when the driver starts. Thus, save the value from the first read
4171	 * without adding to the statistic value so that we report stats which
4172	 * count up from zero.
4173	 */
4174	if (!prev_stat_loaded) {
4175		*prev_stat = new_data;
4176		return;
4177	}
4178
4179	/* Calculate the difference between the new and old values, and then
4180	 * add it to the software stat value.
4181	 */
4182	if (new_data >= *prev_stat)
4183		*cur_stat += new_data - *prev_stat;
4184	else
4185		/* to manage the potential roll-over */
4186		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4187
4188	/* Update the previously stored value to prepare for next read */
4189	*prev_stat = new_data;
4190}
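
/* Worked example of the rollover branch above: with a previous 40-bit
 * read of 0xFFFFFFFFF0 and a new read of 0x10 after the counter wraps,
 * the delta is (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, so 0x20 is
 * added to *cur_stat instead of a huge underflowed value.
 */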
4191
4192/**
4193 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4194 * @hw: ptr to the hardware info
4195 * @reg: offset of HW register to read from
4196 * @prev_stat_loaded: bool to specify if previous stats are loaded
4197 * @prev_stat: ptr to previous loaded stat value
4198 * @cur_stat: ptr to current stat value
4199 */
4200void
4201ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4202		  u64 *prev_stat, u64 *cur_stat)
4203{
4204	u32 new_data;
4205
4206	new_data = rd32(hw, reg);
4207
4208	/* device stats are not reset at PFR; they likely will not be zeroed
4209	 * when the driver starts. Thus, save the value from the first read
4210	 * without adding to the statistic value so that we report stats which
4211	 * count up from zero.
4212	 */
4213	if (!prev_stat_loaded) {
4214		*prev_stat = new_data;
4215		return;
4216	}
4217
4218	/* Calculate the difference between the new and old values, and then
4219	 * add it to the software stat value.
4220	 */
4221	if (new_data >= *prev_stat)
4222		*cur_stat += new_data - *prev_stat;
4223	else
4224		/* to manage the potential roll-over */
4225		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4226
4227	/* Update the previously stored value to prepare for next read */
4228	*prev_stat = new_data;
4229}
4230
4231/**
4232 * ice_sched_query_elem - query element information from HW
4233 * @hw: pointer to the HW struct
4234 * @node_teid: node TEID to be queried
4235 * @buf: buffer to element information
4236 *
4237 * This function queries HW element information
4238 */
4239enum ice_status
4240ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4241		     struct ice_aqc_txsched_elem_data *buf)
4242{
4243	u16 buf_size, num_elem_ret = 0;
4244	enum ice_status status;
4245
4246	buf_size = sizeof(*buf);
4247	memset(buf, 0, buf_size);
4248	buf->node_teid = cpu_to_le32(node_teid);
4249	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4250					  NULL);
4251	if (status || num_elem_ret != 1)
4252		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4253	return status;
4254}
4255
4256/**
4257 * ice_fw_supports_link_override
4258 * @hw: pointer to the hardware structure
4259 *
4260 * Checks if the firmware supports link override
4261 */
4262bool ice_fw_supports_link_override(struct ice_hw *hw)
4263{
4264	/* Currently, only supported for E810 devices */
4265	if (hw->mac_type != ICE_MAC_E810)
4266		return false;
4267
4268	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4269		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4270			return true;
4271		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4272		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4273			return true;
4274	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4275		return true;
4276	}
4277
4278	return false;
4279}
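
/* Illustrative sketch: the check above is a lexicographic
 * (major, minor, patch) "at least" comparison. A generic form under the
 * same semantics (the example_* name is hypothetical):
 */
static bool
example_api_ver_at_least(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver != maj)
		return hw->api_maj_ver > maj;
	if (hw->api_min_ver != min)
		return hw->api_min_ver > min;
	return hw->api_patch >= patch;
}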
4280
4281/**
4282 * ice_get_link_default_override
4283 * @ldo: pointer to the link default override struct
4284 * @pi: pointer to the port info struct
4285 *
4286 * Gets the link default override for a port
4287 */
4288enum ice_status
4289ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4290			      struct ice_port_info *pi)
4291{
4292	u16 i, tlv, tlv_len, tlv_start, buf, offset;
4293	struct ice_hw *hw = pi->hw;
4294	enum ice_status status;
4295
4296	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4297					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4298	if (status) {
4299		ice_debug(hw, ICE_DBG_INIT,
4300			  "Failed to read link override TLV.\n");
4301		return status;
4302	}
4303
4304	/* Each port has its own config; calculate for our port */
4305	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4306		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4307
4308	/* link options first */
4309	status = ice_read_sr_word(hw, tlv_start, &buf);
4310	if (status) {
4311		ice_debug(hw, ICE_DBG_INIT,
4312			  "Failed to read override link options.\n");
4313		return status;
4314	}
4315	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4316	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4317		ICE_LINK_OVERRIDE_PHY_CFG_S;
4318
4319	/* FEC options */
4320	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4321	status = ice_read_sr_word(hw, offset, &buf);
4322	if (status) {
4323		ice_debug(hw, ICE_DBG_INIT,
4324			  "Failed to read override FEC options.\n");
4325		return status;
4326	}
4327	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4328
4329	/* PHY types low */
4330	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4331	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4332		status = ice_read_sr_word(hw, (offset + i), &buf);
4333		if (status) {
4334			ice_debug(hw, ICE_DBG_INIT,
4335				  "Failed to read override PHY type low.\n");
4336			return status;
4337		}
4338		/* shift 16 bits at a time to fill 64 bits */
4339		ldo->phy_type_low |= ((u64)buf << (i * 16));
4340	}
4341
4342	/* PHY types high */
4343	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4344		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4345	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4346		status = ice_read_sr_word(hw, (offset + i), &buf);
4347		if (status) {
4348			ice_debug(hw, ICE_DBG_INIT,
4349				  "Failed to read override PHY type high.\n");
4350			return status;
4351		}
4352		/* shift 16 bits at a time to fill 64 bits */
4353		ldo->phy_type_high |= ((u64)buf << (i * 16));
4354	}
4355
4356	return status;
4357}
4358
4359/**
4360 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4361 * @caps: get PHY capability data
4362 */
4363bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4364{
4365	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4366	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4367				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
4368				       ICE_AQC_PHY_AN_EN_CLAUSE37))
4369		return true;
4370
4371	return false;
4372}
4373
4374/**
4375 * ice_aq_set_lldp_mib - Set the LLDP MIB
4376 * @hw: pointer to the HW struct
4377 * @mib_type: Local, Remote or both Local and Remote MIBs
4378 * @buf: pointer to the caller-supplied buffer to store the MIB block
4379 * @buf_size: size of the buffer (in bytes)
4380 * @cd: pointer to command details structure or NULL
4381 *
4382 * Set the LLDP MIB. (0x0A08)
4383 */
4384enum ice_status
4385ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4386		    struct ice_sq_cd *cd)
4387{
4388	struct ice_aqc_lldp_set_local_mib *cmd;
4389	struct ice_aq_desc desc;
4390
4391	cmd = &desc.params.lldp_set_mib;
4392
4393	if (buf_size == 0 || !buf)
4394		return ICE_ERR_PARAM;
4395
4396	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4397
4398	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
4399	desc.datalen = cpu_to_le16(buf_size);
4400
4401	cmd->type = mib_type;
4402	cmd->length = cpu_to_le16(buf_size);
4403
4404	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4405}