   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2013 - 2018 Intel Corporation. */
   3
   4#include "i40e.h"
   5#include "i40e_type.h"
   6#include "i40e_adminq.h"
   7#include "i40e_prototype.h"
   8#include <linux/avf/virtchnl.h>
   9
  10/**
  11 * i40e_set_mac_type - Sets MAC type
  12 * @hw: pointer to the HW structure
  13 *
  14 * This function sets the mac type of the adapter based on the
  15 * vendor ID and device ID stored in the hw structure.
  16 **/
  17i40e_status i40e_set_mac_type(struct i40e_hw *hw)
  18{
  19	i40e_status status = 0;
  20
  21	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
  22		switch (hw->device_id) {
  23		case I40E_DEV_ID_SFP_XL710:
  24		case I40E_DEV_ID_QEMU:
  25		case I40E_DEV_ID_KX_B:
  26		case I40E_DEV_ID_KX_C:
  27		case I40E_DEV_ID_QSFP_A:
  28		case I40E_DEV_ID_QSFP_B:
  29		case I40E_DEV_ID_QSFP_C:
  30		case I40E_DEV_ID_10G_BASE_T:
  31		case I40E_DEV_ID_10G_BASE_T4:
  32		case I40E_DEV_ID_10G_B:
  33		case I40E_DEV_ID_10G_SFP:
  34		case I40E_DEV_ID_20G_KR2:
  35		case I40E_DEV_ID_20G_KR2_A:
  36		case I40E_DEV_ID_25G_B:
  37		case I40E_DEV_ID_25G_SFP28:
  38		case I40E_DEV_ID_X710_N3000:
  39		case I40E_DEV_ID_XXV710_N3000:
  40			hw->mac.type = I40E_MAC_XL710;
  41			break;
  42		case I40E_DEV_ID_KX_X722:
  43		case I40E_DEV_ID_QSFP_X722:
  44		case I40E_DEV_ID_SFP_X722:
  45		case I40E_DEV_ID_1G_BASE_T_X722:
  46		case I40E_DEV_ID_10G_BASE_T_X722:
  47		case I40E_DEV_ID_SFP_I_X722:
  48			hw->mac.type = I40E_MAC_X722;
  49			break;
  50		default:
  51			hw->mac.type = I40E_MAC_GENERIC;
  52			break;
  53		}
  54	} else {
  55		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
  56	}
  57
  58	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
  59		  hw->mac.type, status);
  60	return status;
  61}
  62
  63/**
  64 * i40e_aq_str - convert AQ err code to a string
  65 * @hw: pointer to the HW structure
  66 * @aq_err: the AQ error code to convert
  67 **/
  68const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
  69{
  70	switch (aq_err) {
  71	case I40E_AQ_RC_OK:
  72		return "OK";
  73	case I40E_AQ_RC_EPERM:
  74		return "I40E_AQ_RC_EPERM";
  75	case I40E_AQ_RC_ENOENT:
  76		return "I40E_AQ_RC_ENOENT";
  77	case I40E_AQ_RC_ESRCH:
  78		return "I40E_AQ_RC_ESRCH";
  79	case I40E_AQ_RC_EINTR:
  80		return "I40E_AQ_RC_EINTR";
  81	case I40E_AQ_RC_EIO:
  82		return "I40E_AQ_RC_EIO";
  83	case I40E_AQ_RC_ENXIO:
  84		return "I40E_AQ_RC_ENXIO";
  85	case I40E_AQ_RC_E2BIG:
  86		return "I40E_AQ_RC_E2BIG";
  87	case I40E_AQ_RC_EAGAIN:
  88		return "I40E_AQ_RC_EAGAIN";
  89	case I40E_AQ_RC_ENOMEM:
  90		return "I40E_AQ_RC_ENOMEM";
  91	case I40E_AQ_RC_EACCES:
  92		return "I40E_AQ_RC_EACCES";
  93	case I40E_AQ_RC_EFAULT:
  94		return "I40E_AQ_RC_EFAULT";
  95	case I40E_AQ_RC_EBUSY:
  96		return "I40E_AQ_RC_EBUSY";
  97	case I40E_AQ_RC_EEXIST:
  98		return "I40E_AQ_RC_EEXIST";
  99	case I40E_AQ_RC_EINVAL:
 100		return "I40E_AQ_RC_EINVAL";
 101	case I40E_AQ_RC_ENOTTY:
 102		return "I40E_AQ_RC_ENOTTY";
 103	case I40E_AQ_RC_ENOSPC:
 104		return "I40E_AQ_RC_ENOSPC";
 105	case I40E_AQ_RC_ENOSYS:
 106		return "I40E_AQ_RC_ENOSYS";
 107	case I40E_AQ_RC_ERANGE:
 108		return "I40E_AQ_RC_ERANGE";
 109	case I40E_AQ_RC_EFLUSHED:
 110		return "I40E_AQ_RC_EFLUSHED";
 111	case I40E_AQ_RC_BAD_ADDR:
 112		return "I40E_AQ_RC_BAD_ADDR";
 113	case I40E_AQ_RC_EMODE:
 114		return "I40E_AQ_RC_EMODE";
 115	case I40E_AQ_RC_EFBIG:
 116		return "I40E_AQ_RC_EFBIG";
 117	}
 118
 119	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
 120	return hw->err_str;
 121}
 122
 123/**
 124 * i40e_stat_str - convert status err code to a string
 125 * @hw: pointer to the HW structure
 126 * @stat_err: the status error code to convert
 127 **/
 128const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 129{
 130	switch (stat_err) {
 131	case 0:
 132		return "OK";
 133	case I40E_ERR_NVM:
 134		return "I40E_ERR_NVM";
 135	case I40E_ERR_NVM_CHECKSUM:
 136		return "I40E_ERR_NVM_CHECKSUM";
 137	case I40E_ERR_PHY:
 138		return "I40E_ERR_PHY";
 139	case I40E_ERR_CONFIG:
 140		return "I40E_ERR_CONFIG";
 141	case I40E_ERR_PARAM:
 142		return "I40E_ERR_PARAM";
 143	case I40E_ERR_MAC_TYPE:
 144		return "I40E_ERR_MAC_TYPE";
 145	case I40E_ERR_UNKNOWN_PHY:
 146		return "I40E_ERR_UNKNOWN_PHY";
 147	case I40E_ERR_LINK_SETUP:
 148		return "I40E_ERR_LINK_SETUP";
 149	case I40E_ERR_ADAPTER_STOPPED:
 150		return "I40E_ERR_ADAPTER_STOPPED";
 151	case I40E_ERR_INVALID_MAC_ADDR:
 152		return "I40E_ERR_INVALID_MAC_ADDR";
 153	case I40E_ERR_DEVICE_NOT_SUPPORTED:
 154		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
 155	case I40E_ERR_MASTER_REQUESTS_PENDING:
 156		return "I40E_ERR_MASTER_REQUESTS_PENDING";
 157	case I40E_ERR_INVALID_LINK_SETTINGS:
 158		return "I40E_ERR_INVALID_LINK_SETTINGS";
 159	case I40E_ERR_AUTONEG_NOT_COMPLETE:
 160		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
 161	case I40E_ERR_RESET_FAILED:
 162		return "I40E_ERR_RESET_FAILED";
 163	case I40E_ERR_SWFW_SYNC:
 164		return "I40E_ERR_SWFW_SYNC";
 165	case I40E_ERR_NO_AVAILABLE_VSI:
 166		return "I40E_ERR_NO_AVAILABLE_VSI";
 167	case I40E_ERR_NO_MEMORY:
 168		return "I40E_ERR_NO_MEMORY";
 169	case I40E_ERR_BAD_PTR:
 170		return "I40E_ERR_BAD_PTR";
 171	case I40E_ERR_RING_FULL:
 172		return "I40E_ERR_RING_FULL";
 173	case I40E_ERR_INVALID_PD_ID:
 174		return "I40E_ERR_INVALID_PD_ID";
 175	case I40E_ERR_INVALID_QP_ID:
 176		return "I40E_ERR_INVALID_QP_ID";
 177	case I40E_ERR_INVALID_CQ_ID:
 178		return "I40E_ERR_INVALID_CQ_ID";
 179	case I40E_ERR_INVALID_CEQ_ID:
 180		return "I40E_ERR_INVALID_CEQ_ID";
 181	case I40E_ERR_INVALID_AEQ_ID:
 182		return "I40E_ERR_INVALID_AEQ_ID";
 183	case I40E_ERR_INVALID_SIZE:
 184		return "I40E_ERR_INVALID_SIZE";
 185	case I40E_ERR_INVALID_ARP_INDEX:
 186		return "I40E_ERR_INVALID_ARP_INDEX";
 187	case I40E_ERR_INVALID_FPM_FUNC_ID:
 188		return "I40E_ERR_INVALID_FPM_FUNC_ID";
 189	case I40E_ERR_QP_INVALID_MSG_SIZE:
 190		return "I40E_ERR_QP_INVALID_MSG_SIZE";
 191	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
 192		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
 193	case I40E_ERR_INVALID_FRAG_COUNT:
 194		return "I40E_ERR_INVALID_FRAG_COUNT";
 195	case I40E_ERR_QUEUE_EMPTY:
 196		return "I40E_ERR_QUEUE_EMPTY";
 197	case I40E_ERR_INVALID_ALIGNMENT:
 198		return "I40E_ERR_INVALID_ALIGNMENT";
 199	case I40E_ERR_FLUSHED_QUEUE:
 200		return "I40E_ERR_FLUSHED_QUEUE";
 201	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
 202		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
 203	case I40E_ERR_INVALID_IMM_DATA_SIZE:
 204		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
 205	case I40E_ERR_TIMEOUT:
 206		return "I40E_ERR_TIMEOUT";
 207	case I40E_ERR_OPCODE_MISMATCH:
 208		return "I40E_ERR_OPCODE_MISMATCH";
 209	case I40E_ERR_CQP_COMPL_ERROR:
 210		return "I40E_ERR_CQP_COMPL_ERROR";
 211	case I40E_ERR_INVALID_VF_ID:
 212		return "I40E_ERR_INVALID_VF_ID";
 213	case I40E_ERR_INVALID_HMCFN_ID:
 214		return "I40E_ERR_INVALID_HMCFN_ID";
 215	case I40E_ERR_BACKING_PAGE_ERROR:
 216		return "I40E_ERR_BACKING_PAGE_ERROR";
 217	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
 218		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
 219	case I40E_ERR_INVALID_PBLE_INDEX:
 220		return "I40E_ERR_INVALID_PBLE_INDEX";
 221	case I40E_ERR_INVALID_SD_INDEX:
 222		return "I40E_ERR_INVALID_SD_INDEX";
 223	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
 224		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
 225	case I40E_ERR_INVALID_SD_TYPE:
 226		return "I40E_ERR_INVALID_SD_TYPE";
 227	case I40E_ERR_MEMCPY_FAILED:
 228		return "I40E_ERR_MEMCPY_FAILED";
 229	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
 230		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
 231	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
 232		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
 233	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
 234		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
 235	case I40E_ERR_SRQ_ENABLED:
 236		return "I40E_ERR_SRQ_ENABLED";
 237	case I40E_ERR_ADMIN_QUEUE_ERROR:
 238		return "I40E_ERR_ADMIN_QUEUE_ERROR";
 239	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
 240		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
 241	case I40E_ERR_BUF_TOO_SHORT:
 242		return "I40E_ERR_BUF_TOO_SHORT";
 243	case I40E_ERR_ADMIN_QUEUE_FULL:
 244		return "I40E_ERR_ADMIN_QUEUE_FULL";
 245	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
 246		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
 247	case I40E_ERR_BAD_IWARP_CQE:
 248		return "I40E_ERR_BAD_IWARP_CQE";
 249	case I40E_ERR_NVM_BLANK_MODE:
 250		return "I40E_ERR_NVM_BLANK_MODE";
 251	case I40E_ERR_NOT_IMPLEMENTED:
 252		return "I40E_ERR_NOT_IMPLEMENTED";
 253	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
 254		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
 255	case I40E_ERR_DIAG_TEST_FAILED:
 256		return "I40E_ERR_DIAG_TEST_FAILED";
 257	case I40E_ERR_NOT_READY:
 258		return "I40E_ERR_NOT_READY";
 259	case I40E_NOT_SUPPORTED:
 260		return "I40E_NOT_SUPPORTED";
 261	case I40E_ERR_FIRMWARE_API_VERSION:
 262		return "I40E_ERR_FIRMWARE_API_VERSION";
 263	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
 264		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
 265	}
 266
 267	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
 268	return hw->err_str;
 269}
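/*
 * Editorial note (not part of the upstream file): i40e_stat_str() and
 * i40e_aq_str() are typically used together when logging an AdminQ failure.
 * A minimal sketch, assuming "ret" holds the i40e_status returned by one of
 * the AdminQ wrappers below and "pf" is the driver's struct i40e_pf:
 *
 *	if (ret)
 *		dev_info(&pf->pdev->dev, "AQ command failed: %s, aq_err %s\n",
 *			 i40e_stat_str(hw, ret),
 *			 i40e_aq_str(hw, hw->aq.asq_last_status));
 */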
 270
 271/**
 272 * i40e_debug_aq
 273 * @hw: pointer to the hw struct
 274 * @mask: debug mask
 275 * @desc: pointer to admin queue descriptor
 276 * @buffer: pointer to command buffer
 277 * @buf_len: max length of buffer
 278 *
 279 * Dumps debug log about adminq command with descriptor contents.
 280 **/
 281void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 282		   void *buffer, u16 buf_len)
 283{
 284	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
 285	u32 effective_mask = hw->debug_mask & mask;
 286	char prefix[27];
 287	u16 len;
 288	u8 *buf = (u8 *)buffer;
 289
 290	if (!effective_mask || !desc)
 291		return;
 292
 293	len = le16_to_cpu(aq_desc->datalen);
 294
 295	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 296		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
 297		   le16_to_cpu(aq_desc->opcode),
 298		   le16_to_cpu(aq_desc->flags),
 299		   le16_to_cpu(aq_desc->datalen),
 300		   le16_to_cpu(aq_desc->retval));
 301	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 302		   "\tcookie (h,l) 0x%08X 0x%08X\n",
 303		   le32_to_cpu(aq_desc->cookie_high),
 304		   le32_to_cpu(aq_desc->cookie_low));
 305	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 306		   "\tparam (0,1)  0x%08X 0x%08X\n",
 307		   le32_to_cpu(aq_desc->params.internal.param0),
 308		   le32_to_cpu(aq_desc->params.internal.param1));
 309	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 310		   "\taddr (h,l)   0x%08X 0x%08X\n",
 311		   le32_to_cpu(aq_desc->params.external.addr_high),
 312		   le32_to_cpu(aq_desc->params.external.addr_low));
 313
 314	if (buffer && buf_len != 0 && len != 0 &&
 315	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
 316		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
 317		if (buf_len < len)
 318			len = buf_len;
 319
 320		snprintf(prefix, sizeof(prefix),
 321			 "i40e %02x:%02x.%x: \t0x",
 322			 hw->bus.bus_id,
 323			 hw->bus.device,
 324			 hw->bus.func);
 325
 326		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
 327			       16, 1, buf, len, false);
 328	}
 329}
 330
 331/**
 332 * i40e_check_asq_alive
 333 * @hw: pointer to the hw struct
 334 *
 335 * Returns true if the queue is enabled, else false.
 336 **/
 337bool i40e_check_asq_alive(struct i40e_hw *hw)
 338{
 339	if (hw->aq.asq.len)
 340		return !!(rd32(hw, hw->aq.asq.len) &
 341			  I40E_PF_ATQLEN_ATQENABLE_MASK);
 342	else
 343		return false;
 344}
 345
 346/**
 347 * i40e_aq_queue_shutdown
 348 * @hw: pointer to the hw struct
 349 * @unloading: is the driver unloading itself
 350 *
 351 * Tell the Firmware that we're shutting down the AdminQ and whether
 352 * or not the driver is unloading as well.
 353 **/
 354i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
 355					     bool unloading)
 356{
 357	struct i40e_aq_desc desc;
 358	struct i40e_aqc_queue_shutdown *cmd =
 359		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
 360	i40e_status status;
 361
 362	i40e_fill_default_direct_cmd_desc(&desc,
 363					  i40e_aqc_opc_queue_shutdown);
 364
 365	if (unloading)
 366		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
 367	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 368
 369	return status;
 370}
 371
 372/**
 373 * i40e_aq_get_set_rss_lut
 374 * @hw: pointer to the hardware structure
 375 * @vsi_id: vsi fw index
 376 * @pf_lut: for PF table set true, for VSI table set false
 377 * @lut: pointer to the lut buffer provided by the caller
 378 * @lut_size: size of the lut buffer
 379 * @set: set true to set the table, false to get the table
 380 *
 381 * Internal function to get or set RSS look up table
 382 **/
 383static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
 384					   u16 vsi_id, bool pf_lut,
 385					   u8 *lut, u16 lut_size,
 386					   bool set)
 387{
 388	i40e_status status;
 389	struct i40e_aq_desc desc;
 390	struct i40e_aqc_get_set_rss_lut *cmd_resp =
 391		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
 392
 393	if (set)
 394		i40e_fill_default_direct_cmd_desc(&desc,
 395						  i40e_aqc_opc_set_rss_lut);
 396	else
 397		i40e_fill_default_direct_cmd_desc(&desc,
 398						  i40e_aqc_opc_get_rss_lut);
 399
 400	/* Indirect command */
 401	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
 402	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
 403
 404	cmd_resp->vsi_id =
 405			cpu_to_le16((u16)((vsi_id <<
 406					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
 407					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
 408	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
 409
 410	if (pf_lut)
 411		cmd_resp->flags |= cpu_to_le16((u16)
 412					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
 413					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 414					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 415	else
 416		cmd_resp->flags |= cpu_to_le16((u16)
 417					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
 418					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 419					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 420
 421	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
 422
 423	return status;
 424}
 425
 426/**
 427 * i40e_aq_get_rss_lut
 428 * @hw: pointer to the hardware structure
 429 * @vsi_id: vsi fw index
 430 * @pf_lut: for PF table set true, for VSI table set false
 431 * @lut: pointer to the lut buffer provided by the caller
 432 * @lut_size: size of the lut buffer
 433 *
 434 * get the RSS lookup table, PF or VSI type
 435 **/
 436i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
 437				bool pf_lut, u8 *lut, u16 lut_size)
 438{
 439	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
 440				       false);
 441}
 442
 443/**
 444 * i40e_aq_set_rss_lut
 445 * @hw: pointer to the hardware structure
 446 * @vsi_id: vsi fw index
 447 * @pf_lut: for PF table set true, for VSI table set false
 448 * @lut: pointer to the lut buffer provided by the caller
 449 * @lut_size: size of the lut buffer
 450 *
 451 * set the RSS lookup table, PF or VSI type
 452 **/
 453i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
 454				bool pf_lut, u8 *lut, u16 lut_size)
 455{
 456	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
 457}
 458
 459/**
 460 * i40e_aq_get_set_rss_key
 461 * @hw: pointer to the hw struct
 462 * @vsi_id: vsi fw index
 463 * @key: pointer to key info struct
 464 * @set: set true to set the key, false to get the key
 465 *
 466 * Internal function to get or set the RSS key per VSI
 467 **/
 468static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
 469				      u16 vsi_id,
 470				      struct i40e_aqc_get_set_rss_key_data *key,
 471				      bool set)
 472{
 473	i40e_status status;
 474	struct i40e_aq_desc desc;
 475	struct i40e_aqc_get_set_rss_key *cmd_resp =
 476			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
 477	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
 478
 479	if (set)
 480		i40e_fill_default_direct_cmd_desc(&desc,
 481						  i40e_aqc_opc_set_rss_key);
 482	else
 483		i40e_fill_default_direct_cmd_desc(&desc,
 484						  i40e_aqc_opc_get_rss_key);
 485
 486	/* Indirect command */
 487	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
 488	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
 489
 490	cmd_resp->vsi_id =
 491			cpu_to_le16((u16)((vsi_id <<
 492					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
 493					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
 494	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
 495
 496	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
 497
 498	return status;
 499}
 500
 501/**
 502 * i40e_aq_get_rss_key
 503 * @hw: pointer to the hw struct
 504 * @vsi_id: vsi fw index
 505 * @key: pointer to key info struct
 506 * get the RSS key per VSI
 507 **/
 508i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
 509				u16 vsi_id,
 510				struct i40e_aqc_get_set_rss_key_data *key)
 511{
 512	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
 513}
 514
 515/**
 516 * i40e_aq_set_rss_key
 517 * @hw: pointer to the hw struct
 518 * @vsi_id: vsi fw index
 519 * @key: pointer to key info struct
 520 *
 521 * set the RSS key per VSI
 522 **/
 523i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
 524				u16 vsi_id,
 525				struct i40e_aqc_get_set_rss_key_data *key)
 526{
 527	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
 528}
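/*
 * Editorial sketch (not part of the upstream file): a minimal example of how
 * a caller might program RSS for a VSI with the wrappers above. "vsi_id" and
 * the 128-byte LUT size are assumptions for illustration only; the key buffer
 * size is fixed by struct i40e_aqc_get_set_rss_key_data.
 *
 *	struct i40e_aqc_get_set_rss_key_data seed = { };
 *	u8 lut[128] = { };
 *	i40e_status ret;
 *
 *	ret = i40e_aq_set_rss_key(hw, vsi_id, &seed);
 *	if (!ret)
 *		ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));
 */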
 529
 530/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 531 * hardware to a bit-field that can be used by SW to more easily determine the
 532 * packet type.
 533 *
 534 * Macros are used to shorten the table lines and make this table human
 535 * readable.
 536 *
 537 * We store the PTYPE in the top byte of the bit field - this is just so that
 538 * we can check that the table doesn't have a row missing, as the index into
 539 * the table should be the PTYPE.
 540 *
 541 * Typical work flow:
 542 *
 543 * IF NOT i40e_ptype_lookup[ptype].known
 544 * THEN
 545 *      Packet is unknown
 546 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 547 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
 548 * ELSE
 549 *      Use the enum i40e_rx_l2_ptype to decode the packet type
 550 * ENDIF
 551 */
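/*
 * Editorial sketch (not part of the upstream file) of the workflow described
 * above. The helper name is hypothetical; the .known and .outer_ip fields and
 * the I40E_RX_PTYPE_OUTER_IP value are the ones referenced in the comment.
 *
 *	static bool example_ptype_is_outer_ip(u8 ptype)
 *	{
 *		struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *		if (!decoded.known)
 *			return false;	// packet type unknown to the hardware
 *
 *		// otherwise look at tunnel/inner fields, or the L2 ptype
 *		return decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP;
 *	}
 */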
 552
 553/* macro to make the table lines short */
 554#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
 555	{	PTYPE, \
 556		1, \
 557		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
 558		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
 559		I40E_RX_PTYPE_##OUTER_FRAG, \
 560		I40E_RX_PTYPE_TUNNEL_##T, \
 561		I40E_RX_PTYPE_TUNNEL_END_##TE, \
 562		I40E_RX_PTYPE_##TEF, \
 563		I40E_RX_PTYPE_INNER_PROT_##I, \
 564		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
 565
 566#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
 567		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 568
 569/* shorter macros make the table fit but are terse */
 570#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
 571#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
 572#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC
 573
 574/* Lookup table mapping the HW PTYPE to the bit field for decoding */
 575struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
 576	/* L2 Packet types */
 577	I40E_PTT_UNUSED_ENTRY(0),
 578	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 579	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
 580	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 581	I40E_PTT_UNUSED_ENTRY(4),
 582	I40E_PTT_UNUSED_ENTRY(5),
 583	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 584	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 585	I40E_PTT_UNUSED_ENTRY(8),
 586	I40E_PTT_UNUSED_ENTRY(9),
 587	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 588	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
 589	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 590	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 591	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 592	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 593	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 594	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 595	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 596	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 597	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 598	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 599
 600	/* Non Tunneled IPv4 */
 601	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
 602	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
 603	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
 604	I40E_PTT_UNUSED_ENTRY(25),
 605	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
 606	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
 607	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
 608
 609	/* IPv4 --> IPv4 */
 610	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
 611	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
 612	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
 613	I40E_PTT_UNUSED_ENTRY(32),
 614	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
 615	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
 616	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
 617
 618	/* IPv4 --> IPv6 */
 619	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
 620	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
 621	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
 622	I40E_PTT_UNUSED_ENTRY(39),
 623	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
 624	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
 625	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
 626
 627	/* IPv4 --> GRE/NAT */
 628	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
 629
 630	/* IPv4 --> GRE/NAT --> IPv4 */
 631	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
 632	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
 633	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
 634	I40E_PTT_UNUSED_ENTRY(47),
 635	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
 636	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
 637	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
 638
 639	/* IPv4 --> GRE/NAT --> IPv6 */
 640	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
 641	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
 642	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
 643	I40E_PTT_UNUSED_ENTRY(54),
 644	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
 645	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
 646	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
 647
 648	/* IPv4 --> GRE/NAT --> MAC */
 649	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
 650
 651	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
 652	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
 653	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
 654	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
 655	I40E_PTT_UNUSED_ENTRY(62),
 656	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
 657	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
 658	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
 659
 660	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
 661	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
 662	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
 663	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
 664	I40E_PTT_UNUSED_ENTRY(69),
 665	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
 666	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
 667	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
 668
 669	/* IPv4 --> GRE/NAT --> MAC/VLAN */
 670	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
 671
 672	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
 673	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
 674	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
 675	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
 676	I40E_PTT_UNUSED_ENTRY(77),
 677	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
 678	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
 679	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
 680
 681	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
 682	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
 683	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
 684	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
 685	I40E_PTT_UNUSED_ENTRY(84),
 686	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
 687	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
 688	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
 689
 690	/* Non Tunneled IPv6 */
 691	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
 692	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
 693	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
 694	I40E_PTT_UNUSED_ENTRY(91),
 695	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
 696	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
 697	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
 698
 699	/* IPv6 --> IPv4 */
 700	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
 701	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
 702	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
 703	I40E_PTT_UNUSED_ENTRY(98),
 704	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
 705	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
 706	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
 707
 708	/* IPv6 --> IPv6 */
 709	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
 710	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
 711	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
 712	I40E_PTT_UNUSED_ENTRY(105),
 713	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
 714	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
 715	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
 716
 717	/* IPv6 --> GRE/NAT */
 718	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
 719
 720	/* IPv6 --> GRE/NAT -> IPv4 */
 721	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
 722	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
 723	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
 724	I40E_PTT_UNUSED_ENTRY(113),
 725	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
 726	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
 727	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
 728
 729	/* IPv6 --> GRE/NAT -> IPv6 */
 730	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
 731	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
 732	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
 733	I40E_PTT_UNUSED_ENTRY(120),
 734	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
 735	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
 736	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
 737
 738	/* IPv6 --> GRE/NAT -> MAC */
 739	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
 740
 741	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
 742	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
 743	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
 744	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
 745	I40E_PTT_UNUSED_ENTRY(128),
 746	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
 747	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
 748	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
 749
 750	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
 751	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
 752	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
 753	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
 754	I40E_PTT_UNUSED_ENTRY(135),
 755	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
 756	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
 757	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
 758
 759	/* IPv6 --> GRE/NAT -> MAC/VLAN */
 760	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
 761
 762	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
 763	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
 764	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
 765	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
 766	I40E_PTT_UNUSED_ENTRY(143),
 767	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
 768	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
 769	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
 770
 771	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
 772	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
 773	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
 774	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
 775	I40E_PTT_UNUSED_ENTRY(150),
 776	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
 777	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
 778	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
 779
 780	/* unused entries */
 781	I40E_PTT_UNUSED_ENTRY(154),
 782	I40E_PTT_UNUSED_ENTRY(155),
 783	I40E_PTT_UNUSED_ENTRY(156),
 784	I40E_PTT_UNUSED_ENTRY(157),
 785	I40E_PTT_UNUSED_ENTRY(158),
 786	I40E_PTT_UNUSED_ENTRY(159),
 787
 788	I40E_PTT_UNUSED_ENTRY(160),
 789	I40E_PTT_UNUSED_ENTRY(161),
 790	I40E_PTT_UNUSED_ENTRY(162),
 791	I40E_PTT_UNUSED_ENTRY(163),
 792	I40E_PTT_UNUSED_ENTRY(164),
 793	I40E_PTT_UNUSED_ENTRY(165),
 794	I40E_PTT_UNUSED_ENTRY(166),
 795	I40E_PTT_UNUSED_ENTRY(167),
 796	I40E_PTT_UNUSED_ENTRY(168),
 797	I40E_PTT_UNUSED_ENTRY(169),
 798
 799	I40E_PTT_UNUSED_ENTRY(170),
 800	I40E_PTT_UNUSED_ENTRY(171),
 801	I40E_PTT_UNUSED_ENTRY(172),
 802	I40E_PTT_UNUSED_ENTRY(173),
 803	I40E_PTT_UNUSED_ENTRY(174),
 804	I40E_PTT_UNUSED_ENTRY(175),
 805	I40E_PTT_UNUSED_ENTRY(176),
 806	I40E_PTT_UNUSED_ENTRY(177),
 807	I40E_PTT_UNUSED_ENTRY(178),
 808	I40E_PTT_UNUSED_ENTRY(179),
 809
 810	I40E_PTT_UNUSED_ENTRY(180),
 811	I40E_PTT_UNUSED_ENTRY(181),
 812	I40E_PTT_UNUSED_ENTRY(182),
 813	I40E_PTT_UNUSED_ENTRY(183),
 814	I40E_PTT_UNUSED_ENTRY(184),
 815	I40E_PTT_UNUSED_ENTRY(185),
 816	I40E_PTT_UNUSED_ENTRY(186),
 817	I40E_PTT_UNUSED_ENTRY(187),
 818	I40E_PTT_UNUSED_ENTRY(188),
 819	I40E_PTT_UNUSED_ENTRY(189),
 820
 821	I40E_PTT_UNUSED_ENTRY(190),
 822	I40E_PTT_UNUSED_ENTRY(191),
 823	I40E_PTT_UNUSED_ENTRY(192),
 824	I40E_PTT_UNUSED_ENTRY(193),
 825	I40E_PTT_UNUSED_ENTRY(194),
 826	I40E_PTT_UNUSED_ENTRY(195),
 827	I40E_PTT_UNUSED_ENTRY(196),
 828	I40E_PTT_UNUSED_ENTRY(197),
 829	I40E_PTT_UNUSED_ENTRY(198),
 830	I40E_PTT_UNUSED_ENTRY(199),
 831
 832	I40E_PTT_UNUSED_ENTRY(200),
 833	I40E_PTT_UNUSED_ENTRY(201),
 834	I40E_PTT_UNUSED_ENTRY(202),
 835	I40E_PTT_UNUSED_ENTRY(203),
 836	I40E_PTT_UNUSED_ENTRY(204),
 837	I40E_PTT_UNUSED_ENTRY(205),
 838	I40E_PTT_UNUSED_ENTRY(206),
 839	I40E_PTT_UNUSED_ENTRY(207),
 840	I40E_PTT_UNUSED_ENTRY(208),
 841	I40E_PTT_UNUSED_ENTRY(209),
 842
 843	I40E_PTT_UNUSED_ENTRY(210),
 844	I40E_PTT_UNUSED_ENTRY(211),
 845	I40E_PTT_UNUSED_ENTRY(212),
 846	I40E_PTT_UNUSED_ENTRY(213),
 847	I40E_PTT_UNUSED_ENTRY(214),
 848	I40E_PTT_UNUSED_ENTRY(215),
 849	I40E_PTT_UNUSED_ENTRY(216),
 850	I40E_PTT_UNUSED_ENTRY(217),
 851	I40E_PTT_UNUSED_ENTRY(218),
 852	I40E_PTT_UNUSED_ENTRY(219),
 853
 854	I40E_PTT_UNUSED_ENTRY(220),
 855	I40E_PTT_UNUSED_ENTRY(221),
 856	I40E_PTT_UNUSED_ENTRY(222),
 857	I40E_PTT_UNUSED_ENTRY(223),
 858	I40E_PTT_UNUSED_ENTRY(224),
 859	I40E_PTT_UNUSED_ENTRY(225),
 860	I40E_PTT_UNUSED_ENTRY(226),
 861	I40E_PTT_UNUSED_ENTRY(227),
 862	I40E_PTT_UNUSED_ENTRY(228),
 863	I40E_PTT_UNUSED_ENTRY(229),
 864
 865	I40E_PTT_UNUSED_ENTRY(230),
 866	I40E_PTT_UNUSED_ENTRY(231),
 867	I40E_PTT_UNUSED_ENTRY(232),
 868	I40E_PTT_UNUSED_ENTRY(233),
 869	I40E_PTT_UNUSED_ENTRY(234),
 870	I40E_PTT_UNUSED_ENTRY(235),
 871	I40E_PTT_UNUSED_ENTRY(236),
 872	I40E_PTT_UNUSED_ENTRY(237),
 873	I40E_PTT_UNUSED_ENTRY(238),
 874	I40E_PTT_UNUSED_ENTRY(239),
 875
 876	I40E_PTT_UNUSED_ENTRY(240),
 877	I40E_PTT_UNUSED_ENTRY(241),
 878	I40E_PTT_UNUSED_ENTRY(242),
 879	I40E_PTT_UNUSED_ENTRY(243),
 880	I40E_PTT_UNUSED_ENTRY(244),
 881	I40E_PTT_UNUSED_ENTRY(245),
 882	I40E_PTT_UNUSED_ENTRY(246),
 883	I40E_PTT_UNUSED_ENTRY(247),
 884	I40E_PTT_UNUSED_ENTRY(248),
 885	I40E_PTT_UNUSED_ENTRY(249),
 886
 887	I40E_PTT_UNUSED_ENTRY(250),
 888	I40E_PTT_UNUSED_ENTRY(251),
 889	I40E_PTT_UNUSED_ENTRY(252),
 890	I40E_PTT_UNUSED_ENTRY(253),
 891	I40E_PTT_UNUSED_ENTRY(254),
 892	I40E_PTT_UNUSED_ENTRY(255)
 893};
 894
 895/**
 896 * i40e_init_shared_code - Initialize the shared code
 897 * @hw: pointer to hardware structure
 898 *
 899 * This assigns the MAC type and PHY code and inits the NVM.
 900 * Does not touch the hardware. This function must be called prior to any
 901 * other function in the shared code. The i40e_hw structure should be
 902 * memset to 0 prior to calling this function.  The following fields in
 903 * hw structure should be filled in prior to calling this function:
 904 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 905 * subsystem_vendor_id, and revision_id
 906 **/
 907i40e_status i40e_init_shared_code(struct i40e_hw *hw)
 908{
 909	i40e_status status = 0;
 910	u32 port, ari, func_rid;
 911
 912	i40e_set_mac_type(hw);
 913
 914	switch (hw->mac.type) {
 915	case I40E_MAC_XL710:
 916	case I40E_MAC_X722:
 917		break;
 918	default:
 919		return I40E_ERR_DEVICE_NOT_SUPPORTED;
 920	}
 921
 922	hw->phy.get_link_info = true;
 923
 924	/* Determine port number and PF number */
 925	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
 926					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
 927	hw->port = (u8)port;
 928	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
 929						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
 930	func_rid = rd32(hw, I40E_PF_FUNC_RID);
 931	if (ari)
 932		hw->pf_id = (u8)(func_rid & 0xff);
 933	else
 934		hw->pf_id = (u8)(func_rid & 0x7);
 935
 936	if (hw->mac.type == I40E_MAC_X722)
 937		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
 938			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
 939
 940	status = i40e_init_nvm(hw);
 941	return status;
 942}
 943
 944/**
 945 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 946 * @hw: pointer to the hw struct
 947 * @flags: a return indicator of what addresses were added to the addr store
 948 * @addrs: the requestor's mac addr store
 949 * @cmd_details: pointer to command details structure or NULL
 950 **/
 951static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
 952				   u16 *flags,
 953				   struct i40e_aqc_mac_address_read_data *addrs,
 954				   struct i40e_asq_cmd_details *cmd_details)
 955{
 956	struct i40e_aq_desc desc;
 957	struct i40e_aqc_mac_address_read *cmd_data =
 958		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
 959	i40e_status status;
 960
 961	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
 962	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
 963
 964	status = i40e_asq_send_command(hw, &desc, addrs,
 965				       sizeof(*addrs), cmd_details);
 966	*flags = le16_to_cpu(cmd_data->command_flags);
 967
 968	return status;
 969}
 970
 971/**
 972 * i40e_aq_mac_address_write - Change the MAC addresses
 973 * @hw: pointer to the hw struct
 974 * @flags: indicates which MAC to be written
 975 * @mac_addr: address to write
 976 * @cmd_details: pointer to command details structure or NULL
 977 **/
 978i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
 979				    u16 flags, u8 *mac_addr,
 980				    struct i40e_asq_cmd_details *cmd_details)
 981{
 982	struct i40e_aq_desc desc;
 983	struct i40e_aqc_mac_address_write *cmd_data =
 984		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
 985	i40e_status status;
 986
 987	i40e_fill_default_direct_cmd_desc(&desc,
 988					  i40e_aqc_opc_mac_address_write);
 989	cmd_data->command_flags = cpu_to_le16(flags);
 990	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
 991	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
 992					((u32)mac_addr[3] << 16) |
 993					((u32)mac_addr[4] << 8) |
 994					mac_addr[5]);
 995
 996	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 997
 998	return status;
 999}
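/*
 * Worked example (editorial): for mac_addr 00:11:22:33:44:55 the packing
 * above yields mac_sah = 0x0011 (the two most significant bytes) and
 * mac_sal = 0x22334455 (the remaining four bytes), each then converted to
 * little-endian for the descriptor.
 */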
1000
1001/**
1002 * i40e_get_mac_addr - get MAC address
1003 * @hw: pointer to the HW structure
1004 * @mac_addr: pointer to MAC address
1005 *
1006 * Reads the adapter's MAC address from register
1007 **/
1008i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1009{
1010	struct i40e_aqc_mac_address_read_data addrs;
1011	i40e_status status;
1012	u16 flags = 0;
1013
1014	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1015
1016	if (flags & I40E_AQC_LAN_ADDR_VALID)
1017		ether_addr_copy(mac_addr, addrs.pf_lan_mac);
1018
1019	return status;
1020}
1021
1022/**
1023 * i40e_get_port_mac_addr - get Port MAC address
1024 * @hw: pointer to the HW structure
1025 * @mac_addr: pointer to Port MAC address
1026 *
1027 * Reads the adapter's Port MAC address
1028 **/
1029i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1030{
1031	struct i40e_aqc_mac_address_read_data addrs;
1032	i40e_status status;
1033	u16 flags = 0;
1034
1035	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1036	if (status)
1037		return status;
1038
1039	if (flags & I40E_AQC_PORT_ADDR_VALID)
1040		ether_addr_copy(mac_addr, addrs.port_mac);
1041	else
1042		status = I40E_ERR_INVALID_MAC_ADDR;
1043
1044	return status;
1045}
1046
1047/**
1048 * i40e_pre_tx_queue_cfg - pre tx queue configure
1049 * @hw: pointer to the HW structure
1050 * @queue: target PF queue index
1051 * @enable: state change request
1052 *
1053 * Handles hw requirement to indicate intention to enable
1054 * or disable target queue.
1055 **/
1056void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
1057{
1058	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
1059	u32 reg_block = 0;
1060	u32 reg_val;
1061
1062	if (abs_queue_idx >= 128) {
1063		reg_block = abs_queue_idx / 128;
1064		abs_queue_idx %= 128;
1065	}
1066
1067	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1068	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1069	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1070
1071	if (enable)
1072		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
1073	else
1074		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1075
1076	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
1077}
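/*
 * Worked example (editorial): the Tx pre-disable registers cover 128 queues
 * per block. With base_queue = 0 and queue = 200, abs_queue_idx starts at
 * 200, so reg_block = 200 / 128 = 1 and abs_queue_idx = 200 % 128 = 72; the
 * request is then written to I40E_GLLAN_TXPRE_QDIS(1) with QINDX = 72.
 */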
1078
1079/**
1080 *  i40e_read_pba_string - Reads part number string from EEPROM
1081 *  @hw: pointer to hardware structure
1082 *  @pba_num: stores the part number string from the EEPROM
1083 *  @pba_num_size: part number string buffer length
1084 *
1085 *  Reads the part number string from the EEPROM.
1086 **/
1087i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
1088				 u32 pba_num_size)
1089{
1090	i40e_status status = 0;
1091	u16 pba_word = 0;
1092	u16 pba_size = 0;
1093	u16 pba_ptr = 0;
1094	u16 i = 0;
1095
1096	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
1097	if (status || (pba_word != 0xFAFA)) {
1098		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
1099		return status;
1100	}
1101
1102	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
1103	if (status) {
1104		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
1105		return status;
1106	}
1107
1108	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
1109	if (status) {
1110		hw_dbg(hw, "Failed to read PBA Block size.\n");
1111		return status;
1112	}
1113
1114	/* Subtract one to get PBA word count (PBA Size word is included in
1115	 * total size)
1116	 */
1117	pba_size--;
1118	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1119		hw_dbg(hw, "Buffer too small for PBA data.\n");
1120		return I40E_ERR_PARAM;
1121	}
1122
1123	for (i = 0; i < pba_size; i++) {
1124		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1125		if (status) {
1126			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1127			return status;
1128		}
1129
1130		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1131		pba_num[(i * 2) + 1] = pba_word & 0xFF;
1132	}
1133	pba_num[(pba_size * 2)] = '\0';
1134
1135	return status;
1136}
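/*
 * Editorial usage sketch (not part of the upstream file): the caller supplies
 * a buffer large enough for the PBA characters plus a terminating NUL; the
 * 32-byte size below is an arbitrary assumption.
 *
 *	u8 pba[32];
 *
 *	if (!i40e_read_pba_string(hw, pba, sizeof(pba)))
 *		hw_dbg(hw, "PBA: %s\n", pba);
 */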
1137
1138/**
1139 * i40e_get_media_type - Gets media type
1140 * @hw: pointer to the hardware structure
1141 **/
1142static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1143{
1144	enum i40e_media_type media;
1145
1146	switch (hw->phy.link_info.phy_type) {
1147	case I40E_PHY_TYPE_10GBASE_SR:
1148	case I40E_PHY_TYPE_10GBASE_LR:
1149	case I40E_PHY_TYPE_1000BASE_SX:
1150	case I40E_PHY_TYPE_1000BASE_LX:
1151	case I40E_PHY_TYPE_40GBASE_SR4:
1152	case I40E_PHY_TYPE_40GBASE_LR4:
1153	case I40E_PHY_TYPE_25GBASE_LR:
1154	case I40E_PHY_TYPE_25GBASE_SR:
1155		media = I40E_MEDIA_TYPE_FIBER;
1156		break;
1157	case I40E_PHY_TYPE_100BASE_TX:
1158	case I40E_PHY_TYPE_1000BASE_T:
1159	case I40E_PHY_TYPE_2_5GBASE_T:
1160	case I40E_PHY_TYPE_5GBASE_T:
1161	case I40E_PHY_TYPE_10GBASE_T:
1162		media = I40E_MEDIA_TYPE_BASET;
1163		break;
1164	case I40E_PHY_TYPE_10GBASE_CR1_CU:
1165	case I40E_PHY_TYPE_40GBASE_CR4_CU:
1166	case I40E_PHY_TYPE_10GBASE_CR1:
1167	case I40E_PHY_TYPE_40GBASE_CR4:
1168	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1169	case I40E_PHY_TYPE_40GBASE_AOC:
1170	case I40E_PHY_TYPE_10GBASE_AOC:
1171	case I40E_PHY_TYPE_25GBASE_CR:
1172	case I40E_PHY_TYPE_25GBASE_AOC:
1173	case I40E_PHY_TYPE_25GBASE_ACC:
1174		media = I40E_MEDIA_TYPE_DA;
1175		break;
1176	case I40E_PHY_TYPE_1000BASE_KX:
1177	case I40E_PHY_TYPE_10GBASE_KX4:
1178	case I40E_PHY_TYPE_10GBASE_KR:
1179	case I40E_PHY_TYPE_40GBASE_KR4:
1180	case I40E_PHY_TYPE_20GBASE_KR2:
1181	case I40E_PHY_TYPE_25GBASE_KR:
1182		media = I40E_MEDIA_TYPE_BACKPLANE;
1183		break;
1184	case I40E_PHY_TYPE_SGMII:
1185	case I40E_PHY_TYPE_XAUI:
1186	case I40E_PHY_TYPE_XFI:
1187	case I40E_PHY_TYPE_XLAUI:
1188	case I40E_PHY_TYPE_XLPPI:
1189	default:
1190		media = I40E_MEDIA_TYPE_UNKNOWN;
1191		break;
1192	}
1193
1194	return media;
1195}
1196
1197/**
1198 * i40e_poll_globr - Poll for Global Reset completion
1199 * @hw: pointer to the hardware structure
1200 * @retry_limit: how many times to retry before failure
1201 **/
1202static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1203				   u32 retry_limit)
1204{
1205	u32 cnt, reg = 0;
1206
1207	for (cnt = 0; cnt < retry_limit; cnt++) {
1208		reg = rd32(hw, I40E_GLGEN_RSTAT);
1209		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1210			return 0;
1211		msleep(100);
1212	}
1213
1214	hw_dbg(hw, "Global reset failed.\n");
1215	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1216
1217	return I40E_ERR_RESET_FAILED;
1218}
1219
1220#define I40E_PF_RESET_WAIT_COUNT_A0	200
1221#define I40E_PF_RESET_WAIT_COUNT	200
1222/**
1223 * i40e_pf_reset - Reset the PF
1224 * @hw: pointer to the hardware structure
1225 *
1226 * Assuming someone else has triggered a global reset,
1227 * ensure the global reset is complete and then reset the PF
1228 **/
1229i40e_status i40e_pf_reset(struct i40e_hw *hw)
1230{
1231	u32 cnt = 0;
1232	u32 cnt1 = 0;
1233	u32 reg = 0;
1234	u32 grst_del;
1235
1236	/* Poll for Global Reset steady state in case of recent GRST.
1237	 * The grst delay value is in 100ms units, and we'll wait a
1238	 * couple counts longer to be sure we don't just miss the end.
1239	 */
1240	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1241		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1242		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1243
1244	/* It can take up to 15 secs for GRST steady state.
1245	 * Bump it to 16 secs max to be safe.
1246	 */
1247	grst_del = grst_del * 20;
1248
1249	for (cnt = 0; cnt < grst_del; cnt++) {
1250		reg = rd32(hw, I40E_GLGEN_RSTAT);
1251		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1252			break;
1253		msleep(100);
1254	}
1255	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1256		hw_dbg(hw, "Global reset polling failed to complete.\n");
1257		return I40E_ERR_RESET_FAILED;
1258	}
1259
1260	/* Now Wait for the FW to be ready */
1261	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1262		reg = rd32(hw, I40E_GLNVM_ULD);
1263		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1264			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1265		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1266			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1267			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1268			break;
1269		}
1270		usleep_range(10000, 20000);
1271	}
1272	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1273		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1274		hw_dbg(hw, "wait for FW Reset complete timed out\n");
1275		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1276		return I40E_ERR_RESET_FAILED;
1277	}
1278
1279	/* If there was a Global Reset in progress when we got here,
1280	 * we don't need to do the PF Reset
1281	 */
1282	if (!cnt) {
1283		u32 reg2 = 0;
1284		if (hw->revision_id == 0)
1285			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1286		else
1287			cnt = I40E_PF_RESET_WAIT_COUNT;
1288		reg = rd32(hw, I40E_PFGEN_CTRL);
1289		wr32(hw, I40E_PFGEN_CTRL,
1290		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1291		for (; cnt; cnt--) {
1292			reg = rd32(hw, I40E_PFGEN_CTRL);
1293			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1294				break;
1295			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1296			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1297				break;
1298			usleep_range(1000, 2000);
1299		}
1300		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1301			if (i40e_poll_globr(hw, grst_del))
1302				return I40E_ERR_RESET_FAILED;
1303		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1304			hw_dbg(hw, "PF reset polling failed to complete.\n");
1305			return I40E_ERR_RESET_FAILED;
1306		}
1307	}
1308
1309	i40e_clear_pxe_mode(hw);
1310
1311	return 0;
1312}
1313
1314/**
1315 * i40e_clear_hw - clear out any left over hw state
1316 * @hw: pointer to the hw struct
1317 *
1318 * Clear queues and interrupts, typically called at init time,
1319 * but after the capabilities have been found so we know how many
1320 * queues and msix vectors have been allocated.
1321 **/
1322void i40e_clear_hw(struct i40e_hw *hw)
1323{
1324	u32 num_queues, base_queue;
1325	u32 num_pf_int;
1326	u32 num_vf_int;
1327	u32 num_vfs;
1328	u32 i, j;
1329	u32 val;
1330	u32 eol = 0x7ff;
1331
1332	/* get number of interrupts, queues, and VFs */
1333	val = rd32(hw, I40E_GLPCI_CNF2);
1334	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1335		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1336	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1337		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1338
1339	val = rd32(hw, I40E_PFLAN_QALLOC);
1340	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1341		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1342	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1343	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1344	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1345		num_queues = (j - base_queue) + 1;
1346	else
1347		num_queues = 0;
1348
1349	val = rd32(hw, I40E_PF_VT_PFALLOC);
1350	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1351	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1352	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1353	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1354	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1355		num_vfs = (j - i) + 1;
1356	else
1357		num_vfs = 0;
1358
1359	/* stop all the interrupts */
1360	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1361	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1362	for (i = 0; i < num_pf_int - 2; i++)
1363		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1364
1365	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1366	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1367	wr32(hw, I40E_PFINT_LNKLST0, val);
1368	for (i = 0; i < num_pf_int - 2; i++)
1369		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1370	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1371	for (i = 0; i < num_vfs; i++)
1372		wr32(hw, I40E_VPINT_LNKLST0(i), val);
1373	for (i = 0; i < num_vf_int - 2; i++)
1374		wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1375
1376	/* warn the HW of the coming Tx disables */
1377	for (i = 0; i < num_queues; i++) {
1378		u32 abs_queue_idx = base_queue + i;
1379		u32 reg_block = 0;
1380
1381		if (abs_queue_idx >= 128) {
1382			reg_block = abs_queue_idx / 128;
1383			abs_queue_idx %= 128;
1384		}
1385
1386		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1387		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1388		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1389		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1390
1391		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1392	}
1393	udelay(400);
1394
1395	/* stop all the queues */
1396	for (i = 0; i < num_queues; i++) {
1397		wr32(hw, I40E_QINT_TQCTL(i), 0);
1398		wr32(hw, I40E_QTX_ENA(i), 0);
1399		wr32(hw, I40E_QINT_RQCTL(i), 0);
1400		wr32(hw, I40E_QRX_ENA(i), 0);
1401	}
1402
1403	/* short wait for all queue disables to settle */
1404	udelay(50);
1405}
1406
1407/**
1408 * i40e_clear_pxe_mode - clear pxe operations mode
1409 * @hw: pointer to the hw struct
1410 *
1411 * Make sure all PXE mode settings are cleared, including things
1412 * like descriptor fetch/write-back mode.
1413 **/
1414void i40e_clear_pxe_mode(struct i40e_hw *hw)
1415{
1416	u32 reg;
1417
1418	if (i40e_check_asq_alive(hw))
1419		i40e_aq_clear_pxe_mode(hw, NULL);
1420
1421	/* Clear single descriptor fetch/write-back mode */
1422	reg = rd32(hw, I40E_GLLAN_RCTL_0);
1423
1424	if (hw->revision_id == 0) {
1425		/* As a work around clear PXE_MODE instead of setting it */
1426		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1427	} else {
1428		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1429	}
1430}
1431
1432/**
1433 * i40e_led_is_mine - helper to find matching led
1434 * @hw: pointer to the hw struct
1435 * @idx: index into GPIO registers
1436 *
1437 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1438 */
1439static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1440{
1441	u32 gpio_val = 0;
1442	u32 port;
1443
1444	if (!hw->func_caps.led[idx])
1445		return 0;
1446
1447	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1448	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1449		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1450
1451	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1452	 * if it is not our port then ignore
1453	 */
1454	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1455	    (port != hw->port))
1456		return 0;
1457
1458	return gpio_val;
1459}
1460
1461#define I40E_COMBINED_ACTIVITY 0xA
1462#define I40E_FILTER_ACTIVITY 0xE
1463#define I40E_LINK_ACTIVITY 0xC
1464#define I40E_MAC_ACTIVITY 0xD
1465#define I40E_LED0 22
1466
1467/**
1468 * i40e_led_get - return current on/off mode
1469 * @hw: pointer to the hw struct
1470 *
1471 * The value returned is the 'mode' field as defined in the
1472 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1473 * values are variations of possible behaviors relating to
1474 * blink, link, and wire.
1475 **/
1476u32 i40e_led_get(struct i40e_hw *hw)
1477{
1478	u32 mode = 0;
1479	int i;
1480
1481	/* as per the documentation GPIO 22-29 are the LED
1482	 * GPIO pins named LED0..LED7
1483	 */
1484	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1485		u32 gpio_val = i40e_led_is_mine(hw, i);
1486
1487		if (!gpio_val)
1488			continue;
1489
1490		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1491			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1492		break;
1493	}
1494
1495	return mode;
1496}
1497
1498/**
1499 * i40e_led_set - set new on/off mode
1500 * @hw: pointer to the hw struct
1501 * @mode: 0=off, 0xf=on (else see manual for mode details)
1502 * @blink: true if the LED should blink when on, false if steady
1503 *
1504 * If this function is used to turn on the blink, it should also be
1505 * used to disable the blink when restoring the original state.
1506 **/
1507void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1508{
1509	int i;
1510
1511	if (mode & 0xfffffff0)
1512		hw_dbg(hw, "invalid mode passed in %X\n", mode);
1513
1514	/* as per the documentation GPIO 22-29 are the LED
1515	 * GPIO pins named LED0..LED7
1516	 */
1517	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1518		u32 gpio_val = i40e_led_is_mine(hw, i);
1519
1520		if (!gpio_val)
1521			continue;
1522		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1523		/* this & is a bit of paranoia, but serves as a range check */
1524		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1525			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1526
1527		if (blink)
1528			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1529		else
1530			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1531
1532		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1533		break;
1534	}
1535}
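/*
 * Editorial sketch (not part of the upstream file): the usual pairing of
 * i40e_led_get()/i40e_led_set() for a port-identify blink, per the note in
 * the kernel-doc above. "orig_mode" is simply a caller-held copy.
 *
 *	u32 orig_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
 *	// ... identification period ...
 *	i40e_led_set(hw, orig_mode, false);	// restore mode, blink off
 */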
1536
1537/* Admin command wrappers */
1538
1539/**
1540 * i40e_aq_get_phy_capabilities
1541 * @hw: pointer to the hw struct
1542 * @abilities: structure for PHY capabilities to be filled
1543 * @qualified_modules: report Qualified Modules
1544 * @report_init: report init capabilities (active are default)
1545 * @cmd_details: pointer to command details structure or NULL
1546 *
1547 * Returns the various PHY abilities supported on the Port.
1548 **/
1549i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1550			bool qualified_modules, bool report_init,
1551			struct i40e_aq_get_phy_abilities_resp *abilities,
1552			struct i40e_asq_cmd_details *cmd_details)
1553{
1554	struct i40e_aq_desc desc;
1555	i40e_status status;
1556	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1557	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1558
1559	if (!abilities)
1560		return I40E_ERR_PARAM;
1561
1562	do {
1563		i40e_fill_default_direct_cmd_desc(&desc,
1564					       i40e_aqc_opc_get_phy_abilities);
1565
1566		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1567		if (abilities_size > I40E_AQ_LARGE_BUF)
1568			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1569
1570		if (qualified_modules)
1571			desc.params.external.param0 |=
1572			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1573
1574		if (report_init)
1575			desc.params.external.param0 |=
1576			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1577
1578		status = i40e_asq_send_command(hw, &desc, abilities,
1579					       abilities_size, cmd_details);
1580
1581		switch (hw->aq.asq_last_status) {
1582		case I40E_AQ_RC_EIO:
1583			status = I40E_ERR_UNKNOWN_PHY;
1584			break;
1585		case I40E_AQ_RC_EAGAIN:
1586			usleep_range(1000, 2000);
1587			total_delay++;
1588			status = I40E_ERR_TIMEOUT;
1589			break;
1590		/* also covers I40E_AQ_RC_OK */
1591		default:
1592			break;
1593		}
1594
1595	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1596		(total_delay < max_delay));
1597
1598	if (status)
1599		return status;
1600
1601	if (report_init) {
1602		if (hw->mac.type ==  I40E_MAC_XL710 &&
1603		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1604		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1605			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1606		} else {
1607			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1608			hw->phy.phy_types |=
1609					((u64)abilities->phy_type_ext << 32);
1610		}
1611	}
1612
1613	return status;
1614}
1615
1616/**
1617 * i40e_aq_set_phy_config
1618 * @hw: pointer to the hw struct
1619 * @config: structure with PHY configuration to be set
1620 * @cmd_details: pointer to command details structure or NULL
1621 *
1622 * Set the various PHY configuration parameters
1624 * supported on the Port. One or more of the Set PHY config parameters may be
1624 * ignored in an MFP mode as the PF may not have the privilege to set some
1625 * of the PHY Config parameters. This status will be indicated by the
1626 * command response.
1627 **/
1628enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1629				struct i40e_aq_set_phy_config *config,
1630				struct i40e_asq_cmd_details *cmd_details)
1631{
1632	struct i40e_aq_desc desc;
1633	struct i40e_aq_set_phy_config *cmd =
1634			(struct i40e_aq_set_phy_config *)&desc.params.raw;
1635	enum i40e_status_code status;
1636
1637	if (!config)
1638		return I40E_ERR_PARAM;
1639
1640	i40e_fill_default_direct_cmd_desc(&desc,
1641					  i40e_aqc_opc_set_phy_config);
1642
1643	*cmd = *config;
1644
1645	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1646
1647	return status;
1648}
1649
1650static noinline_for_stack enum i40e_status_code
1651i40e_set_fc_status(struct i40e_hw *hw,
1652		   struct i40e_aq_get_phy_abilities_resp *abilities,
1653		   bool atomic_restart)
1654{
1655	struct i40e_aq_set_phy_config config;
1656	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1657	u8 pause_mask = 0x0;
1658
1659	switch (fc_mode) {
1660	case I40E_FC_FULL:
1661		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1662		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1663		break;
1664	case I40E_FC_RX_PAUSE:
1665		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1666		break;
1667	case I40E_FC_TX_PAUSE:
1668		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1669		break;
1670	default:
1671		break;
1672	}
1673
1674	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1675	/* clear the old pause settings */
1676	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1677			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1678	/* set the new abilities */
1679	config.abilities |= pause_mask;
1680	/* If the abilities have changed, then set the new config */
1681	if (config.abilities == abilities->abilities)
1682		return 0;
1683
1684	/* Auto restart link so settings take effect */
1685	if (atomic_restart)
1686		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1687	/* Copy over all the old settings */
1688	config.phy_type = abilities->phy_type;
1689	config.phy_type_ext = abilities->phy_type_ext;
1690	config.link_speed = abilities->link_speed;
1691	config.eee_capability = abilities->eee_capability;
1692	config.eeer = abilities->eeer_val;
1693	config.low_power_ctrl = abilities->d3_lpan;
1694	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1695			    I40E_AQ_PHY_FEC_CONFIG_MASK;
1696
1697	return i40e_aq_set_phy_config(hw, &config, NULL);
1698}
1699
1700/**
1701 * i40e_set_fc
1702 * @hw: pointer to the hw struct
1703 * @aq_failures: buffer to return AdminQ failure information
1704 * @atomic_restart: whether to enable atomic link restart
1705 *
1706 * Set the requested flow control mode using set_phy_config.
1707 **/
1708enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1709				  bool atomic_restart)
1710{
1711	struct i40e_aq_get_phy_abilities_resp abilities;
1712	enum i40e_status_code status;
1713
1714	*aq_failures = 0x0;
1715
1716	/* Get the current phy config */
1717	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1718					      NULL);
1719	if (status) {
1720		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1721		return status;
1722	}
1723
1724	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1725	if (status)
1726		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1727
1728	/* Update the link info */
1729	status = i40e_update_link_info(hw);
1730	if (status) {
1731		/* Wait a little bit (on 40G cards it sometimes takes a really
1732		 * long time for link to come back from the atomic reset)
1733		 * and try once more
1734		 */
1735		msleep(1000);
1736		status = i40e_update_link_info(hw);
1737	}
1738	if (status)
1739		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1740
1741	return status;
1742}
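
/* Editor's illustrative sketch, not part of the upstream driver: how a
 * caller might request full flow control with an atomic link restart.
 * The wrapper name is hypothetical; only i40e_set_fc() and the fields it
 * reads are taken from this file.
 */
static inline void i40e_example_request_full_fc(struct i40e_hw *hw)
{
	enum i40e_status_code status;
	u8 aq_failures = 0;

	hw->fc.requested_mode = I40E_FC_FULL;
	status = i40e_set_fc(hw, &aq_failures, true);
	if (status)
		i40e_debug(hw, I40E_DEBUG_LINK,
			   "set_fc failed: status %d, aq_failures 0x%x\n",
			   status, aq_failures);
}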
1743
1744/**
1745 * i40e_aq_clear_pxe_mode
1746 * @hw: pointer to the hw struct
1747 * @cmd_details: pointer to command details structure or NULL
1748 *
1749 * Tell the firmware that the driver is taking over from PXE
1750 **/
1751i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1752				struct i40e_asq_cmd_details *cmd_details)
1753{
1754	i40e_status status;
1755	struct i40e_aq_desc desc;
1756	struct i40e_aqc_clear_pxe *cmd =
1757		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
1758
1759	i40e_fill_default_direct_cmd_desc(&desc,
1760					  i40e_aqc_opc_clear_pxe_mode);
1761
1762	cmd->rx_cnt = 0x2;
1763
1764	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1765
1766	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1767
1768	return status;
1769}
1770
1771/**
1772 * i40e_aq_set_link_restart_an
1773 * @hw: pointer to the hw struct
1774 * @enable_link: if true: enable link, if false: disable link
1775 * @cmd_details: pointer to command details structure or NULL
1776 *
1777 * Sets up the link and restarts the Auto-Negotiation over the link.
1778 **/
1779i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1780					bool enable_link,
1781					struct i40e_asq_cmd_details *cmd_details)
1782{
1783	struct i40e_aq_desc desc;
1784	struct i40e_aqc_set_link_restart_an *cmd =
1785		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1786	i40e_status status;
1787
1788	i40e_fill_default_direct_cmd_desc(&desc,
1789					  i40e_aqc_opc_set_link_restart_an);
1790
1791	cmd->command = I40E_AQ_PHY_RESTART_AN;
1792	if (enable_link)
1793		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1794	else
1795		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1796
1797	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1798
1799	return status;
1800}
1801
1802/**
1803 * i40e_aq_get_link_info
1804 * @hw: pointer to the hw struct
1805 * @enable_lse: enable/disable LinkStatusEvent reporting
1806 * @link: pointer to link status structure - optional
1807 * @cmd_details: pointer to command details structure or NULL
1808 *
1809 * Returns the link status of the adapter.
1810 **/
1811i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1812				bool enable_lse, struct i40e_link_status *link,
1813				struct i40e_asq_cmd_details *cmd_details)
1814{
1815	struct i40e_aq_desc desc;
1816	struct i40e_aqc_get_link_status *resp =
1817		(struct i40e_aqc_get_link_status *)&desc.params.raw;
1818	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1819	i40e_status status;
1820	bool tx_pause, rx_pause;
1821	u16 command_flags;
1822
1823	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1824
1825	if (enable_lse)
1826		command_flags = I40E_AQ_LSE_ENABLE;
1827	else
1828		command_flags = I40E_AQ_LSE_DISABLE;
1829	resp->command_flags = cpu_to_le16(command_flags);
1830
1831	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1832
1833	if (status)
1834		goto aq_get_link_info_exit;
1835
1836	/* save off old link status information */
1837	hw->phy.link_info_old = *hw_link_info;
1838
1839	/* update link status */
1840	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1841	hw->phy.media_type = i40e_get_media_type(hw);
1842	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1843	hw_link_info->link_info = resp->link_info;
1844	hw_link_info->an_info = resp->an_info;
1845	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1846						 I40E_AQ_CONFIG_FEC_RS_ENA);
1847	hw_link_info->ext_info = resp->ext_info;
1848	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1849	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1850	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1851
1852	/* update fc info */
1853	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1854	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1855	if (tx_pause && rx_pause)
1856		hw->fc.current_mode = I40E_FC_FULL;
1857	else if (tx_pause)
1858		hw->fc.current_mode = I40E_FC_TX_PAUSE;
1859	else if (rx_pause)
1860		hw->fc.current_mode = I40E_FC_RX_PAUSE;
1861	else
1862		hw->fc.current_mode = I40E_FC_NONE;
1863
1864	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1865		hw_link_info->crc_enable = true;
1866	else
1867		hw_link_info->crc_enable = false;
1868
1869	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1870		hw_link_info->lse_enable = true;
1871	else
1872		hw_link_info->lse_enable = false;
1873
1874	if ((hw->mac.type == I40E_MAC_XL710) &&
1875	    (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1876	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1877		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1878
1879	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1880	    hw->mac.type != I40E_MAC_X722) {
1881		__le32 tmp;
1882
1883		memcpy(&tmp, resp->link_type, sizeof(tmp));
1884		hw->phy.phy_types = le32_to_cpu(tmp);
1885		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1886	}
1887
1888	/* save link status information */
1889	if (link)
1890		*link = *hw_link_info;
1891
1892	/* flag cleared so helper functions don't call AQ again */
1893	hw->phy.get_link_info = false;
1894
1895aq_get_link_info_exit:
1896	return status;
1897}
1898
1899/**
1900 * i40e_aq_set_phy_int_mask
1901 * @hw: pointer to the hw struct
1902 * @mask: interrupt mask to be set
1903 * @cmd_details: pointer to command details structure or NULL
1904 *
1905 * Set link interrupt mask.
1906 **/
1907i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1908				     u16 mask,
1909				     struct i40e_asq_cmd_details *cmd_details)
1910{
1911	struct i40e_aq_desc desc;
1912	struct i40e_aqc_set_phy_int_mask *cmd =
1913		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1914	i40e_status status;
1915
1916	i40e_fill_default_direct_cmd_desc(&desc,
1917					  i40e_aqc_opc_set_phy_int_mask);
1918
1919	cmd->event_mask = cpu_to_le16(mask);
1920
1921	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1922
1923	return status;
1924}
1925
1926/**
1927 * i40e_aq_set_phy_debug
1928 * @hw: pointer to the hw struct
1929 * @cmd_flags: debug command flags
1930 * @cmd_details: pointer to command details structure or NULL
1931 *
1932 * Set PHY debug command flags (for example, to reset the external PHY).
1933 **/
1934i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1935				  struct i40e_asq_cmd_details *cmd_details)
1936{
1937	struct i40e_aq_desc desc;
1938	struct i40e_aqc_set_phy_debug *cmd =
1939		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1940	i40e_status status;
1941
1942	i40e_fill_default_direct_cmd_desc(&desc,
1943					  i40e_aqc_opc_set_phy_debug);
1944
1945	cmd->command_flags = cmd_flags;
1946
1947	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1948
1949	return status;
1950}
1951
1952/**
1953 * i40e_aq_add_vsi
1954 * @hw: pointer to the hw struct
1955 * @vsi_ctx: pointer to a vsi context struct
1956 * @cmd_details: pointer to command details structure or NULL
1957 *
1958 * Add a VSI context to the hardware.
1959 **/
1960i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1961				struct i40e_vsi_context *vsi_ctx,
1962				struct i40e_asq_cmd_details *cmd_details)
1963{
1964	struct i40e_aq_desc desc;
1965	struct i40e_aqc_add_get_update_vsi *cmd =
1966		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1967	struct i40e_aqc_add_get_update_vsi_completion *resp =
1968		(struct i40e_aqc_add_get_update_vsi_completion *)
1969		&desc.params.raw;
1970	i40e_status status;
1971
1972	i40e_fill_default_direct_cmd_desc(&desc,
1973					  i40e_aqc_opc_add_vsi);
1974
1975	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1976	cmd->connection_type = vsi_ctx->connection_type;
1977	cmd->vf_id = vsi_ctx->vf_num;
1978	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1979
1980	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1981
1982	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1983				    sizeof(vsi_ctx->info), cmd_details);
1984
1985	if (status)
1986		goto aq_add_vsi_exit;
1987
1988	vsi_ctx->seid = le16_to_cpu(resp->seid);
1989	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1990	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1991	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1992
1993aq_add_vsi_exit:
1994	return status;
1995}
1996
1997/**
1998 * i40e_aq_set_default_vsi
1999 * @hw: pointer to the hw struct
2000 * @seid: vsi number
2001 * @cmd_details: pointer to command details structure or NULL
2002 **/
2003i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
2004				    u16 seid,
2005				    struct i40e_asq_cmd_details *cmd_details)
2006{
2007	struct i40e_aq_desc desc;
2008	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2009		(struct i40e_aqc_set_vsi_promiscuous_modes *)
2010		&desc.params.raw;
2011	i40e_status status;
2012
2013	i40e_fill_default_direct_cmd_desc(&desc,
2014					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2015
2016	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2017	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2018	cmd->seid = cpu_to_le16(seid);
2019
2020	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2021
2022	return status;
2023}
2024
2025/**
2026 * i40e_aq_clear_default_vsi
2027 * @hw: pointer to the hw struct
2028 * @seid: vsi number
2029 * @cmd_details: pointer to command details structure or NULL
2030 **/
2031i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
2032				      u16 seid,
2033				      struct i40e_asq_cmd_details *cmd_details)
2034{
2035	struct i40e_aq_desc desc;
2036	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2037		(struct i40e_aqc_set_vsi_promiscuous_modes *)
2038		&desc.params.raw;
2039	i40e_status status;
2040
2041	i40e_fill_default_direct_cmd_desc(&desc,
2042					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2043
2044	cmd->promiscuous_flags = cpu_to_le16(0);
2045	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2046	cmd->seid = cpu_to_le16(seid);
2047
2048	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2049
2050	return status;
2051}
2052
2053/**
2054 * i40e_aq_set_vsi_unicast_promiscuous
2055 * @hw: pointer to the hw struct
2056 * @seid: vsi number
2057 * @set: set unicast promiscuous enable/disable
2058 * @cmd_details: pointer to command details structure or NULL
2059 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
2060 **/
2061i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
2062				u16 seid, bool set,
2063				struct i40e_asq_cmd_details *cmd_details,
2064				bool rx_only_promisc)
2065{
2066	struct i40e_aq_desc desc;
2067	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2068		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2069	i40e_status status;
2070	u16 flags = 0;
2071
2072	i40e_fill_default_direct_cmd_desc(&desc,
2073					i40e_aqc_opc_set_vsi_promiscuous_modes);
2074
2075	if (set) {
2076		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2077		if (rx_only_promisc &&
2078		    (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
2079		     (hw->aq.api_maj_ver > 1)))
2080			flags |= I40E_AQC_SET_VSI_PROMISC_TX;
2081	}
2082
2083	cmd->promiscuous_flags = cpu_to_le16(flags);
2084
2085	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2086	if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
2087	    (hw->aq.api_maj_ver > 1))
2088		cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
2089
2090	cmd->seid = cpu_to_le16(seid);
2091	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2092
2093	return status;
2094}
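
/* Editor's illustrative sketch, not part of the upstream driver: enabling
 * unicast promiscuous mode (Rx only) on a VSI identified by 'seid'.  The
 * wrapper name is hypothetical; error handling is left to the caller.
 */
static inline i40e_status i40e_example_enable_uc_promisc(struct i40e_hw *hw,
							  u16 seid)
{
	/* set = true, rx_only_promisc = true: do not mirror egress traffic */
	return i40e_aq_set_vsi_unicast_promiscuous(hw, seid, true, NULL, true);
}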
2095
2096/**
2097 * i40e_aq_set_vsi_multicast_promiscuous
2098 * @hw: pointer to the hw struct
2099 * @seid: vsi number
2100 * @set: set multicast promiscuous enable/disable
2101 * @cmd_details: pointer to command details structure or NULL
2102 **/
2103i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2104				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2105{
2106	struct i40e_aq_desc desc;
2107	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2108		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2109	i40e_status status;
2110	u16 flags = 0;
2111
2112	i40e_fill_default_direct_cmd_desc(&desc,
2113					i40e_aqc_opc_set_vsi_promiscuous_modes);
2114
2115	if (set)
2116		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2117
2118	cmd->promiscuous_flags = cpu_to_le16(flags);
2119
2120	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2121
2122	cmd->seid = cpu_to_le16(seid);
2123	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2124
2125	return status;
2126}
2127
2128/**
2129 * i40e_aq_set_vsi_mc_promisc_on_vlan
2130 * @hw: pointer to the hw struct
2131 * @seid: vsi number
2132 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
2133 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2134 * @cmd_details: pointer to command details structure or NULL
2135 **/
2136enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2137							 u16 seid, bool enable,
2138							 u16 vid,
2139				struct i40e_asq_cmd_details *cmd_details)
2140{
2141	struct i40e_aq_desc desc;
2142	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2143		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2144	enum i40e_status_code status;
2145	u16 flags = 0;
2146
2147	i40e_fill_default_direct_cmd_desc(&desc,
2148					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2149
2150	if (enable)
2151		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2152
2153	cmd->promiscuous_flags = cpu_to_le16(flags);
2154	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2155	cmd->seid = cpu_to_le16(seid);
2156	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2157
2158	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2159
2160	return status;
2161}
2162
2163/**
2164 * i40e_aq_set_vsi_uc_promisc_on_vlan
2165 * @hw: pointer to the hw struct
2166 * @seid: vsi number
2167 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2168 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2169 * @cmd_details: pointer to command details structure or NULL
2170 **/
2171enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2172							 u16 seid, bool enable,
2173							 u16 vid,
2174				struct i40e_asq_cmd_details *cmd_details)
2175{
2176	struct i40e_aq_desc desc;
2177	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2178		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2179	enum i40e_status_code status;
2180	u16 flags = 0;
2181
2182	i40e_fill_default_direct_cmd_desc(&desc,
2183					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2184
2185	if (enable)
2186		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2187
2188	cmd->promiscuous_flags = cpu_to_le16(flags);
2189	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2190	cmd->seid = cpu_to_le16(seid);
2191	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2192
2193	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2194
2195	return status;
2196}
2197
2198/**
2199 * i40e_aq_set_vsi_bc_promisc_on_vlan
2200 * @hw: pointer to the hw struct
2201 * @seid: vsi number
2202 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2203 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2204 * @cmd_details: pointer to command details structure or NULL
2205 **/
2206i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2207				u16 seid, bool enable, u16 vid,
2208				struct i40e_asq_cmd_details *cmd_details)
2209{
2210	struct i40e_aq_desc desc;
2211	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2212		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2213	i40e_status status;
2214	u16 flags = 0;
2215
2216	i40e_fill_default_direct_cmd_desc(&desc,
2217					i40e_aqc_opc_set_vsi_promiscuous_modes);
2218
2219	if (enable)
2220		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2221
2222	cmd->promiscuous_flags = cpu_to_le16(flags);
2223	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2224	cmd->seid = cpu_to_le16(seid);
2225	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2226
2227	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2228
2229	return status;
2230}
2231
2232/**
2233 * i40e_aq_set_vsi_broadcast
2234 * @hw: pointer to the hw struct
2235 * @seid: vsi number
2236 * @set_filter: true to set filter, false to clear filter
2237 * @cmd_details: pointer to command details structure or NULL
2238 *
2239 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2240 **/
2241i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2242				u16 seid, bool set_filter,
2243				struct i40e_asq_cmd_details *cmd_details)
2244{
2245	struct i40e_aq_desc desc;
2246	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2247		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2248	i40e_status status;
2249
2250	i40e_fill_default_direct_cmd_desc(&desc,
2251					i40e_aqc_opc_set_vsi_promiscuous_modes);
2252
2253	if (set_filter)
2254		cmd->promiscuous_flags
2255			    |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2256	else
2257		cmd->promiscuous_flags
2258			    &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2259
2260	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2261	cmd->seid = cpu_to_le16(seid);
2262	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2263
2264	return status;
2265}
2266
2267/**
2268 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2269 * @hw: pointer to the hw struct
2270 * @seid: vsi number
2271 * @enable: set VLAN promiscuous enable/disable
2272 * @cmd_details: pointer to command details structure or NULL
2273 **/
2274i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2275				       u16 seid, bool enable,
2276				       struct i40e_asq_cmd_details *cmd_details)
2277{
2278	struct i40e_aq_desc desc;
2279	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2280		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2281	i40e_status status;
2282	u16 flags = 0;
2283
2284	i40e_fill_default_direct_cmd_desc(&desc,
2285					i40e_aqc_opc_set_vsi_promiscuous_modes);
2286	if (enable)
2287		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2288
2289	cmd->promiscuous_flags = cpu_to_le16(flags);
2290	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2291	cmd->seid = cpu_to_le16(seid);
2292
2293	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2294
2295	return status;
2296}
2297
2298/**
2299 * i40e_aq_get_vsi_params - get VSI configuration info
2300 * @hw: pointer to the hw struct
2301 * @vsi_ctx: pointer to a vsi context struct
2302 * @cmd_details: pointer to command details structure or NULL
2303 **/
2304i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2305				struct i40e_vsi_context *vsi_ctx,
2306				struct i40e_asq_cmd_details *cmd_details)
2307{
2308	struct i40e_aq_desc desc;
2309	struct i40e_aqc_add_get_update_vsi *cmd =
2310		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2311	struct i40e_aqc_add_get_update_vsi_completion *resp =
2312		(struct i40e_aqc_add_get_update_vsi_completion *)
2313		&desc.params.raw;
2314	i40e_status status;
2315
2316	i40e_fill_default_direct_cmd_desc(&desc,
2317					  i40e_aqc_opc_get_vsi_parameters);
2318
2319	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2320
2321	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2322
2323	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2324				    sizeof(vsi_ctx->info), NULL);
2325
2326	if (status)
2327		goto aq_get_vsi_params_exit;
2328
2329	vsi_ctx->seid = le16_to_cpu(resp->seid);
2330	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2331	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2332	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2333
2334aq_get_vsi_params_exit:
2335	return status;
2336}
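
/* Editor's illustrative sketch, not part of the upstream driver: reading
 * back the parameters of an existing VSI.  Only the SEID needs to be set
 * before the call; the rest of the context is filled from the response.
 * The wrapper name is hypothetical.
 */
static inline void i40e_example_dump_vsi(struct i40e_hw *hw, u16 seid)
{
	struct i40e_vsi_context ctxt;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = seid;
	if (!i40e_aq_get_vsi_params(hw, &ctxt, NULL))
		i40e_debug(hw, I40E_DEBUG_INIT, "VSI seid %d: number %d\n",
			   ctxt.seid, ctxt.vsi_number);
}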
2337
2338/**
2339 * i40e_aq_update_vsi_params
2340 * @hw: pointer to the hw struct
2341 * @vsi_ctx: pointer to a vsi context struct
2342 * @cmd_details: pointer to command details structure or NULL
2343 *
2344 * Update a VSI context.
2345 **/
2346i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2347				struct i40e_vsi_context *vsi_ctx,
2348				struct i40e_asq_cmd_details *cmd_details)
2349{
2350	struct i40e_aq_desc desc;
2351	struct i40e_aqc_add_get_update_vsi *cmd =
2352		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2353	struct i40e_aqc_add_get_update_vsi_completion *resp =
2354		(struct i40e_aqc_add_get_update_vsi_completion *)
2355		&desc.params.raw;
2356	i40e_status status;
2357
2358	i40e_fill_default_direct_cmd_desc(&desc,
2359					  i40e_aqc_opc_update_vsi_parameters);
2360	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2361
2362	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2363
2364	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2365				    sizeof(vsi_ctx->info), cmd_details);
2366
2367	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2368	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2369
2370	return status;
2371}
2372
2373/**
2374 * i40e_aq_get_switch_config
2375 * @hw: pointer to the hardware structure
2376 * @buf: pointer to the result buffer
2377 * @buf_size: length of the result buffer in bytes
2378 * @start_seid: seid to start for the report, 0 == beginning
2379 * @cmd_details: pointer to command details structure or NULL
2380 *
2381 * Fill the buf with switch configuration returned from AdminQ command
2382 **/
2383i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2384				struct i40e_aqc_get_switch_config_resp *buf,
2385				u16 buf_size, u16 *start_seid,
2386				struct i40e_asq_cmd_details *cmd_details)
2387{
2388	struct i40e_aq_desc desc;
2389	struct i40e_aqc_switch_seid *scfg =
2390		(struct i40e_aqc_switch_seid *)&desc.params.raw;
2391	i40e_status status;
2392
2393	i40e_fill_default_direct_cmd_desc(&desc,
2394					  i40e_aqc_opc_get_switch_config);
2395	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2396	if (buf_size > I40E_AQ_LARGE_BUF)
2397		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2398	scfg->seid = cpu_to_le16(*start_seid);
2399
2400	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2401	*start_seid = le16_to_cpu(scfg->seid);
2402
2403	return status;
2404}
2405
2406/**
2407 * i40e_aq_set_switch_config
2408 * @hw: pointer to the hardware structure
2409 * @flags: bit flag values to set
2410 * @valid_flags: which bit flags to set
2411 * @mode: cloud filter mode
2413 * @cmd_details: pointer to command details structure or NULL
2414 *
2415 * Set switch configuration bits
2416 **/
2417enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2418						u16 flags,
2419						u16 valid_flags, u8 mode,
2420				struct i40e_asq_cmd_details *cmd_details)
2421{
2422	struct i40e_aq_desc desc;
2423	struct i40e_aqc_set_switch_config *scfg =
2424		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
2425	enum i40e_status_code status;
2426
2427	i40e_fill_default_direct_cmd_desc(&desc,
2428					  i40e_aqc_opc_set_switch_config);
2429	scfg->flags = cpu_to_le16(flags);
2430	scfg->valid_flags = cpu_to_le16(valid_flags);
2431	scfg->mode = mode;
2432	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2433		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2434		scfg->first_tag = cpu_to_le16(hw->first_tag);
2435		scfg->second_tag = cpu_to_le16(hw->second_tag);
2436	}
2437	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2438
2439	return status;
2440}
2441
2442/**
2443 * i40e_aq_get_firmware_version
2444 * @hw: pointer to the hw struct
2445 * @fw_major_version: firmware major version
2446 * @fw_minor_version: firmware minor version
2447 * @fw_build: firmware build number
2448 * @api_major_version: admin queue API major version
2449 * @api_minor_version: admin queue API minor version
2450 * @cmd_details: pointer to command details structure or NULL
2451 *
2452 * Get the firmware version from the admin queue commands
2453 **/
2454i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2455				u16 *fw_major_version, u16 *fw_minor_version,
2456				u32 *fw_build,
2457				u16 *api_major_version, u16 *api_minor_version,
2458				struct i40e_asq_cmd_details *cmd_details)
2459{
2460	struct i40e_aq_desc desc;
2461	struct i40e_aqc_get_version *resp =
2462		(struct i40e_aqc_get_version *)&desc.params.raw;
2463	i40e_status status;
2464
2465	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2466
2467	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2468
2469	if (!status) {
2470		if (fw_major_version)
2471			*fw_major_version = le16_to_cpu(resp->fw_major);
2472		if (fw_minor_version)
2473			*fw_minor_version = le16_to_cpu(resp->fw_minor);
2474		if (fw_build)
2475			*fw_build = le32_to_cpu(resp->fw_build);
2476		if (api_major_version)
2477			*api_major_version = le16_to_cpu(resp->api_major);
2478		if (api_minor_version)
2479			*api_minor_version = le16_to_cpu(resp->api_minor);
2480	}
2481
2482	return status;
2483}
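
/* Editor's illustrative sketch, not part of the upstream driver: querying
 * the firmware and AdminQ API versions directly.  The same values are also
 * available in hw->aq after AdminQ initialization; the wrapper name is
 * hypothetical.
 */
static inline void i40e_example_print_fw_version(struct i40e_hw *hw)
{
	u16 fw_maj = 0, fw_min = 0, api_maj = 0, api_min = 0;
	u32 fw_build = 0;

	if (!i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min, &fw_build,
					  &api_maj, &api_min, NULL))
		i40e_debug(hw, I40E_DEBUG_INIT,
			   "fw %d.%d build %u, api %d.%d\n",
			   fw_maj, fw_min, fw_build, api_maj, api_min);
}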
2484
2485/**
2486 * i40e_aq_send_driver_version
2487 * @hw: pointer to the hw struct
2488 * @dv: driver's major, minor version
2489 * @cmd_details: pointer to command details structure or NULL
2490 *
2491 * Send the driver version to the firmware
2492 **/
2493i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2494				struct i40e_driver_version *dv,
2495				struct i40e_asq_cmd_details *cmd_details)
2496{
2497	struct i40e_aq_desc desc;
2498	struct i40e_aqc_driver_version *cmd =
2499		(struct i40e_aqc_driver_version *)&desc.params.raw;
2500	i40e_status status;
2501	u16 len;
2502
2503	if (dv == NULL)
2504		return I40E_ERR_PARAM;
2505
2506	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2507
2508	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2509	cmd->driver_major_ver = dv->major_version;
2510	cmd->driver_minor_ver = dv->minor_version;
2511	cmd->driver_build_ver = dv->build_version;
2512	cmd->driver_subbuild_ver = dv->subbuild_version;
2513
2514	len = 0;
2515	while (len < sizeof(dv->driver_string) &&
2516	       (dv->driver_string[len] < 0x80) &&
2517	       dv->driver_string[len])
2518		len++;
2519	status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2520				       len, cmd_details);
2521
2522	return status;
2523}
2524
2525/**
2526 * i40e_get_link_status - get status of the HW network link
2527 * @hw: pointer to the hw struct
2528 * @link_up: pointer to bool (true/false = linkup/linkdown)
2529 *
2530 * Sets *link_up to true if the link is up, false if it is down.
2531 * The value of *link_up is not valid if the returned status is non-zero.
2532 *
2533 * Side effect: LinkStatusEvent reporting becomes enabled
2534 **/
2535i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2536{
2537	i40e_status status = 0;
2538
2539	if (hw->phy.get_link_info) {
2540		status = i40e_update_link_info(hw);
2541
2542		if (status)
2543			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2544				   status);
2545	}
2546
2547	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2548
2549	return status;
2550}
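
/* Editor's illustrative sketch, not part of the upstream driver: a simple
 * link-state query built on the helper above.  The wrapper name is
 * hypothetical; a failed query is treated as link down here.
 */
static inline bool i40e_example_link_is_up(struct i40e_hw *hw)
{
	bool link_up = false;

	if (i40e_get_link_status(hw, &link_up))
		return false;	/* treat query failure as link down */

	return link_up;
}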
2551
2552/**
2553 * i40e_update_link_info - update status of the HW network link
2554 * @hw: pointer to the hw struct
2555 **/
2556noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2557{
2558	struct i40e_aq_get_phy_abilities_resp abilities;
2559	i40e_status status = 0;
2560
2561	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2562	if (status)
2563		return status;
2564
2565	/* extra checking needed to ensure link info to user is timely */
2566	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2567	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2568	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2569		status = i40e_aq_get_phy_capabilities(hw, false, false,
2570						      &abilities, NULL);
2571		if (status)
2572			return status;
2573
2574		hw->phy.link_info.req_fec_info =
2575			abilities.fec_cfg_curr_mod_ext_info &
2576			(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
2577
2578		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2579		       sizeof(hw->phy.link_info.module_type));
2580	}
2581
2582	return status;
2583}
2584
2585/**
2586 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2587 * @hw: pointer to the hw struct
2588 * @uplink_seid: the MAC or other gizmo SEID
2589 * @downlink_seid: the VSI SEID
2590 * @enabled_tc: bitmap of TCs to be enabled
2591 * @default_port: true for default port VSI, false for control port
2592 * @veb_seid: pointer to where to put the resulting VEB SEID
2593 * @enable_stats: true to turn on VEB stats
2594 * @cmd_details: pointer to command details structure or NULL
2595 *
2596 * This asks the FW to add a VEB between the uplink and downlink
2597 * elements.  If the uplink SEID is 0, this will be a floating VEB.
2598 **/
2599i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2600				u16 downlink_seid, u8 enabled_tc,
2601				bool default_port, u16 *veb_seid,
2602				bool enable_stats,
2603				struct i40e_asq_cmd_details *cmd_details)
2604{
2605	struct i40e_aq_desc desc;
2606	struct i40e_aqc_add_veb *cmd =
2607		(struct i40e_aqc_add_veb *)&desc.params.raw;
2608	struct i40e_aqc_add_veb_completion *resp =
2609		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2610	i40e_status status;
2611	u16 veb_flags = 0;
2612
2613	/* SEIDs need to either both be set or both be 0 for floating VEB */
2614	if (!!uplink_seid != !!downlink_seid)
2615		return I40E_ERR_PARAM;
2616
2617	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2618
2619	cmd->uplink_seid = cpu_to_le16(uplink_seid);
2620	cmd->downlink_seid = cpu_to_le16(downlink_seid);
2621	cmd->enable_tcs = enabled_tc;
2622	if (!uplink_seid)
2623		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2624	if (default_port)
2625		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2626	else
2627		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2628
2629	/* reverse logic here: set the bitflag to disable the stats */
2630	if (!enable_stats)
2631		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2632
2633	cmd->veb_flags = cpu_to_le16(veb_flags);
2634
2635	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2636
2637	if (!status && veb_seid)
2638		*veb_seid = le16_to_cpu(resp->veb_seid);
2639
2640	return status;
2641}
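
/* Editor's illustrative sketch, not part of the upstream driver: inserting
 * a statistics-enabled data VEB between a MAC uplink and a VSI.  The SEID
 * values are assumed to come from the caller's switch configuration and the
 * wrapper name is hypothetical.
 */
static inline i40e_status i40e_example_add_veb(struct i40e_hw *hw,
					       u16 uplink_seid, u16 vsi_seid,
					       u8 enabled_tc, u16 *veb_seid)
{
	/* default_port = false (data port), enable_stats = true */
	return i40e_aq_add_veb(hw, uplink_seid, vsi_seid, enabled_tc,
			       false, veb_seid, true, NULL);
}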
2642
2643/**
2644 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2645 * @hw: pointer to the hw struct
2646 * @veb_seid: the SEID of the VEB to query
2647 * @switch_id: the uplink switch id
2648 * @floating: set to true if the VEB is floating
2649 * @statistic_index: index of the stats counter block for this VEB
2650 * @vebs_used: number of VEBs used by the function
2651 * @vebs_free: total number of VEBs not reserved by any function
2652 * @cmd_details: pointer to command details structure or NULL
2653 *
2654 * This retrieves the parameters for a particular VEB, specified by
2655 * veb_seid, and returns them to the caller.
2656 **/
2657i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2658				u16 veb_seid, u16 *switch_id,
2659				bool *floating, u16 *statistic_index,
2660				u16 *vebs_used, u16 *vebs_free,
2661				struct i40e_asq_cmd_details *cmd_details)
2662{
2663	struct i40e_aq_desc desc;
2664	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2665		(struct i40e_aqc_get_veb_parameters_completion *)
2666		&desc.params.raw;
2667	i40e_status status;
2668
2669	if (veb_seid == 0)
2670		return I40E_ERR_PARAM;
2671
2672	i40e_fill_default_direct_cmd_desc(&desc,
2673					  i40e_aqc_opc_get_veb_parameters);
2674	cmd_resp->seid = cpu_to_le16(veb_seid);
2675
2676	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2677	if (status)
2678		goto get_veb_exit;
2679
2680	if (switch_id)
2681		*switch_id = le16_to_cpu(cmd_resp->switch_id);
2682	if (statistic_index)
2683		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2684	if (vebs_used)
2685		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2686	if (vebs_free)
2687		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2688	if (floating) {
2689		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2690
2691		if (flags & I40E_AQC_ADD_VEB_FLOATING)
2692			*floating = true;
2693		else
2694			*floating = false;
2695	}
2696
2697get_veb_exit:
2698	return status;
2699}
2700
2701/**
2702 * i40e_aq_add_macvlan
2703 * @hw: pointer to the hw struct
2704 * @seid: VSI for the mac address
2705 * @mv_list: list of macvlans to be added
2706 * @count: length of the list
2707 * @cmd_details: pointer to command details structure or NULL
2708 *
2709 * Add MAC/VLAN addresses to the HW filtering
2710 **/
2711i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2712			struct i40e_aqc_add_macvlan_element_data *mv_list,
2713			u16 count, struct i40e_asq_cmd_details *cmd_details)
2714{
2715	struct i40e_aq_desc desc;
2716	struct i40e_aqc_macvlan *cmd =
2717		(struct i40e_aqc_macvlan *)&desc.params.raw;
2718	i40e_status status;
2719	u16 buf_size;
2720	int i;
2721
2722	if (count == 0 || !mv_list || !hw)
2723		return I40E_ERR_PARAM;
2724
2725	buf_size = count * sizeof(*mv_list);
2726
2727	/* prep the rest of the request */
2728	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
2729	cmd->num_addresses = cpu_to_le16(count);
2730	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2731	cmd->seid[1] = 0;
2732	cmd->seid[2] = 0;
2733
2734	for (i = 0; i < count; i++)
2735		if (is_multicast_ether_addr(mv_list[i].mac_addr))
2736			mv_list[i].flags |=
2737			       cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2738
2739	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2740	if (buf_size > I40E_AQ_LARGE_BUF)
2741		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2742
2743	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2744				       cmd_details);
2745
2746	return status;
2747}
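
/* Editor's illustrative sketch, not part of the upstream driver: adding a
 * single, VLAN-agnostic MAC filter to a VSI.  The ADD_PERM/IGNORE_VLAN flag
 * names come from i40e_adminq_cmd.h; only i40e_aq_add_macvlan() itself is
 * taken from this file and the wrapper name is hypothetical.
 */
static inline i40e_status i40e_example_add_mac_filter(struct i40e_hw *hw,
						      u16 seid, const u8 *mac)
{
	struct i40e_aqc_add_macvlan_element_data elem;

	memset(&elem, 0, sizeof(elem));
	ether_addr_copy(elem.mac_addr, mac);
	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERM |
				 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);

	return i40e_aq_add_macvlan(hw, seid, &elem, 1, NULL);
}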
2748
2749/**
2750 * i40e_aq_remove_macvlan
2751 * @hw: pointer to the hw struct
2752 * @seid: VSI for the mac address
2753 * @mv_list: list of macvlans to be removed
2754 * @count: length of the list
2755 * @cmd_details: pointer to command details structure or NULL
2756 *
2757 * Remove MAC/VLAN addresses from the HW filtering
2758 **/
2759i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2760			struct i40e_aqc_remove_macvlan_element_data *mv_list,
2761			u16 count, struct i40e_asq_cmd_details *cmd_details)
2762{
2763	struct i40e_aq_desc desc;
2764	struct i40e_aqc_macvlan *cmd =
2765		(struct i40e_aqc_macvlan *)&desc.params.raw;
2766	i40e_status status;
2767	u16 buf_size;
2768
2769	if (count == 0 || !mv_list || !hw)
2770		return I40E_ERR_PARAM;
2771
2772	buf_size = count * sizeof(*mv_list);
2773
2774	/* prep the rest of the request */
2775	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2776	cmd->num_addresses = cpu_to_le16(count);
2777	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2778	cmd->seid[1] = 0;
2779	cmd->seid[2] = 0;
2780
2781	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2782	if (buf_size > I40E_AQ_LARGE_BUF)
2783		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2784
2785	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2786				       cmd_details);
2787
2788	return status;
2789}
2790
2791/**
2792 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2793 * @hw: pointer to the hw struct
2794 * @opcode: AQ opcode for add or delete mirror rule
2795 * @sw_seid: Switch SEID (to which rule refers)
2796 * @rule_type: Rule Type (ingress/egress/VLAN)
2797 * @id: Destination VSI SEID or Rule ID
2798 * @count: length of the list
2799 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2800 * @cmd_details: pointer to command details structure or NULL
2801 * @rule_id: Rule ID returned from FW
2802 * @rules_used: Number of rules used in internal switch
2803 * @rules_free: Number of rules free in internal switch
2804 *
2805 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2806 * VEBs/VEPA elements only
2807 **/
2808static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2809				u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2810				u16 count, __le16 *mr_list,
2811				struct i40e_asq_cmd_details *cmd_details,
2812				u16 *rule_id, u16 *rules_used, u16 *rules_free)
2813{
2814	struct i40e_aq_desc desc;
2815	struct i40e_aqc_add_delete_mirror_rule *cmd =
2816		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2817	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2818	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2819	i40e_status status;
2820	u16 buf_size;
2821
2822	buf_size = count * sizeof(*mr_list);
2823
2824	/* prep the rest of the request */
2825	i40e_fill_default_direct_cmd_desc(&desc, opcode);
2826	cmd->seid = cpu_to_le16(sw_seid);
2827	cmd->rule_type = cpu_to_le16(rule_type &
2828				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
2829	cmd->num_entries = cpu_to_le16(count);
2830	/* Dest VSI for add, rule_id for delete */
2831	cmd->destination = cpu_to_le16(id);
2832	if (mr_list) {
2833		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2834						I40E_AQ_FLAG_RD));
2835		if (buf_size > I40E_AQ_LARGE_BUF)
2836			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2837	}
2838
2839	status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2840				       cmd_details);
2841	if (!status ||
2842	    hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2843		if (rule_id)
2844			*rule_id = le16_to_cpu(resp->rule_id);
2845		if (rules_used)
2846			*rules_used = le16_to_cpu(resp->mirror_rules_used);
2847		if (rules_free)
2848			*rules_free = le16_to_cpu(resp->mirror_rules_free);
2849	}
2850	return status;
2851}
2852
2853/**
2854 * i40e_aq_add_mirrorrule - add a mirror rule
2855 * @hw: pointer to the hw struct
2856 * @sw_seid: Switch SEID (to which rule refers)
2857 * @rule_type: Rule Type (ingress/egress/VLAN)
2858 * @dest_vsi: SEID of VSI to which packets will be mirrored
2859 * @count: length of the list
2860 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2861 * @cmd_details: pointer to command details structure or NULL
2862 * @rule_id: Rule ID returned from FW
2863 * @rules_used: Number of rules used in internal switch
2864 * @rules_free: Number of rules free in internal switch
2865 *
2866 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2867 **/
2868i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2869			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2870			struct i40e_asq_cmd_details *cmd_details,
2871			u16 *rule_id, u16 *rules_used, u16 *rules_free)
2872{
2873	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2874	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2875		if (count == 0 || !mr_list)
2876			return I40E_ERR_PARAM;
2877	}
2878
2879	return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2880				  rule_type, dest_vsi, count, mr_list,
2881				  cmd_details, rule_id, rules_used, rules_free);
2882}
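
/* Editor's illustrative sketch, not part of the upstream driver: mirroring
 * all ingress traffic on a switch element to a destination VSI.  For the
 * ALL_INGRESS rule type no VSI/VLAN list is needed, so count is 0 and
 * mr_list is NULL; the wrapper name is hypothetical.
 */
static inline i40e_status i40e_example_mirror_ingress(struct i40e_hw *hw,
						      u16 sw_seid,
						      u16 dest_vsi_seid,
						      u16 *rule_id)
{
	u16 rules_used = 0, rules_free = 0;

	return i40e_aq_add_mirrorrule(hw, sw_seid,
				      I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
				      dest_vsi_seid, 0, NULL, NULL,
				      rule_id, &rules_used, &rules_free);
}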
2883
2884/**
2885 * i40e_aq_delete_mirrorrule - delete a mirror rule
2886 * @hw: pointer to the hw struct
2887 * @sw_seid: Switch SEID (to which rule refers)
2888 * @rule_type: Rule Type (ingress/egress/VLAN)
2889 * @count: length of the list
2890 * @rule_id: Rule ID that is returned in the receive desc as part of
2891 *		add_mirrorrule.
2892 * @mr_list: list of mirrored VLAN IDs to be removed
2893 * @cmd_details: pointer to command details structure or NULL
2894 * @rules_used: Number of rules used in internal switch
2895 * @rules_free: Number of rules free in internal switch
2896 *
2897 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2898 **/
2899i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2900			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2901			struct i40e_asq_cmd_details *cmd_details,
2902			u16 *rules_used, u16 *rules_free)
2903{
2904	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2905	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2906		/* count and mr_list shall be valid for rule_type INGRESS VLAN
2907		 * mirroring. For other rule types, count and mr_list are not
2908		 * used and need not be valid.
2909		 */
2910		if (count == 0 || !mr_list)
2911			return I40E_ERR_PARAM;
2912	}
2913
2914	return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2915				  rule_type, rule_id, count, mr_list,
2916				  cmd_details, NULL, rules_used, rules_free);
2917}
2918
2919/**
2920 * i40e_aq_send_msg_to_vf
2921 * @hw: pointer to the hardware structure
2922 * @vfid: VF id to send msg
2923 * @v_opcode: opcodes for VF-PF communication
2924 * @v_retval: return error code
2925 * @msg: pointer to the msg buffer
2926 * @msglen: msg length
2927 * @cmd_details: pointer to command details
2928 *
2929 * send msg to vf
2930 **/
2931i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2932				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2933				struct i40e_asq_cmd_details *cmd_details)
2934{
2935	struct i40e_aq_desc desc;
2936	struct i40e_aqc_pf_vf_message *cmd =
2937		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2938	i40e_status status;
2939
2940	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2941	cmd->id = cpu_to_le32(vfid);
2942	desc.cookie_high = cpu_to_le32(v_opcode);
2943	desc.cookie_low = cpu_to_le32(v_retval);
2944	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2945	if (msglen) {
2946		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2947						I40E_AQ_FLAG_RD));
2948		if (msglen > I40E_AQ_LARGE_BUF)
2949			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2950		desc.datalen = cpu_to_le16(msglen);
2951	}
2952	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2953
2954	return status;
2955}
2956
2957/**
2958 * i40e_aq_debug_read_register
2959 * @hw: pointer to the hw struct
2960 * @reg_addr: register address
2961 * @reg_val: register value
2962 * @cmd_details: pointer to command details structure or NULL
2963 *
2964 * Read the register using the admin queue commands
2965 **/
2966i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
2967				u32 reg_addr, u64 *reg_val,
2968				struct i40e_asq_cmd_details *cmd_details)
2969{
2970	struct i40e_aq_desc desc;
2971	struct i40e_aqc_debug_reg_read_write *cmd_resp =
2972		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2973	i40e_status status;
2974
2975	if (reg_val == NULL)
2976		return I40E_ERR_PARAM;
2977
2978	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2979
2980	cmd_resp->address = cpu_to_le32(reg_addr);
2981
2982	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2983
2984	if (!status) {
2985		*reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2986			   (u64)le32_to_cpu(cmd_resp->value_low);
2987	}
2988
2989	return status;
2990}
2991
2992/**
2993 * i40e_aq_debug_write_register
2994 * @hw: pointer to the hw struct
2995 * @reg_addr: register address
2996 * @reg_val: register value
2997 * @cmd_details: pointer to command details structure or NULL
2998 *
2999 * Write to a register using the admin queue commands
3000 **/
3001i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
3002					u32 reg_addr, u64 reg_val,
3003					struct i40e_asq_cmd_details *cmd_details)
3004{
3005	struct i40e_aq_desc desc;
3006	struct i40e_aqc_debug_reg_read_write *cmd =
3007		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3008	i40e_status status;
3009
3010	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
3011
3012	cmd->address = cpu_to_le32(reg_addr);
3013	cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
3014	cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
3015
3016	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3017
3018	return status;
3019}
3020
3021/**
3022 * i40e_aq_request_resource
3023 * @hw: pointer to the hw struct
3024 * @resource: resource id
3025 * @access: access type
3026 * @sdp_number: resource number
3027 * @timeout: the maximum time in ms that the driver may hold the resource
3028 * @cmd_details: pointer to command details structure or NULL
3029 *
3030 * requests common resource using the admin queue commands
3031 **/
3032i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3033				enum i40e_aq_resources_ids resource,
3034				enum i40e_aq_resource_access_type access,
3035				u8 sdp_number, u64 *timeout,
3036				struct i40e_asq_cmd_details *cmd_details)
3037{
3038	struct i40e_aq_desc desc;
3039	struct i40e_aqc_request_resource *cmd_resp =
3040		(struct i40e_aqc_request_resource *)&desc.params.raw;
3041	i40e_status status;
3042
3043	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3044
3045	cmd_resp->resource_id = cpu_to_le16(resource);
3046	cmd_resp->access_type = cpu_to_le16(access);
3047	cmd_resp->resource_number = cpu_to_le32(sdp_number);
3048
3049	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3050	/* The completion specifies the maximum time in ms that the driver
3051	 * may hold the resource in the Timeout field.
3052	 * If the resource is held by someone else, the command completes with
3053	 * busy return value and the timeout field indicates the maximum time
3054	 * the current owner of the resource has to free it.
3055	 */
3056	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3057		*timeout = le32_to_cpu(cmd_resp->timeout);
3058
3059	return status;
3060}
3061
3062/**
3063 * i40e_aq_release_resource
3064 * @hw: pointer to the hw struct
3065 * @resource: resource id
3066 * @sdp_number: resource number
3067 * @cmd_details: pointer to command details structure or NULL
3068 *
3069 * release common resource using the admin queue commands
3070 **/
3071i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3072				enum i40e_aq_resources_ids resource,
3073				u8 sdp_number,
3074				struct i40e_asq_cmd_details *cmd_details)
3075{
3076	struct i40e_aq_desc desc;
3077	struct i40e_aqc_request_resource *cmd =
3078		(struct i40e_aqc_request_resource *)&desc.params.raw;
3079	i40e_status status;
3080
3081	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3082
3083	cmd->resource_id = cpu_to_le16(resource);
3084	cmd->resource_number = cpu_to_le32(sdp_number);
3085
3086	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3087
3088	return status;
3089}
3090
3091/**
3092 * i40e_aq_read_nvm
3093 * @hw: pointer to the hw struct
3094 * @module_pointer: module pointer location in words from the NVM beginning
3095 * @offset: byte offset from the module beginning
3096 * @length: length of the section to be read (in bytes from the offset)
3097 * @data: command buffer (size [bytes] = length)
3098 * @last_command: tells if this is the last command in a series
3099 * @cmd_details: pointer to command details structure or NULL
3100 *
3101 * Read the NVM using the admin queue commands
3102 **/
3103i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3104				u32 offset, u16 length, void *data,
3105				bool last_command,
3106				struct i40e_asq_cmd_details *cmd_details)
3107{
3108	struct i40e_aq_desc desc;
3109	struct i40e_aqc_nvm_update *cmd =
3110		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3111	i40e_status status;
3112
3113	/* The highest byte of the offset must be zero. */
3114	if (offset & 0xFF000000) {
3115		status = I40E_ERR_PARAM;
3116		goto i40e_aq_read_nvm_exit;
3117	}
3118
3119	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3120
3121	/* If this is the last command in a series, set the proper flag. */
3122	if (last_command)
3123		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3124	cmd->module_pointer = module_pointer;
3125	cmd->offset = cpu_to_le32(offset);
3126	cmd->length = cpu_to_le16(length);
3127
3128	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3129	if (length > I40E_AQ_LARGE_BUF)
3130		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3131
3132	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3133
3134i40e_aq_read_nvm_exit:
3135	return status;
3136}
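
/* Editor's illustrative sketch, not part of the upstream driver: reading one
 * word from the flat NVM image (module_pointer 0), bracketed by the usual
 * acquire/release of the NVM resource.  The byte offset is caller-supplied
 * and the wrapper name is hypothetical.
 */
static inline i40e_status i40e_example_read_nvm_word(struct i40e_hw *hw,
						     u32 byte_offset,
						     u16 *word)
{
	i40e_status status;

	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status)
		return status;

	status = i40e_aq_read_nvm(hw, 0, byte_offset, sizeof(*word),
				  word, true, NULL);
	i40e_release_nvm(hw);

	return status;
}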
3137
3138/**
3139 * i40e_aq_erase_nvm
3140 * @hw: pointer to the hw struct
3141 * @module_pointer: module pointer location in words from the NVM beginning
3142 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3143 * @length: length of the section to be erased (expressed in 4 KB)
3144 * @last_command: tells if this is the last command in a series
3145 * @cmd_details: pointer to command details structure or NULL
3146 *
3147 * Erase the NVM sector using the admin queue commands
3148 **/
3149i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3150			      u32 offset, u16 length, bool last_command,
3151			      struct i40e_asq_cmd_details *cmd_details)
3152{
3153	struct i40e_aq_desc desc;
3154	struct i40e_aqc_nvm_update *cmd =
3155		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3156	i40e_status status;
3157
3158	/* The highest byte of the offset must be zero. */
3159	if (offset & 0xFF000000) {
3160		status = I40E_ERR_PARAM;
3161		goto i40e_aq_erase_nvm_exit;
3162	}
3163
3164	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3165
3166	/* If this is the last command in a series, set the proper flag. */
3167	if (last_command)
3168		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3169	cmd->module_pointer = module_pointer;
3170	cmd->offset = cpu_to_le32(offset);
3171	cmd->length = cpu_to_le16(length);
3172
3173	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3174
3175i40e_aq_erase_nvm_exit:
3176	return status;
3177}
3178
3179/**
3180 * i40e_parse_discover_capabilities
3181 * @hw: pointer to the hw struct
3182 * @buff: pointer to a buffer containing device/function capability records
3183 * @cap_count: number of capability records in the list
3184 * @list_type_opc: type of capabilities list to parse
3185 *
3186 * Parse the device/function capabilities list.
3187 **/
3188static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3189				     u32 cap_count,
3190				     enum i40e_admin_queue_opc list_type_opc)
3191{
3192	struct i40e_aqc_list_capabilities_element_resp *cap;
3193	u32 valid_functions, num_functions;
3194	u32 number, logical_id, phys_id;
3195	struct i40e_hw_capabilities *p;
3196	u16 id, ocp_cfg_word0;
3197	i40e_status status;
3198	u8 major_rev;
3199	u32 i = 0;
3200
3201	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3202
3203	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3204		p = &hw->dev_caps;
3205	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3206		p = &hw->func_caps;
3207	else
3208		return;
3209
3210	for (i = 0; i < cap_count; i++, cap++) {
3211		id = le16_to_cpu(cap->id);
3212		number = le32_to_cpu(cap->number);
3213		logical_id = le32_to_cpu(cap->logical_id);
3214		phys_id = le32_to_cpu(cap->phys_id);
3215		major_rev = cap->major_rev;
3216
3217		switch (id) {
3218		case I40E_AQ_CAP_ID_SWITCH_MODE:
3219			p->switch_mode = number;
3220			break;
3221		case I40E_AQ_CAP_ID_MNG_MODE:
3222			p->management_mode = number;
3223			if (major_rev > 1) {
3224				p->mng_protocols_over_mctp = logical_id;
3225				i40e_debug(hw, I40E_DEBUG_INIT,
3226					   "HW Capability: Protocols over MCTP = %d\n",
3227					   p->mng_protocols_over_mctp);
3228			} else {
3229				p->mng_protocols_over_mctp = 0;
3230			}
3231			break;
3232		case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3233			p->npar_enable = number;
3234			break;
3235		case I40E_AQ_CAP_ID_OS2BMC_CAP:
3236			p->os2bmc = number;
3237			break;
3238		case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3239			p->valid_functions = number;
3240			break;
3241		case I40E_AQ_CAP_ID_SRIOV:
3242			if (number == 1)
3243				p->sr_iov_1_1 = true;
3244			break;
3245		case I40E_AQ_CAP_ID_VF:
3246			p->num_vfs = number;
3247			p->vf_base_id = logical_id;
3248			break;
3249		case I40E_AQ_CAP_ID_VMDQ:
3250			if (number == 1)
3251				p->vmdq = true;
3252			break;
3253		case I40E_AQ_CAP_ID_8021QBG:
3254			if (number == 1)
3255				p->evb_802_1_qbg = true;
3256			break;
3257		case I40E_AQ_CAP_ID_8021QBR:
3258			if (number == 1)
3259				p->evb_802_1_qbh = true;
3260			break;
3261		case I40E_AQ_CAP_ID_VSI:
3262			p->num_vsis = number;
3263			break;
3264		case I40E_AQ_CAP_ID_DCB:
3265			if (number == 1) {
3266				p->dcb = true;
3267				p->enabled_tcmap = logical_id;
3268				p->maxtc = phys_id;
3269			}
3270			break;
3271		case I40E_AQ_CAP_ID_FCOE:
3272			if (number == 1)
3273				p->fcoe = true;
3274			break;
3275		case I40E_AQ_CAP_ID_ISCSI:
3276			if (number == 1)
3277				p->iscsi = true;
3278			break;
3279		case I40E_AQ_CAP_ID_RSS:
3280			p->rss = true;
3281			p->rss_table_size = number;
3282			p->rss_table_entry_width = logical_id;
3283			break;
3284		case I40E_AQ_CAP_ID_RXQ:
3285			p->num_rx_qp = number;
3286			p->base_queue = phys_id;
3287			break;
3288		case I40E_AQ_CAP_ID_TXQ:
3289			p->num_tx_qp = number;
3290			p->base_queue = phys_id;
3291			break;
3292		case I40E_AQ_CAP_ID_MSIX:
3293			p->num_msix_vectors = number;
3294			i40e_debug(hw, I40E_DEBUG_INIT,
3295				   "HW Capability: MSIX vector count = %d\n",
3296				   p->num_msix_vectors);
3297			break;
3298		case I40E_AQ_CAP_ID_VF_MSIX:
3299			p->num_msix_vectors_vf = number;
3300			break;
3301		case I40E_AQ_CAP_ID_FLEX10:
3302			if (major_rev == 1) {
3303				if (number == 1) {
3304					p->flex10_enable = true;
3305					p->flex10_capable = true;
3306				}
3307			} else {
3308				/* Capability revision >= 2 */
3309				if (number & 1)
3310					p->flex10_enable = true;
3311				if (number & 2)
3312					p->flex10_capable = true;
3313			}
3314			p->flex10_mode = logical_id;
3315			p->flex10_status = phys_id;
3316			break;
3317		case I40E_AQ_CAP_ID_CEM:
3318			if (number == 1)
3319				p->mgmt_cem = true;
3320			break;
3321		case I40E_AQ_CAP_ID_IWARP:
3322			if (number == 1)
3323				p->iwarp = true;
3324			break;
3325		case I40E_AQ_CAP_ID_LED:
3326			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3327				p->led[phys_id] = true;
3328			break;
3329		case I40E_AQ_CAP_ID_SDP:
3330			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3331				p->sdp[phys_id] = true;
3332			break;
3333		case I40E_AQ_CAP_ID_MDIO:
3334			if (number == 1) {
3335				p->mdio_port_num = phys_id;
3336				p->mdio_port_mode = logical_id;
3337			}
3338			break;
3339		case I40E_AQ_CAP_ID_1588:
3340			if (number == 1)
3341				p->ieee_1588 = true;
3342			break;
3343		case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3344			p->fd = true;
3345			p->fd_filters_guaranteed = number;
3346			p->fd_filters_best_effort = logical_id;
3347			break;
3348		case I40E_AQ_CAP_ID_WSR_PROT:
3349			p->wr_csr_prot = (u64)number;
3350			p->wr_csr_prot |= (u64)logical_id << 32;
3351			break;
3352		case I40E_AQ_CAP_ID_NVM_MGMT:
3353			if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3354				p->sec_rev_disabled = true;
3355			if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3356				p->update_disabled = true;
3357			break;
3358		default:
3359			break;
3360		}
3361	}
3362
3363	if (p->fcoe)
3364		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3365
3366	/* Software override ensuring FCoE is disabled if npar or mfp
3367	 * mode is enabled, because FCoE is not supported in these modes.
3368	 */
3369	if (p->npar_enable || p->flex10_enable)
3370		p->fcoe = false;
3371
3372	/* count the enabled ports (aka the "not disabled" ports) */
3373	hw->num_ports = 0;
3374	for (i = 0; i < 4; i++) {
3375		u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3376		u64 port_cfg = 0;
3377
3378		/* use AQ read to get the physical register offset instead
3379		 * of the port relative offset
3380		 */
3381		i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3382		if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3383			hw->num_ports++;
3384	}
3385
3386	/* OCP cards case: if a mezz is removed the Ethernet port is
3387	 * reported as disabled in the PRTGEN_CNF register. An additional NVM
3388	 * read is needed to check whether we are dealing with an OCP card.
3389	 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
3390	 * physical ports results in a wrong partition id calculation and thus
3391	 * breaks WoL support.
3392	 */
3393	if (hw->mac.type == I40E_MAC_X722) {
3394		if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3395			status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3396						  2 * I40E_SR_OCP_CFG_WORD0,
3397						  sizeof(ocp_cfg_word0),
3398						  &ocp_cfg_word0, true, NULL);
3399			if (!status &&
3400			    (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3401				hw->num_ports = 4;
3402			i40e_release_nvm(hw);
3403		}
3404	}
3405
3406	valid_functions = p->valid_functions;
3407	num_functions = 0;
3408	while (valid_functions) {
3409		if (valid_functions & 1)
3410			num_functions++;
3411		valid_functions >>= 1;
3412	}
3413
3414	/* partition id is 1-based, and functions are evenly spread
3415	 * across the ports as partitions
3416	 */
3417	if (hw->num_ports != 0) {
3418		hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3419		hw->num_partitions = num_functions / hw->num_ports;
3420	}
3421
3422	/* additional HW specific goodies that might
3423	 * someday be HW version specific
3424	 */
3425	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3426}
3427
3428/**
3429 * i40e_aq_discover_capabilities
3430 * @hw: pointer to the hw struct
3431 * @buff: a virtual buffer to hold the capabilities
3432 * @buff_size: Size of the virtual buffer
3433 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3434 * @list_type_opc: capabilities type to discover - pass in the command opcode
3435 * @cmd_details: pointer to command details structure or NULL
3436 *
3437 * Get the device capabilities descriptions from the firmware
3438 **/
3439i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3440				void *buff, u16 buff_size, u16 *data_size,
3441				enum i40e_admin_queue_opc list_type_opc,
3442				struct i40e_asq_cmd_details *cmd_details)
3443{
3444	struct i40e_aqc_list_capabilites *cmd;
3445	struct i40e_aq_desc desc;
3446	i40e_status status = 0;
3447
3448	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3449
3450	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3451		list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3452		status = I40E_ERR_PARAM;
3453		goto exit;
3454	}
3455
3456	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3457
3458	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3459	if (buff_size > I40E_AQ_LARGE_BUF)
3460		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3461
3462	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3463	*data_size = le16_to_cpu(desc.datalen);
3464
3465	if (status)
3466		goto exit;
3467
3468	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3469					 list_type_opc);
3470
3471exit:
3472	return status;
3473}
3474
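/* Illustrative caller sketch (not from the driver): grow the capability
 * buffer and retry when firmware answers ENOMEM, in which case data_size
 * reports the size it needs.  Assumes an initialized struct i40e_hw and
 * kzalloc()/kfree(); the function name is made up for the example.
 */
static i40e_status example_list_func_caps(struct i40e_hw *hw)
{
	u16 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	i40e_status status;
	u16 data_size = 0;
	void *buf;

	do {
		buf = kzalloc(buf_len, GFP_KERNEL);
		if (!buf)
			return I40E_ERR_NO_MEMORY;

		status = i40e_aq_discover_capabilities(hw, buf, buf_len,
						       &data_size,
						       i40e_aqc_opc_list_func_capabilities,
						       NULL);
		kfree(buf);

		/* on ENOMEM the firmware reports the required buffer size */
		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
			buf_len = data_size;
		else
			break;
	} while (buf_len);

	return status;
}
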
3475/**
3476 * i40e_aq_update_nvm
3477 * @hw: pointer to the hw struct
3478 * @module_pointer: module pointer location in words from the NVM beginning
3479 * @offset: byte offset from the module beginning
3480 * @length: length of the section to be written (in bytes from the offset)
3481 * @data: command buffer (size [bytes] = length)
3482 * @last_command: tells if this is the last command in a series
3483 * @preservation_flags: Preservation mode flags
3484 * @cmd_details: pointer to command details structure or NULL
3485 *
3486 * Update the NVM using the admin queue commands
3487 **/
3488i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3489			       u32 offset, u16 length, void *data,
3490				bool last_command, u8 preservation_flags,
3491			       struct i40e_asq_cmd_details *cmd_details)
3492{
3493	struct i40e_aq_desc desc;
3494	struct i40e_aqc_nvm_update *cmd =
3495		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3496	i40e_status status;
3497
3498	/* In offset the highest byte must be zeroed. */
3499	if (offset & 0xFF000000) {
3500		status = I40E_ERR_PARAM;
3501		goto i40e_aq_update_nvm_exit;
3502	}
3503
3504	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3505
3506	/* If this is the last command in a series, set the proper flag. */
3507	if (last_command)
3508		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3509	if (hw->mac.type == I40E_MAC_X722) {
3510		if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3511			cmd->command_flags |=
3512				(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3513				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3514		else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3515			cmd->command_flags |=
3516				(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3517				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3518	}
3519	cmd->module_pointer = module_pointer;
3520	cmd->offset = cpu_to_le32(offset);
3521	cmd->length = cpu_to_le16(length);
3522
3523	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3524	if (length > I40E_AQ_LARGE_BUF)
3525		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3526
3527	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3528
3529i40e_aq_update_nvm_exit:
3530	return status;
3531}
3532
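/* Illustrative caller sketch (not from the driver): acquire the NVM
 * resource for writing, push one buffer as a single (last) update command
 * with no preservation flags, then release the resource.  The function
 * name is made up for the example.
 */
static i40e_status example_write_nvm(struct i40e_hw *hw, u32 offset,
				     u16 length, void *data)
{
	i40e_status status;

	status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
	if (status)
		return status;

	/* module_pointer 0 = flat offset, last_command = true,
	 * preservation_flags = 0 (none requested)
	 */
	status = i40e_aq_update_nvm(hw, 0, offset, length, data, true, 0,
				    NULL);

	i40e_release_nvm(hw);
	return status;
}
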
3533/**
3534 * i40e_aq_rearrange_nvm
3535 * @hw: pointer to the hw struct
3536 * @rearrange_nvm: defines direction of rearrangement
3537 * @cmd_details: pointer to command details structure or NULL
3538 *
3539 * Rearrange NVM structure, available only for transition FW
3540 **/
3541i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3542				  u8 rearrange_nvm,
3543				  struct i40e_asq_cmd_details *cmd_details)
3544{
3545	struct i40e_aqc_nvm_update *cmd;
3546	i40e_status status;
3547	struct i40e_aq_desc desc;
3548
3549	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3550
3551	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3552
3553	rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3554			 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3555
3556	if (!rearrange_nvm) {
3557		status = I40E_ERR_PARAM;
3558		goto i40e_aq_rearrange_nvm_exit;
3559	}
3560
3561	cmd->command_flags |= rearrange_nvm;
3562	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3563
3564i40e_aq_rearrange_nvm_exit:
3565	return status;
3566}
3567
3568/**
3569 * i40e_aq_get_lldp_mib
3570 * @hw: pointer to the hw struct
3571 * @bridge_type: type of bridge requested
3572 * @mib_type: Local, Remote or both Local and Remote MIBs
3573 * @buff: pointer to a user supplied buffer to store the MIB block
3574 * @buff_size: size of the buffer (in bytes)
3575 * @local_len: length of the returned Local LLDP MIB
3576 * @remote_len: length of the returned Remote LLDP MIB
3577 * @cmd_details: pointer to command details structure or NULL
3578 *
3579 * Requests the complete LLDP MIB (entire packet).
3580 **/
3581i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3582				u8 mib_type, void *buff, u16 buff_size,
3583				u16 *local_len, u16 *remote_len,
3584				struct i40e_asq_cmd_details *cmd_details)
3585{
3586	struct i40e_aq_desc desc;
3587	struct i40e_aqc_lldp_get_mib *cmd =
3588		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3589	struct i40e_aqc_lldp_get_mib *resp =
3590		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3591	i40e_status status;
3592
3593	if (buff_size == 0 || !buff)
3594		return I40E_ERR_PARAM;
3595
3596	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3597	/* Indirect Command */
3598	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3599
3600	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3601	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3602		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3603
3604	desc.datalen = cpu_to_le16(buff_size);
3605
3606	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3607	if (buff_size > I40E_AQ_LARGE_BUF)
3608		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3609
3610	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3611	if (!status) {
3612		if (local_len != NULL)
3613			*local_len = le16_to_cpu(resp->local_len);
3614		if (remote_len != NULL)
3615			*remote_len = le16_to_cpu(resp->remote_len);
3616	}
3617
3618	return status;
3619}
3620
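/* Illustrative caller sketch (not from the driver): read the local LLDP
 * MIB from the nearest bridge into a temporary buffer.  The 512-byte size
 * is an arbitrary choice for the example; the MIB type and bridge type
 * constants come from the admin queue command definitions.
 */
static i40e_status example_get_local_lldp_mib(struct i40e_hw *hw)
{
	u16 local_len = 0, remote_len = 0;
	i40e_status status;
	u8 *mib;

	mib = kzalloc(512, GFP_KERNEL);
	if (!mib)
		return I40E_ERR_NO_MEMORY;

	status = i40e_aq_get_lldp_mib(hw,
				      I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				      I40E_AQ_LLDP_MIB_LOCAL,
				      mib, 512, &local_len, &remote_len,
				      NULL);
	if (!status)
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "local LLDP MIB length %u\n", local_len);

	kfree(mib);
	return status;
}
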
3621/**
3622 * i40e_aq_cfg_lldp_mib_change_event
3623 * @hw: pointer to the hw struct
3624 * @enable_update: Enable or Disable event posting
3625 * @cmd_details: pointer to command details structure or NULL
3626 *
3627 * Enable or Disable posting of an event on ARQ when LLDP MIB
3628 * associated with the interface changes
3629 **/
3630i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3631				bool enable_update,
3632				struct i40e_asq_cmd_details *cmd_details)
3633{
3634	struct i40e_aq_desc desc;
3635	struct i40e_aqc_lldp_update_mib *cmd =
3636		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3637	i40e_status status;
3638
3639	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3640
3641	if (!enable_update)
3642		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3643
3644	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3645
3646	return status;
3647}
3648
3649/**
3650 * i40e_aq_restore_lldp
3651 * @hw: pointer to the hw struct
3652 * @setting: pointer to factory setting variable or NULL
3653 * @restore: True if factory settings should be restored
3654 * @cmd_details: pointer to command details structure or NULL
3655 *
3656 * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
3657 * only return the factory setting in the AQ response.
3658 **/
3659enum i40e_status_code
3660i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3661		     struct i40e_asq_cmd_details *cmd_details)
3662{
3663	struct i40e_aq_desc desc;
3664	struct i40e_aqc_lldp_restore *cmd =
3665		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
3666	i40e_status status;
3667
3668	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3669		i40e_debug(hw, I40E_DEBUG_ALL,
3670			   "Restore LLDP not supported by current FW version.\n");
3671		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3672	}
3673
3674	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3675
3676	if (restore)
3677		cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3678
3679	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3680
3681	if (setting)
3682		*setting = cmd->command & 1;
3683
3684	return status;
3685}
3686
3687/**
3688 * i40e_aq_stop_lldp
3689 * @hw: pointer to the hw struct
3690 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3691 * @persist: True if stop of LLDP should be persistent across power cycles
3692 * @cmd_details: pointer to command details structure or NULL
3693 *
3694 * Stop or Shutdown the embedded LLDP Agent
3695 **/
3696i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3697				bool persist,
3698				struct i40e_asq_cmd_details *cmd_details)
3699{
3700	struct i40e_aq_desc desc;
3701	struct i40e_aqc_lldp_stop *cmd =
3702		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
3703	i40e_status status;
3704
3705	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3706
3707	if (shutdown_agent)
3708		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3709
3710	if (persist) {
3711		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3712			cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3713		else
3714			i40e_debug(hw, I40E_DEBUG_ALL,
3715				   "Persistent Stop LLDP not supported by current FW version.\n");
3716	}
3717
3718	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3719
3720	return status;
3721}
3722
3723/**
3724 * i40e_aq_start_lldp
3725 * @hw: pointer to the hw struct
3727 * @persist: True if start of LLDP should be persistent across power cycles
3729 * @cmd_details: pointer to command details structure or NULL
3730 *
3731 * Start the embedded LLDP Agent on all ports.
3732 **/
3733i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3734			       struct i40e_asq_cmd_details *cmd_details)
3735{
3736	struct i40e_aq_desc desc;
3737	struct i40e_aqc_lldp_start *cmd =
3738		(struct i40e_aqc_lldp_start *)&desc.params.raw;
3739	i40e_status status;
3740
3741	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3742
3743	cmd->command = I40E_AQ_LLDP_AGENT_START;
3744
3745	if (persist) {
3746		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3747			cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3748		else
3749			i40e_debug(hw, I40E_DEBUG_ALL,
3750				   "Persistent Start LLDP not supported by current FW version.\n");
3751	}
3752
3753	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3754
3755	return status;
3756}
3757
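/* Illustrative caller sketch (not from the driver): bounce the embedded
 * LLDP agent without touching the persistent (NVM) setting.  The function
 * name is made up for the example.
 */
static i40e_status example_restart_fw_lldp(struct i40e_hw *hw)
{
	i40e_status status;

	/* shut the agent down, non-persistently */
	status = i40e_aq_stop_lldp(hw, true, false, NULL);
	if (status)
		return status;

	/* and start it again, also non-persistently */
	return i40e_aq_start_lldp(hw, false, NULL);
}
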
3758/**
3759 * i40e_aq_set_dcb_parameters
3760 * @hw: pointer to the hw struct
3761 * @cmd_details: pointer to command details structure or NULL
3762 * @dcb_enable: True if DCB configuration needs to be applied
3763 *
3764 **/
3765enum i40e_status_code
3766i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3767			   struct i40e_asq_cmd_details *cmd_details)
3768{
3769	struct i40e_aq_desc desc;
3770	struct i40e_aqc_set_dcb_parameters *cmd =
3771		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3772	i40e_status status;
3773
3774	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3775		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3776
3777	i40e_fill_default_direct_cmd_desc(&desc,
3778					  i40e_aqc_opc_set_dcb_parameters);
3779
3780	if (dcb_enable) {
3781		cmd->valid_flags = I40E_DCB_VALID;
3782		cmd->command = I40E_AQ_DCB_SET_AGENT;
3783	}
3784	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3785
3786	return status;
3787}
3788
3789/**
3790 * i40e_aq_get_cee_dcb_config
3791 * @hw: pointer to the hw struct
3792 * @buff: response buffer that stores CEE operational configuration
3793 * @buff_size: size of the buffer passed
3794 * @cmd_details: pointer to command details structure or NULL
3795 *
3796 * Get CEE DCBX mode operational configuration from firmware
3797 **/
3798i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3799				       void *buff, u16 buff_size,
3800				       struct i40e_asq_cmd_details *cmd_details)
3801{
3802	struct i40e_aq_desc desc;
3803	i40e_status status;
3804
3805	if (buff_size == 0 || !buff)
3806		return I40E_ERR_PARAM;
3807
3808	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3809
3810	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3811	status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3812				       cmd_details);
3813
3814	return status;
3815}
3816
3817/**
3818 * i40e_aq_add_udp_tunnel
3819 * @hw: pointer to the hw struct
3820 * @udp_port: the UDP port to add in Host byte order
3821 * @protocol_index: protocol index type
3822 * @filter_index: pointer to filter index
3823 * @cmd_details: pointer to command details structure or NULL
3824 *
3825 * Note: Firmware expects the udp_port value to be in Little Endian format,
3826 * and this function will call cpu_to_le16 to convert from Host byte order to
3827 * Little Endian order.
3828 **/
3829i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3830				u16 udp_port, u8 protocol_index,
3831				u8 *filter_index,
3832				struct i40e_asq_cmd_details *cmd_details)
3833{
3834	struct i40e_aq_desc desc;
3835	struct i40e_aqc_add_udp_tunnel *cmd =
3836		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3837	struct i40e_aqc_del_udp_tunnel_completion *resp =
3838		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3839	i40e_status status;
3840
3841	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3842
3843	cmd->udp_port = cpu_to_le16(udp_port);
3844	cmd->protocol_type = protocol_index;
3845
3846	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3847
3848	if (!status && filter_index)
3849		*filter_index = resp->index;
3850
3851	return status;
3852}
3853
3854/**
3855 * i40e_aq_del_udp_tunnel
3856 * @hw: pointer to the hw struct
3857 * @index: filter index
3858 * @cmd_details: pointer to command details structure or NULL
3859 **/
3860i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3861				struct i40e_asq_cmd_details *cmd_details)
3862{
3863	struct i40e_aq_desc desc;
3864	struct i40e_aqc_remove_udp_tunnel *cmd =
3865		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3866	i40e_status status;
3867
3868	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3869
3870	cmd->index = index;
3871
3872	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3873
3874	return status;
3875}
3876
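/* Illustrative caller sketch (not from the driver): offload a VXLAN UDP
 * port and later remove it again with the filter index the firmware
 * handed back.  The tunnel type constant comes from the admin queue
 * command definitions; the function names are made up for the example.
 */
static i40e_status example_add_vxlan_port(struct i40e_hw *hw, u16 port,
					  u8 *filter_index)
{
	/* port is passed in host byte order; the helper converts it */
	return i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
				      filter_index, NULL);
}

static i40e_status example_del_vxlan_port(struct i40e_hw *hw, u8 filter_index)
{
	return i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
}
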
3877/**
3878 * i40e_aq_delete_element - Delete switch element
3879 * @hw: pointer to the hw struct
3880 * @seid: the SEID to delete from the switch
3881 * @cmd_details: pointer to command details structure or NULL
3882 *
3883 * This deletes a switch element from the switch.
3884 **/
3885i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3886				struct i40e_asq_cmd_details *cmd_details)
3887{
3888	struct i40e_aq_desc desc;
3889	struct i40e_aqc_switch_seid *cmd =
3890		(struct i40e_aqc_switch_seid *)&desc.params.raw;
3891	i40e_status status;
3892
3893	if (seid == 0)
3894		return I40E_ERR_PARAM;
3895
3896	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3897
3898	cmd->seid = cpu_to_le16(seid);
3899
3900	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3901
3902	return status;
3903}
3904
3905/**
3906 * i40e_aq_dcb_updated - DCB Updated Command
3907 * @hw: pointer to the hw struct
3908 * @cmd_details: pointer to command details structure or NULL
3909 *
3910 * EMP will return when the shared RPB settings have been
3911 * recomputed and modified. The retval field in the descriptor
3912 * will be set to 0 when RPB is modified.
3913 **/
3914i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
3915				struct i40e_asq_cmd_details *cmd_details)
3916{
3917	struct i40e_aq_desc desc;
3918	i40e_status status;
3919
3920	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3921
3922	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3923
3924	return status;
3925}
3926
3927/**
3928 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3929 * @hw: pointer to the hw struct
3930 * @seid: seid for the physical port/switching component/vsi
3931 * @buff: Indirect buffer to hold data parameters and response
3932 * @buff_size: Indirect buffer size
3933 * @opcode: Tx scheduler AQ command opcode
3934 * @cmd_details: pointer to command details structure or NULL
3935 *
3936 * Generic command handler for Tx scheduler AQ commands
3937 **/
3938static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3939				void *buff, u16 buff_size,
3940				 enum i40e_admin_queue_opc opcode,
3941				struct i40e_asq_cmd_details *cmd_details)
3942{
3943	struct i40e_aq_desc desc;
3944	struct i40e_aqc_tx_sched_ind *cmd =
3945		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3946	i40e_status status;
3947	bool cmd_param_flag = false;
3948
3949	switch (opcode) {
3950	case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3951	case i40e_aqc_opc_configure_vsi_tc_bw:
3952	case i40e_aqc_opc_enable_switching_comp_ets:
3953	case i40e_aqc_opc_modify_switching_comp_ets:
3954	case i40e_aqc_opc_disable_switching_comp_ets:
3955	case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3956	case i40e_aqc_opc_configure_switching_comp_bw_config:
3957		cmd_param_flag = true;
3958		break;
3959	case i40e_aqc_opc_query_vsi_bw_config:
3960	case i40e_aqc_opc_query_vsi_ets_sla_config:
3961	case i40e_aqc_opc_query_switching_comp_ets_config:
3962	case i40e_aqc_opc_query_port_ets_config:
3963	case i40e_aqc_opc_query_switching_comp_bw_config:
3964		cmd_param_flag = false;
3965		break;
3966	default:
3967		return I40E_ERR_PARAM;
3968	}
3969
3970	i40e_fill_default_direct_cmd_desc(&desc, opcode);
3971
3972	/* Indirect command */
3973	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3974	if (cmd_param_flag)
3975		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3976	if (buff_size > I40E_AQ_LARGE_BUF)
3977		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3978
3979	desc.datalen = cpu_to_le16(buff_size);
3980
3981	cmd->vsi_seid = cpu_to_le16(seid);
3982
3983	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3984
3985	return status;
3986}
3987
3988/**
3989 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3990 * @hw: pointer to the hw struct
3991 * @seid: VSI seid
3992 * @credit: BW limit credits (0 = disabled)
3993 * @max_credit: Max BW limit credits
3994 * @cmd_details: pointer to command details structure or NULL
3995 **/
3996i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3997				u16 seid, u16 credit, u8 max_credit,
3998				struct i40e_asq_cmd_details *cmd_details)
3999{
4000	struct i40e_aq_desc desc;
4001	struct i40e_aqc_configure_vsi_bw_limit *cmd =
4002		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
4003	i40e_status status;
4004
4005	i40e_fill_default_direct_cmd_desc(&desc,
4006					  i40e_aqc_opc_configure_vsi_bw_limit);
4007
4008	cmd->vsi_seid = cpu_to_le16(seid);
4009	cmd->credit = cpu_to_le16(credit);
4010	cmd->max_credit = max_credit;
4011
4012	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4013
4014	return status;
4015}
4016
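/* Illustrative caller sketch (not from the driver): cap a VSI's transmit
 * rate, assuming the 50 Mbps credit granularity used elsewhere in the
 * driver; max burst credits are left at 0 for simplicity and the function
 * name is made up for the example.
 */
static i40e_status example_limit_vsi_rate(struct i40e_hw *hw, u16 seid,
					  u32 max_tx_rate_mbps)
{
	/* a credit value of 0 disables the limiter */
	u16 credits = (u16)(max_tx_rate_mbps / 50);

	return i40e_aq_config_vsi_bw_limit(hw, seid, credits, 0, NULL);
}
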
4017/**
4018 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
4019 * @hw: pointer to the hw struct
4020 * @seid: VSI seid
4021 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
4022 * @cmd_details: pointer to command details structure or NULL
4023 **/
4024i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
4025			u16 seid,
4026			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
4027			struct i40e_asq_cmd_details *cmd_details)
4028{
4029	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4030				    i40e_aqc_opc_configure_vsi_tc_bw,
4031				    cmd_details);
4032}
4033
4034/**
4035 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4036 * @hw: pointer to the hw struct
4037 * @seid: seid of the switching component connected to Physical Port
4038 * @ets_data: Buffer holding ETS parameters
4039 * @opcode: Tx scheduler AQ command opcode
4040 * @cmd_details: pointer to command details structure or NULL
4041 **/
4042i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4043		u16 seid,
4044		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4045		enum i40e_admin_queue_opc opcode,
4046		struct i40e_asq_cmd_details *cmd_details)
4047{
4048	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4049				    sizeof(*ets_data), opcode, cmd_details);
4050}
4051
4052/**
4053 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4054 * @hw: pointer to the hw struct
4055 * @seid: seid of the switching component
4056 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4057 * @cmd_details: pointer to command details structure or NULL
4058 **/
4059i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4060	u16 seid,
4061	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4062	struct i40e_asq_cmd_details *cmd_details)
4063{
4064	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4065			    i40e_aqc_opc_configure_switching_comp_bw_config,
4066			    cmd_details);
4067}
4068
4069/**
4070 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4071 * @hw: pointer to the hw struct
4072 * @seid: seid of the VSI
4073 * @bw_data: Buffer to hold VSI BW configuration
4074 * @cmd_details: pointer to command details structure or NULL
4075 **/
4076i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4077			u16 seid,
4078			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4079			struct i40e_asq_cmd_details *cmd_details)
4080{
4081	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4082				    i40e_aqc_opc_query_vsi_bw_config,
4083				    cmd_details);
4084}
4085
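/* Illustrative caller sketch (not from the driver): query a VSI's BW
 * configuration and log its port BW limit.  The port_bw_limit field name
 * is assumed from the admin queue response layout; the function name is
 * made up for the example.
 */
static i40e_status example_log_vsi_bw_limit(struct i40e_hw *hw, u16 vsi_seid)
{
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	i40e_status status;

	status = i40e_aq_query_vsi_bw_config(hw, vsi_seid, &bw_config, NULL);
	if (!status)
		i40e_debug(hw, I40E_DEBUG_ALL, "VSI %u port BW limit %u\n",
			   vsi_seid, le16_to_cpu(bw_config.port_bw_limit));

	return status;
}
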
4086/**
4087 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4088 * @hw: pointer to the hw struct
4089 * @seid: seid of the VSI
4090 * @bw_data: Buffer to hold VSI BW configuration per TC
4091 * @cmd_details: pointer to command details structure or NULL
4092 **/
4093i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4094			u16 seid,
4095			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4096			struct i40e_asq_cmd_details *cmd_details)
4097{
4098	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4099				    i40e_aqc_opc_query_vsi_ets_sla_config,
4100				    cmd_details);
4101}
4102
4103/**
4104 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4105 * @hw: pointer to the hw struct
4106 * @seid: seid of the switching component
4107 * @bw_data: Buffer to hold switching component's per TC BW config
4108 * @cmd_details: pointer to command details structure or NULL
4109 **/
4110i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4111		u16 seid,
4112		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4113		struct i40e_asq_cmd_details *cmd_details)
4114{
4115	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4116				   i40e_aqc_opc_query_switching_comp_ets_config,
4117				   cmd_details);
4118}
4119
4120/**
4121 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4122 * @hw: pointer to the hw struct
4123 * @seid: seid of the VSI or switching component connected to Physical Port
4124 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4125 * @cmd_details: pointer to command details structure or NULL
4126 **/
4127i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4128			u16 seid,
4129			struct i40e_aqc_query_port_ets_config_resp *bw_data,
4130			struct i40e_asq_cmd_details *cmd_details)
4131{
4132	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4133				    i40e_aqc_opc_query_port_ets_config,
4134				    cmd_details);
4135}
4136
4137/**
4138 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4139 * @hw: pointer to the hw struct
4140 * @seid: seid of the switching component
4141 * @bw_data: Buffer to hold switching component's BW configuration
4142 * @cmd_details: pointer to command details structure or NULL
4143 **/
4144i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4145		u16 seid,
4146		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4147		struct i40e_asq_cmd_details *cmd_details)
4148{
4149	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4150				    i40e_aqc_opc_query_switching_comp_bw_config,
4151				    cmd_details);
4152}
4153
4154/**
4155 * i40e_validate_filter_settings
4156 * @hw: pointer to the hardware structure
4157 * @settings: Filter control settings
4158 *
4159 * Check and validate the filter control settings passed.
4160 * The function checks for the valid filter/context sizes being
4161 * passed for FCoE and PE.
4162 *
4163 * Returns 0 if the values passed are valid and within
4164 * range else returns an error.
4165 **/
4166static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4167				struct i40e_filter_control_settings *settings)
4168{
4169	u32 fcoe_cntx_size, fcoe_filt_size;
4170	u32 pe_cntx_size, pe_filt_size;
4171	u32 fcoe_fmax;
4172	u32 val;
4173
4174	/* Validate FCoE settings passed */
4175	switch (settings->fcoe_filt_num) {
4176	case I40E_HASH_FILTER_SIZE_1K:
4177	case I40E_HASH_FILTER_SIZE_2K:
4178	case I40E_HASH_FILTER_SIZE_4K:
4179	case I40E_HASH_FILTER_SIZE_8K:
4180	case I40E_HASH_FILTER_SIZE_16K:
4181	case I40E_HASH_FILTER_SIZE_32K:
4182		fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4183		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4184		break;
4185	default:
4186		return I40E_ERR_PARAM;
4187	}
4188
4189	switch (settings->fcoe_cntx_num) {
4190	case I40E_DMA_CNTX_SIZE_512:
4191	case I40E_DMA_CNTX_SIZE_1K:
4192	case I40E_DMA_CNTX_SIZE_2K:
4193	case I40E_DMA_CNTX_SIZE_4K:
4194		fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4195		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4196		break;
4197	default:
4198		return I40E_ERR_PARAM;
4199	}
4200
4201	/* Validate PE settings passed */
4202	switch (settings->pe_filt_num) {
4203	case I40E_HASH_FILTER_SIZE_1K:
4204	case I40E_HASH_FILTER_SIZE_2K:
4205	case I40E_HASH_FILTER_SIZE_4K:
4206	case I40E_HASH_FILTER_SIZE_8K:
4207	case I40E_HASH_FILTER_SIZE_16K:
4208	case I40E_HASH_FILTER_SIZE_32K:
4209	case I40E_HASH_FILTER_SIZE_64K:
4210	case I40E_HASH_FILTER_SIZE_128K:
4211	case I40E_HASH_FILTER_SIZE_256K:
4212	case I40E_HASH_FILTER_SIZE_512K:
4213	case I40E_HASH_FILTER_SIZE_1M:
4214		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4215		pe_filt_size <<= (u32)settings->pe_filt_num;
4216		break;
4217	default:
4218		return I40E_ERR_PARAM;
4219	}
4220
4221	switch (settings->pe_cntx_num) {
4222	case I40E_DMA_CNTX_SIZE_512:
4223	case I40E_DMA_CNTX_SIZE_1K:
4224	case I40E_DMA_CNTX_SIZE_2K:
4225	case I40E_DMA_CNTX_SIZE_4K:
4226	case I40E_DMA_CNTX_SIZE_8K:
4227	case I40E_DMA_CNTX_SIZE_16K:
4228	case I40E_DMA_CNTX_SIZE_32K:
4229	case I40E_DMA_CNTX_SIZE_64K:
4230	case I40E_DMA_CNTX_SIZE_128K:
4231	case I40E_DMA_CNTX_SIZE_256K:
4232		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4233		pe_cntx_size <<= (u32)settings->pe_cntx_num;
4234		break;
4235	default:
4236		return I40E_ERR_PARAM;
4237	}
4238
4239	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4240	val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4241	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4242		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4243	if (fcoe_filt_size + fcoe_cntx_size >  fcoe_fmax)
4244		return I40E_ERR_INVALID_SIZE;
4245
4246	return 0;
4247}
4248
4249/**
4250 * i40e_set_filter_control
4251 * @hw: pointer to the hardware structure
4252 * @settings: Filter control settings
4253 *
4254 * Set the Queue Filters for PE/FCoE and enable filters required
4255 * for a single PF. It is expected that these settings are programmed
4256 * at the driver initialization time.
4257 **/
4258i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4259				struct i40e_filter_control_settings *settings)
4260{
4261	i40e_status ret = 0;
4262	u32 hash_lut_size = 0;
4263	u32 val;
4264
4265	if (!settings)
4266		return I40E_ERR_PARAM;
4267
4268	/* Validate the input settings */
4269	ret = i40e_validate_filter_settings(hw, settings);
4270	if (ret)
4271		return ret;
4272
4273	/* Read the PF Queue Filter control register */
4274	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4275
4276	/* Program required PE hash buckets for the PF */
4277	val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4278	val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4279		I40E_PFQF_CTL_0_PEHSIZE_MASK;
4280	/* Program required PE contexts for the PF */
4281	val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4282	val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4283		I40E_PFQF_CTL_0_PEDSIZE_MASK;
4284
4285	/* Program required FCoE hash buckets for the PF */
4286	val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4287	val |= ((u32)settings->fcoe_filt_num <<
4288			I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4289		I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4290	/* Program required FCoE DDP contexts for the PF */
4291	val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4292	val |= ((u32)settings->fcoe_cntx_num <<
4293			I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4294		I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4295
4296	/* Program Hash LUT size for the PF */
4297	val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4298	if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4299		hash_lut_size = 1;
4300	val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4301		I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4302
4303	/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4304	if (settings->enable_fdir)
4305		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4306	if (settings->enable_ethtype)
4307		val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4308	if (settings->enable_macvlan)
4309		val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4310
4311	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4312
4313	return 0;
4314}
4315
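/* Illustrative caller sketch (not from the driver): program the smallest
 * valid FCoE/PE table sizes, a 128-entry RSS LUT and the common filter
 * types at init time.  I40E_HASH_LUT_SIZE_128 is assumed from the hash
 * LUT size enum; the function name is made up for the example.
 */
static i40e_status example_init_filter_control(struct i40e_hw *hw)
{
	struct i40e_filter_control_settings settings = {0};

	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
	settings.enable_fdir = true;
	settings.enable_ethtype = true;
	settings.enable_macvlan = true;

	return i40e_set_filter_control(hw, &settings);
}
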
4316/**
4317 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4318 * @hw: pointer to the hw struct
4319 * @mac_addr: MAC address to use in the filter
4320 * @ethtype: Ethertype to use in the filter
4321 * @flags: Flags that need to be applied to the filter
4322 * @vsi_seid: seid of the control VSI
4323 * @queue: VSI queue number to send the packet to
4324 * @is_add: Add control packet filter if True else remove
4325 * @stats: Structure to hold information on control filter counts
4326 * @cmd_details: pointer to command details structure or NULL
4327 *
4328 * This command will add or remove a control packet filter for a control VSI.
4329 * In return it will update the total perfect filter count in the stats
4330 * member.
4331 **/
4332i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4333				u8 *mac_addr, u16 ethtype, u16 flags,
4334				u16 vsi_seid, u16 queue, bool is_add,
4335				struct i40e_control_filter_stats *stats,
4336				struct i40e_asq_cmd_details *cmd_details)
4337{
4338	struct i40e_aq_desc desc;
4339	struct i40e_aqc_add_remove_control_packet_filter *cmd =
4340		(struct i40e_aqc_add_remove_control_packet_filter *)
4341		&desc.params.raw;
4342	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4343		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
4344		&desc.params.raw;
4345	i40e_status status;
4346
4347	if (vsi_seid == 0)
4348		return I40E_ERR_PARAM;
4349
4350	if (is_add) {
4351		i40e_fill_default_direct_cmd_desc(&desc,
4352				i40e_aqc_opc_add_control_packet_filter);
4353		cmd->queue = cpu_to_le16(queue);
4354	} else {
4355		i40e_fill_default_direct_cmd_desc(&desc,
4356				i40e_aqc_opc_remove_control_packet_filter);
4357	}
4358
4359	if (mac_addr)
4360		ether_addr_copy(cmd->mac, mac_addr);
4361
4362	cmd->etype = cpu_to_le16(ethtype);
4363	cmd->flags = cpu_to_le16(flags);
4364	cmd->seid = cpu_to_le16(vsi_seid);
4365
4366	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4367
4368	if (!status && stats) {
4369		stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4370		stats->etype_used = le16_to_cpu(resp->etype_used);
4371		stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4372		stats->etype_free = le16_to_cpu(resp->etype_free);
4373	}
4374
4375	return status;
4376}
4377
4378/**
4379 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4380 * @hw: pointer to the hw struct
4381 * @seid: VSI seid to add ethertype filter from
4382 **/
4383void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4384						    u16 seid)
4385{
4386#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4387	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4388		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4389		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4390	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4391	i40e_status status;
4392
4393	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4394						       seid, 0, true, NULL,
4395						       NULL);
4396	if (status)
4397		hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4398}
4399
4400/**
4401 * i40e_aq_alternate_read
4402 * @hw: pointer to the hardware structure
4403 * @reg_addr0: address of first dword to be read
4404 * @reg_val0: pointer for data read from 'reg_addr0'
4405 * @reg_addr1: address of second dword to be read
4406 * @reg_val1: pointer for data read from 'reg_addr1'
4407 *
4408 * Read one or two dwords from alternate structure. Fields are indicated
4409 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4410 * is not passed then only register at 'reg_addr0' is read.
4411 *
4412 **/
4413static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4414					  u32 reg_addr0, u32 *reg_val0,
4415					  u32 reg_addr1, u32 *reg_val1)
4416{
4417	struct i40e_aq_desc desc;
4418	struct i40e_aqc_alternate_write *cmd_resp =
4419		(struct i40e_aqc_alternate_write *)&desc.params.raw;
4420	i40e_status status;
4421
4422	if (!reg_val0)
4423		return I40E_ERR_PARAM;
4424
4425	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4426	cmd_resp->address0 = cpu_to_le32(reg_addr0);
4427	cmd_resp->address1 = cpu_to_le32(reg_addr1);
4428
4429	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4430
4431	if (!status) {
4432		*reg_val0 = le32_to_cpu(cmd_resp->data0);
4433
4434		if (reg_val1)
4435			*reg_val1 = le32_to_cpu(cmd_resp->data1);
4436	}
4437
4438	return status;
4439}
4440
4441/**
4442 * i40e_aq_resume_port_tx
4443 * @hw: pointer to the hardware structure
4444 * @cmd_details: pointer to command details structure or NULL
4445 *
4446 * Resume port's Tx traffic
4447 **/
4448i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4449				   struct i40e_asq_cmd_details *cmd_details)
4450{
4451	struct i40e_aq_desc desc;
4452	i40e_status status;
4453
4454	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4455
4456	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4457
4458	return status;
4459}
4460
4461/**
4462 * i40e_set_pci_config_data - store PCI bus info
4463 * @hw: pointer to hardware structure
4464 * @link_status: the link status word from PCI config space
4465 *
4466 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4467 **/
4468void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4469{
4470	hw->bus.type = i40e_bus_type_pci_express;
4471
4472	switch (link_status & PCI_EXP_LNKSTA_NLW) {
4473	case PCI_EXP_LNKSTA_NLW_X1:
4474		hw->bus.width = i40e_bus_width_pcie_x1;
4475		break;
4476	case PCI_EXP_LNKSTA_NLW_X2:
4477		hw->bus.width = i40e_bus_width_pcie_x2;
4478		break;
4479	case PCI_EXP_LNKSTA_NLW_X4:
4480		hw->bus.width = i40e_bus_width_pcie_x4;
4481		break;
4482	case PCI_EXP_LNKSTA_NLW_X8:
4483		hw->bus.width = i40e_bus_width_pcie_x8;
4484		break;
4485	default:
4486		hw->bus.width = i40e_bus_width_unknown;
4487		break;
4488	}
4489
4490	switch (link_status & PCI_EXP_LNKSTA_CLS) {
4491	case PCI_EXP_LNKSTA_CLS_2_5GB:
4492		hw->bus.speed = i40e_bus_speed_2500;
4493		break;
4494	case PCI_EXP_LNKSTA_CLS_5_0GB:
4495		hw->bus.speed = i40e_bus_speed_5000;
4496		break;
4497	case PCI_EXP_LNKSTA_CLS_8_0GB:
4498		hw->bus.speed = i40e_bus_speed_8000;
4499		break;
4500	default:
4501		hw->bus.speed = i40e_bus_speed_unknown;
4502		break;
4503	}
4504}
4505
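/* Illustrative caller sketch (not from the driver): read the negotiated
 * link status word from PCIe config space and cache it in the hw struct.
 * Assumes <linux/pci.h> for pcie_capability_read_word(); the function
 * name is made up for the example.
 */
static void example_cache_pcie_link_info(struct i40e_hw *hw,
					 struct pci_dev *pdev)
{
	u16 link_status = 0;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);
}
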
4506/**
4507 * i40e_aq_debug_dump
4508 * @hw: pointer to the hardware structure
4509 * @cluster_id: specific cluster to dump
4510 * @table_id: table id within cluster
4511 * @start_index: index of line in the block to read
4512 * @buff_size: dump buffer size
4513 * @buff: dump buffer
4514 * @ret_buff_size: actual buffer size returned
4515 * @ret_next_table: next block to read
4516 * @ret_next_index: next index to read
4517 * @cmd_details: pointer to command details structure or NULL
4518 *
4519 * Dump internal FW/HW data for debug purposes.
4520 *
4521 **/
4522i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4523			       u8 table_id, u32 start_index, u16 buff_size,
4524			       void *buff, u16 *ret_buff_size,
4525			       u8 *ret_next_table, u32 *ret_next_index,
4526			       struct i40e_asq_cmd_details *cmd_details)
4527{
4528	struct i40e_aq_desc desc;
4529	struct i40e_aqc_debug_dump_internals *cmd =
4530		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4531	struct i40e_aqc_debug_dump_internals *resp =
4532		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4533	i40e_status status;
4534
4535	if (buff_size == 0 || !buff)
4536		return I40E_ERR_PARAM;
4537
4538	i40e_fill_default_direct_cmd_desc(&desc,
4539					  i40e_aqc_opc_debug_dump_internals);
4540	/* Indirect Command */
4541	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4542	if (buff_size > I40E_AQ_LARGE_BUF)
4543		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4544
4545	cmd->cluster_id = cluster_id;
4546	cmd->table_id = table_id;
4547	cmd->idx = cpu_to_le32(start_index);
4548
4549	desc.datalen = cpu_to_le16(buff_size);
4550
4551	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4552	if (!status) {
4553		if (ret_buff_size)
4554			*ret_buff_size = le16_to_cpu(desc.datalen);
4555		if (ret_next_table)
4556			*ret_next_table = resp->table_id;
4557		if (ret_next_index)
4558			*ret_next_index = le32_to_cpu(resp->idx);
4559	}
4560
4561	return status;
4562}
4563
4564/**
4565 * i40e_read_bw_from_alt_ram
4566 * @hw: pointer to the hardware structure
4567 * @max_bw: pointer for max_bw read
4568 * @min_bw: pointer for min_bw read
4569 * @min_valid: pointer for bool that is true if min_bw is a valid value
4570 * @max_valid: pointer for bool that is true if max_bw is a valid value
4571 *
4572 * Read BW from the alternate RAM for the given PF
4573 **/
4574i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4575				      u32 *max_bw, u32 *min_bw,
4576				      bool *min_valid, bool *max_valid)
4577{
4578	i40e_status status;
4579	u32 max_bw_addr, min_bw_addr;
4580
4581	/* Calculate the address of the min/max bw registers */
4582	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4583		      I40E_ALT_STRUCT_MAX_BW_OFFSET +
4584		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4585	min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4586		      I40E_ALT_STRUCT_MIN_BW_OFFSET +
4587		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4588
4589	/* Read the bandwidths from alt ram */
4590	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4591					min_bw_addr, min_bw);
4592
4593	if (*min_bw & I40E_ALT_BW_VALID_MASK)
4594		*min_valid = true;
4595	else
4596		*min_valid = false;
4597
4598	if (*max_bw & I40E_ALT_BW_VALID_MASK)
4599		*max_valid = true;
4600	else
4601		*max_valid = false;
4602
4603	return status;
4604}
4605
4606/**
4607 * i40e_aq_configure_partition_bw
4608 * @hw: pointer to the hardware structure
4609 * @bw_data: Buffer holding valid pfs and bw limits
4610 * @cmd_details: pointer to command details
4611 *
4612 * Configure partitions guaranteed/max bw
4613 **/
4614i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4615			struct i40e_aqc_configure_partition_bw_data *bw_data,
4616			struct i40e_asq_cmd_details *cmd_details)
4617{
4618	i40e_status status;
4619	struct i40e_aq_desc desc;
4620	u16 bwd_size = sizeof(*bw_data);
4621
4622	i40e_fill_default_direct_cmd_desc(&desc,
4623					  i40e_aqc_opc_configure_partition_bw);
4624
4625	/* Indirect command */
4626	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4627	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4628
4629	if (bwd_size > I40E_AQ_LARGE_BUF)
4630		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4631
4632	desc.datalen = cpu_to_le16(bwd_size);
4633
4634	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4635				       cmd_details);
4636
4637	return status;
4638}
4639
4640/**
4641 * i40e_read_phy_register_clause22
4642 * @hw: pointer to the HW structure
4643 * @reg: register address in the page
4644 * @phy_addr: PHY address on MDIO interface
4645 * @value: PHY register value
4646 *
4647 * Reads specified PHY register value
4648 **/
4649i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4650					    u16 reg, u8 phy_addr, u16 *value)
4651{
4652	i40e_status status = I40E_ERR_TIMEOUT;
4653	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4654	u32 command = 0;
4655	u16 retry = 1000;
4656
4657	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4658		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4659		  (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4660		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4661		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4662	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4663	do {
4664		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4665		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4666			status = 0;
4667			break;
4668		}
4669		udelay(10);
4670		retry--;
4671	} while (retry);
4672
4673	if (status) {
4674		i40e_debug(hw, I40E_DEBUG_PHY,
4675			   "PHY: Can't write command to external PHY.\n");
4676	} else {
4677		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4678		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4679			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4680	}
4681
4682	return status;
4683}
4684
4685/**
4686 * i40e_write_phy_register_clause22
4687 * @hw: pointer to the HW structure
4688 * @reg: register address in the page
4689 * @phy_addr: PHY address on MDIO interface
4690 * @value: PHY register value
4691 *
4692 * Writes specified PHY register value
4693 **/
4694i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4695					     u16 reg, u8 phy_addr, u16 value)
4696{
4697	i40e_status status = I40E_ERR_TIMEOUT;
4698	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4699	u32 command  = 0;
4700	u16 retry = 1000;
4701
4702	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4703	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4704
4705	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4706		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4707		  (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4708		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4709		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4710
4711	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4712	do {
4713		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4714		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4715			status = 0;
4716			break;
4717		}
4718		udelay(10);
4719		retry--;
4720	} while (retry);
4721
4722	return status;
4723}
4724
4725/**
4726 * i40e_read_phy_register_clause45
4727 * @hw: pointer to the HW structure
4728 * @page: registers page number
4729 * @reg: register address in the page
4730 * @phy_addr: PHY address on MDIO interface
4731 * @value: PHY register value
4732 *
4733 * Reads specified PHY register value
4734 **/
4735i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4736				u8 page, u16 reg, u8 phy_addr, u16 *value)
4737{
4738	i40e_status status = I40E_ERR_TIMEOUT;
4739	u32 command = 0;
4740	u16 retry = 1000;
4741	u8 port_num = hw->func_caps.mdio_port_num;
4742
4743	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4744		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4745		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4746		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4747		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4748		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4749		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4750	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4751	do {
4752		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4753		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4754			status = 0;
4755			break;
4756		}
4757		usleep_range(10, 20);
4758		retry--;
4759	} while (retry);
4760
4761	if (status) {
4762		i40e_debug(hw, I40E_DEBUG_PHY,
4763			   "PHY: Can't write command to external PHY.\n");
4764		goto phy_read_end;
4765	}
4766
4767	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4768		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4769		  (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4770		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4771		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4772		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4773	status = I40E_ERR_TIMEOUT;
4774	retry = 1000;
4775	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4776	do {
4777		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4778		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4779			status = 0;
4780			break;
4781		}
4782		usleep_range(10, 20);
4783		retry--;
4784	} while (retry);
4785
4786	if (!status) {
4787		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4788		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4789			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4790	} else {
4791		i40e_debug(hw, I40E_DEBUG_PHY,
4792			   "PHY: Can't read register value from external PHY.\n");
4793	}
4794
4795phy_read_end:
4796	return status;
4797}
4798
4799/**
4800 * i40e_write_phy_register_clause45
4801 * @hw: pointer to the HW structure
4802 * @page: registers page number
4803 * @reg: register address in the page
4804 * @phy_addr: PHY address on MDIO interface
4805 * @value: PHY register value
4806 *
4807 * Writes value to specified PHY register
4808 **/
4809i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4810				u8 page, u16 reg, u8 phy_addr, u16 value)
4811{
4812	i40e_status status = I40E_ERR_TIMEOUT;
4813	u32 command = 0;
4814	u16 retry = 1000;
4815	u8 port_num = hw->func_caps.mdio_port_num;
4816
4817	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4818		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4819		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4820		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4821		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4822		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4823		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4824	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4825	do {
4826		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4827		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4828			status = 0;
4829			break;
4830		}
4831		usleep_range(10, 20);
4832		retry--;
4833	} while (retry);
4834	if (status) {
4835		i40e_debug(hw, I40E_DEBUG_PHY,
4836			   "PHY: Can't write command to external PHY.\n");
4837		goto phy_write_end;
4838	}
4839
4840	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4841	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4842
4843	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4844		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4845		  (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4846		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4847		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4848		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4849	status = I40E_ERR_TIMEOUT;
4850	retry = 1000;
4851	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4852	do {
4853		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4854		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4855			status = 0;
4856			break;
4857		}
4858		usleep_range(10, 20);
4859		retry--;
4860	} while (retry);
4861
4862phy_write_end:
4863	return status;
4864}
4865
4866/**
4867 * i40e_write_phy_register
4868 * @hw: pointer to the HW structure
4869 * @page: registers page number
4870 * @reg: register address in the page
4871 * @phy_addr: PHY address on MDIO interface
4872 * @value: PHY register value
4873 *
4874 * Writes value to specified PHY register
4875 **/
4876i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4877				    u8 page, u16 reg, u8 phy_addr, u16 value)
4878{
4879	i40e_status status;
4880
4881	switch (hw->device_id) {
4882	case I40E_DEV_ID_1G_BASE_T_X722:
4883		status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4884							  value);
4885		break;
4886	case I40E_DEV_ID_10G_BASE_T:
4887	case I40E_DEV_ID_10G_BASE_T4:
4888	case I40E_DEV_ID_10G_BASE_T_X722:
4889	case I40E_DEV_ID_25G_B:
4890	case I40E_DEV_ID_25G_SFP28:
4891		status = i40e_write_phy_register_clause45(hw, page, reg,
4892							  phy_addr, value);
4893		break;
4894	default:
4895		status = I40E_ERR_UNKNOWN_PHY;
4896		break;
4897	}
4898
4899	return status;
4900}
4901
4902/**
4903 * i40e_read_phy_register
4904 * @hw: pointer to the HW structure
4905 * @page: registers page number
4906 * @reg: register address in the page
4907 * @phy_addr: PHY address on MDIO interface
4908 * @value: PHY register value
4909 *
4910 * Reads specified PHY register value
4911 **/
4912i40e_status i40e_read_phy_register(struct i40e_hw *hw,
4913				   u8 page, u16 reg, u8 phy_addr, u16 *value)
4914{
4915	i40e_status status;
4916
4917	switch (hw->device_id) {
4918	case I40E_DEV_ID_1G_BASE_T_X722:
4919		status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4920							 value);
4921		break;
4922	case I40E_DEV_ID_10G_BASE_T:
4923	case I40E_DEV_ID_10G_BASE_T4:
4924	case I40E_DEV_ID_10G_BASE_T_BC:
4925	case I40E_DEV_ID_10G_BASE_T_X722:
4926	case I40E_DEV_ID_25G_B:
4927	case I40E_DEV_ID_25G_SFP28:
4928		status = i40e_read_phy_register_clause45(hw, page, reg,
4929							 phy_addr, value);
4930		break;
4931	default:
4932		status = I40E_ERR_UNKNOWN_PHY;
4933		break;
4934	}
4935
4936	return status;
4937}
4938
4939/**
4940 * i40e_get_phy_address
4941 * @hw: pointer to the HW structure
4942 * @dev_num: PHY port num whose address we want
4943 *
4944 * Gets PHY address for current port
4945 **/
4946u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4947{
4948	u8 port_num = hw->func_caps.mdio_port_num;
4949	u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4950
4951	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4952}
4953
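/* Illustrative caller sketch (not from the driver): look up the PHY
 * address for the current port and read the first LED provisioning
 * register through the clause 22/45 dispatcher above.  The function name
 * is made up for the example.
 */
static i40e_status example_read_led_prov_reg(struct i40e_hw *hw, u16 *val)
{
	u8 port_num = (u8)(rd32(hw, I40E_PFGEN_PORTNUM) &
			   I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	u8 phy_addr = i40e_get_phy_address(hw, port_num);

	return i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
				      I40E_PHY_LED_PROV_REG_1, phy_addr, val);
}
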
4954/**
4955 * i40e_blink_phy_led
4956 * @hw: pointer to the HW structure
4957 * @time: how long the LED will blink, in seconds
4958 * @interval: gap between LED on and off, in milliseconds
4959 *
4960 * Blinks PHY link LED
4961 **/
4962i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
4963				    u32 time, u32 interval)
4964{
4965	i40e_status status = 0;
4966	u32 i;
4967	u16 led_ctl;
4968	u16 gpio_led_port;
4969	u16 led_reg;
4970	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4971	u8 phy_addr = 0;
4972	u8 port_num;
4973
4974	i = rd32(hw, I40E_PFGEN_PORTNUM);
4975	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4976	phy_addr = i40e_get_phy_address(hw, port_num);
4977
4978	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4979	     led_addr++) {
4980		status = i40e_read_phy_register_clause45(hw,
4981							 I40E_PHY_COM_REG_PAGE,
4982							 led_addr, phy_addr,
4983							 &led_reg);
4984		if (status)
4985			goto phy_blinking_end;
4986		led_ctl = led_reg;
4987		if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4988			led_reg = 0;
4989			status = i40e_write_phy_register_clause45(hw,
4990							 I40E_PHY_COM_REG_PAGE,
4991							 led_addr, phy_addr,
4992							 led_reg);
4993			if (status)
4994				goto phy_blinking_end;
4995			break;
4996		}
4997	}
4998
4999	if (time > 0 && interval > 0) {
5000		for (i = 0; i < time * 1000; i += interval) {
5001			status = i40e_read_phy_register_clause45(hw,
5002						I40E_PHY_COM_REG_PAGE,
5003						led_addr, phy_addr, &led_reg);
5004			if (status)
5005				goto restore_config;
5006			if (led_reg & I40E_PHY_LED_MANUAL_ON)
5007				led_reg = 0;
5008			else
5009				led_reg = I40E_PHY_LED_MANUAL_ON;
5010			status = i40e_write_phy_register_clause45(hw,
5011						I40E_PHY_COM_REG_PAGE,
5012						led_addr, phy_addr, led_reg);
5013			if (status)
5014				goto restore_config;
5015			msleep(interval);
5016		}
5017	}
5018
5019restore_config:
5020	status = i40e_write_phy_register_clause45(hw,
5021						  I40E_PHY_COM_REG_PAGE,
5022						  led_addr, phy_addr, led_ctl);
5023
5024phy_blinking_end:
5025	return status;
5026}
5027
5028/**
5029 * i40e_led_get_reg - read LED register
5030 * @hw: pointer to the HW structure
5031 * @led_addr: LED register address
5032 * @reg_val: read register value
5033 **/
5034static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5035					      u32 *reg_val)
5036{
5037	enum i40e_status_code status;
5038	u8 phy_addr = 0;
5039	u8 port_num;
5040	u32 i;
5041
5042	*reg_val = 0;
5043	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5044		status =
5045		       i40e_aq_get_phy_register(hw,
5046						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5047						I40E_PHY_COM_REG_PAGE,
5048						I40E_PHY_LED_PROV_REG_1,
5049						reg_val, NULL);
5050	} else {
5051		i = rd32(hw, I40E_PFGEN_PORTNUM);
5052		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5053		phy_addr = i40e_get_phy_address(hw, port_num);
5054		status = i40e_read_phy_register_clause45(hw,
5055							 I40E_PHY_COM_REG_PAGE,
5056							 led_addr, phy_addr,
5057							 (u16 *)reg_val);
5058	}
5059	return status;
5060}
5061
5062/**
5063 * i40e_led_set_reg - write LED register
5064 * @hw: pointer to the HW structure
5065 * @led_addr: LED register address
5066 * @reg_val: register value to write
5067 **/
5068static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5069					      u32 reg_val)
5070{
5071	enum i40e_status_code status;
5072	u8 phy_addr = 0;
5073	u8 port_num;
5074	u32 i;
5075
5076	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5077		status =
5078		       i40e_aq_set_phy_register(hw,
5079						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5080						I40E_PHY_COM_REG_PAGE,
5081						I40E_PHY_LED_PROV_REG_1,
5082						reg_val, NULL);
5083	} else {
5084		i = rd32(hw, I40E_PFGEN_PORTNUM);
5085		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5086		phy_addr = i40e_get_phy_address(hw, port_num);
5087		status = i40e_write_phy_register_clause45(hw,
5088							  I40E_PHY_COM_REG_PAGE,
5089							  led_addr, phy_addr,
5090							  (u16)reg_val);
5091	}
5092
5093	return status;
5094}
5095
5096/**
5097 * i40e_led_get_phy - return current on/off mode
5098 * @hw: pointer to the hw struct
5099 * @led_addr: address of led register to use
5100 * @val: original value of register to use
5101 *
5102 **/
5103i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5104			     u16 *val)
5105{
5106	i40e_status status = 0;
5107	u16 gpio_led_port;
5108	u8 phy_addr = 0;
5109	u16 reg_val;
5110	u16 temp_addr;
5111	u8 port_num;
5112	u32 i;
5113	u32 reg_val_aq;
5114
5115	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5116		status =
5117		      i40e_aq_get_phy_register(hw,
5118					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5119					       I40E_PHY_COM_REG_PAGE,
5120					       I40E_PHY_LED_PROV_REG_1,
5121					       &reg_val_aq, NULL);
5122		if (status == I40E_SUCCESS)
5123			*val = (u16)reg_val_aq;
5124		return status;
5125	}
5126	temp_addr = I40E_PHY_LED_PROV_REG_1;
5127	i = rd32(hw, I40E_PFGEN_PORTNUM);
5128	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5129	phy_addr = i40e_get_phy_address(hw, port_num);
5130
5131	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5132	     temp_addr++) {
5133		status = i40e_read_phy_register_clause45(hw,
5134							 I40E_PHY_COM_REG_PAGE,
5135							 temp_addr, phy_addr,
5136							 &reg_val);
5137		if (status)
5138			return status;
5139		*val = reg_val;
5140		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5141			*led_addr = temp_addr;
5142			break;
5143		}
5144	}
5145	return status;
5146}
5147
5148/**
5149 * i40e_led_set_phy
5150 * @hw: pointer to the HW structure
5151 * @on: true to turn the LED on, false to turn it off
5152 * @led_addr: address of led register to use
5153 * @mode: original register value, plus a flag selecting whether to restore it
5154 *
5155 * Set LEDs on or off when controlled by the PHY
5156 *
5157 **/
5158i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5159			     u16 led_addr, u32 mode)
5160{
5161	i40e_status status = 0;
5162	u32 led_ctl = 0;
5163	u32 led_reg = 0;
5164
5165	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5166	if (status)
5167		return status;
5168	led_ctl = led_reg;
5169	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5170		led_reg = 0;
5171		status = i40e_led_set_reg(hw, led_addr, led_reg);
5172		if (status)
5173			return status;
5174	}
5175	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5176	if (status)
5177		goto restore_config;
5178	if (on)
5179		led_reg = I40E_PHY_LED_MANUAL_ON;
5180	else
5181		led_reg = 0;
5182
5183	status = i40e_led_set_reg(hw, led_addr, led_reg);
5184	if (status)
5185		goto restore_config;
5186	if (mode & I40E_PHY_LED_MODE_ORIG) {
5187		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5188		status = i40e_led_set_reg(hw, led_addr, led_ctl);
5189	}
5190	return status;
5191
5192restore_config:
5193	status = i40e_led_set_reg(hw, led_addr, led_ctl);
5194	return status;
5195}
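/* Usage sketch (annotation, not part of the original source): a typical
 * "identify adapter" sequence first saves the current LED provisioning with
 * i40e_led_get_phy(), drives the LED manually, and finally restores the
 * saved value by passing it back with I40E_PHY_LED_MODE_ORIG set.  Variable
 * names below are illustrative only:
 *
 *	u16 led_addr = 0, orig_val = 0;
 *
 *	if (!i40e_led_get_phy(hw, &led_addr, &orig_val))
 *		i40e_led_set_phy(hw, true, led_addr, 0);
 *	...
 *	i40e_led_set_phy(hw, false, led_addr,
 *			 orig_val | I40E_PHY_LED_MODE_ORIG);
 */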
5196
5197/**
5198 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5199 * @hw: pointer to the hw struct
5200 * @reg_addr: register address
5201 * @reg_val: ptr to register value
5202 * @cmd_details: pointer to command details structure or NULL
5203 *
5204 * Use the firmware to read the Rx control register,
5205 * especially useful if the Rx unit is under heavy pressure
5206 **/
5207i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5208				u32 reg_addr, u32 *reg_val,
5209				struct i40e_asq_cmd_details *cmd_details)
5210{
5211	struct i40e_aq_desc desc;
5212	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5213		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5214	i40e_status status;
5215
5216	if (!reg_val)
5217		return I40E_ERR_PARAM;
5218
5219	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5220
5221	cmd_resp->address = cpu_to_le32(reg_addr);
5222
5223	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5224
5225	if (status == 0)
5226		*reg_val = le32_to_cpu(cmd_resp->value);
5227
5228	return status;
5229}
5230
5231/**
5232 * i40e_read_rx_ctl - read from an Rx control register
5233 * @hw: pointer to the hw struct
5234 * @reg_addr: register address
5235 **/
5236u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5237{
5238	i40e_status status = 0;
5239	bool use_register;
5240	int retry = 5;
5241	u32 val = 0;
5242
5243	use_register = (((hw->aq.api_maj_ver == 1) &&
5244			(hw->aq.api_min_ver < 5)) ||
5245			(hw->mac.type == I40E_MAC_X722));
5246	if (!use_register) {
5247do_retry:
5248		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5249		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5250			usleep_range(1000, 2000);
5251			retry--;
5252			goto do_retry;
5253		}
5254	}
5255
5256	/* if the AQ access failed, try the old-fashioned way */
5257	if (status || use_register)
5258		val = rd32(hw, reg_addr);
5259
5260	return val;
5261}
5262
5263/**
5264 * i40e_aq_rx_ctl_write_register
5265 * @hw: pointer to the hw struct
5266 * @reg_addr: register address
5267 * @reg_val: register value
5268 * @cmd_details: pointer to command details structure or NULL
5269 *
5270 * Use the firmware to write to an Rx control register,
5271 * especially useful if the Rx unit is under heavy pressure
5272 **/
5273i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5274				u32 reg_addr, u32 reg_val,
5275				struct i40e_asq_cmd_details *cmd_details)
5276{
5277	struct i40e_aq_desc desc;
5278	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5279		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5280	i40e_status status;
5281
5282	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5283
5284	cmd->address = cpu_to_le32(reg_addr);
5285	cmd->value = cpu_to_le32(reg_val);
5286
5287	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5288
5289	return status;
5290}
5291
5292/**
5293 * i40e_write_rx_ctl - write to an Rx control register
5294 * @hw: pointer to the hw struct
5295 * @reg_addr: register address
5296 * @reg_val: register value
5297 **/
5298void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5299{
5300	i40e_status status = 0;
5301	bool use_register;
5302	int retry = 5;
5303
5304	use_register = (((hw->aq.api_maj_ver == 1) &&
5305			(hw->aq.api_min_ver < 5)) ||
5306			(hw->mac.type == I40E_MAC_X722));
5307	if (!use_register) {
5308do_retry:
5309		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5310						       reg_val, NULL);
5311		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5312			usleep_range(1000, 2000);
5313			retry--;
5314			goto do_retry;
5315		}
5316	}
5317
5318	/* if the AQ access failed, try the old-fashioned way */
5319	if (status || use_register)
5320		wr32(hw, reg_addr, reg_val);
5321}
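/* Usage sketch (annotation, not part of the original source): callers use
 * these helpers as drop-in replacements for rd32()/wr32() on Rx control
 * registers, e.g. a read-modify-write; the register and mask names here are
 * placeholders, not defined by this file:
 *
 *	u32 val = i40e_read_rx_ctl(hw, reg_addr);
 *
 *	val |= some_bit_mask;
 *	i40e_write_rx_ctl(hw, reg_addr, val);
 */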
5322
5323/**
5324 * i40e_aq_set_phy_register
5325 * @hw: pointer to the hw struct
5326 * @phy_select: select which phy should be accessed
5327 * @dev_addr: PHY device address
5328 * @reg_addr: PHY register address
5329 * @reg_val: new register value
5330 * @cmd_details: pointer to command details structure or NULL
5331 *
5332 * Write the external PHY register.
5333 **/
5334i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
5335				     u8 phy_select, u8 dev_addr,
5336				     u32 reg_addr, u32 reg_val,
5337				     struct i40e_asq_cmd_details *cmd_details)
5338{
5339	struct i40e_aq_desc desc;
5340	struct i40e_aqc_phy_register_access *cmd =
5341		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5342	i40e_status status;
5343
5344	i40e_fill_default_direct_cmd_desc(&desc,
5345					  i40e_aqc_opc_set_phy_register);
5346
5347	cmd->phy_interface = phy_select;
5348	cmd->dev_address = dev_addr;
5349	cmd->reg_address = cpu_to_le32(reg_addr);
5350	cmd->reg_value = cpu_to_le32(reg_val);
5351
5352	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5353
5354	return status;
5355}
5356
5357/**
5358 * i40e_aq_get_phy_register
5359 * @hw: pointer to the hw struct
5360 * @phy_select: select which phy should be accessed
5361 * @dev_addr: PHY device address
5362 * @reg_addr: PHY register address
5363 * @reg_val: read register value
5364 * @cmd_details: pointer to command details structure or NULL
5365 *
5366 * Read the external PHY register.
5367 **/
5368i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
5369				     u8 phy_select, u8 dev_addr,
5370				     u32 reg_addr, u32 *reg_val,
5371				     struct i40e_asq_cmd_details *cmd_details)
5372{
5373	struct i40e_aq_desc desc;
5374	struct i40e_aqc_phy_register_access *cmd =
5375		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5376	i40e_status status;
5377
5378	i40e_fill_default_direct_cmd_desc(&desc,
5379					  i40e_aqc_opc_get_phy_register);
5380
5381	cmd->phy_interface = phy_select;
5382	cmd->dev_address = dev_addr;
5383	cmd->reg_address = cpu_to_le32(reg_addr);
5384
5385	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5386	if (!status)
5387		*reg_val = le32_to_cpu(cmd->reg_value);
5388
5389	return status;
5390}
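/* Usage sketch (annotation, not part of the original source): when firmware
 * exposes the AQ PHY access commands, external PHY registers are read and
 * written through the two helpers above instead of raw MDIO, mirroring how
 * i40e_led_get_reg()/i40e_led_set_reg() use them earlier in this file:
 *
 *	u32 val;
 *
 *	status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					  I40E_PHY_COM_REG_PAGE,
 *					  I40E_PHY_LED_PROV_REG_1, &val, NULL);
 */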
5391
5392/**
5393 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5394 * @hw: pointer to the hw struct
5395 * @buff: command buffer (size in bytes = buff_size)
5396 * @buff_size: buffer size in bytes
5397 * @track_id: package tracking id
5398 * @error_offset: returns error offset
5399 * @error_info: returns error information
5400 * @cmd_details: pointer to command details structure or NULL
5401 **/
5402enum
5403i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5404				   u16 buff_size, u32 track_id,
5405				   u32 *error_offset, u32 *error_info,
5406				   struct i40e_asq_cmd_details *cmd_details)
5407{
5408	struct i40e_aq_desc desc;
5409	struct i40e_aqc_write_personalization_profile *cmd =
5410		(struct i40e_aqc_write_personalization_profile *)
5411		&desc.params.raw;
5412	struct i40e_aqc_write_ddp_resp *resp;
5413	i40e_status status;
5414
5415	i40e_fill_default_direct_cmd_desc(&desc,
5416					  i40e_aqc_opc_write_personalization_profile);
5417
5418	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5419	if (buff_size > I40E_AQ_LARGE_BUF)
5420		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5421
5422	desc.datalen = cpu_to_le16(buff_size);
5423
5424	cmd->profile_track_id = cpu_to_le32(track_id);
5425
5426	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5427	if (!status) {
5428		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5429		if (error_offset)
5430			*error_offset = le32_to_cpu(resp->error_offset);
5431		if (error_info)
5432			*error_info = le32_to_cpu(resp->error_info);
5433	}
5434
5435	return status;
5436}
5437
5438/**
5439 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5440 * @hw: pointer to the hw struct
5441 * @buff: command buffer (size in bytes = buff_size)
5442 * @buff_size: buffer size in bytes
5443 * @flags: AdminQ command flags
5444 * @cmd_details: pointer to command details structure or NULL
5445 **/
5446enum
5447i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5448				      u16 buff_size, u8 flags,
5449				      struct i40e_asq_cmd_details *cmd_details)
5450{
5451	struct i40e_aq_desc desc;
5452	struct i40e_aqc_get_applied_profiles *cmd =
5453		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5454	i40e_status status;
5455
5456	i40e_fill_default_direct_cmd_desc(&desc,
5457					  i40e_aqc_opc_get_personalization_profile_list);
5458
5459	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5460	if (buff_size > I40E_AQ_LARGE_BUF)
5461		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5462	desc.datalen = cpu_to_le16(buff_size);
5463
5464	cmd->flags = flags;
5465
5466	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5467
5468	return status;
5469}
5470
5471/**
5472 * i40e_find_segment_in_package
5473 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5474 * @pkg_hdr: pointer to the package header to be searched
5475 *
5476 * This function searches a package file for a particular segment type. On
5477 * success it returns a pointer to the segment header, otherwise it will
5478 * return NULL.
5479 **/
5480struct i40e_generic_seg_header *
5481i40e_find_segment_in_package(u32 segment_type,
5482			     struct i40e_package_header *pkg_hdr)
5483{
5484	struct i40e_generic_seg_header *segment;
5485	u32 i;
5486
5487	/* Search all package segments for the requested segment type */
5488	for (i = 0; i < pkg_hdr->segment_count; i++) {
5489		segment =
5490			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5491			 pkg_hdr->segment_offset[i]);
5492
5493		if (segment->type == segment_type)
5494			return segment;
5495	}
5496
5497	return NULL;
5498}
5499
5500/* Get section table in profile */
5501#define I40E_SECTION_TABLE(profile, sec_tbl)				\
5502	do {								\
5503		struct i40e_profile_segment *p = (profile);		\
5504		u32 count;						\
5505		u32 *nvm;						\
5506		count = p->device_table_count;				\
5507		nvm = (u32 *)&p->device_table[count];			\
5508		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5509	} while (0)
5510
5511/* Get section header in profile */
5512#define I40E_SECTION_HEADER(profile, offset)				\
5513	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5514
5515/**
5516 * i40e_find_section_in_profile
5517 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5518 * @profile: pointer to the i40e segment header to be searched
5519 *
5520 * This function searches i40e segment for a particular section type. On
5521 * success it returns a pointer to the section header, otherwise it will
5522 * return NULL.
5523 **/
5524struct i40e_profile_section_header *
5525i40e_find_section_in_profile(u32 section_type,
5526			     struct i40e_profile_segment *profile)
5527{
5528	struct i40e_profile_section_header *sec;
5529	struct i40e_section_table *sec_tbl;
5530	u32 sec_off;
5531	u32 i;
5532
5533	if (profile->header.type != SEGMENT_TYPE_I40E)
5534		return NULL;
5535
5536	I40E_SECTION_TABLE(profile, sec_tbl);
5537
5538	for (i = 0; i < sec_tbl->section_count; i++) {
5539		sec_off = sec_tbl->section_offset[i];
5540		sec = I40E_SECTION_HEADER(profile, sec_off);
5541		if (sec->section.type == section_type)
5542			return sec;
5543	}
5544
5545	return NULL;
5546}
5547
5548/**
5549 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5550 * @hw: pointer to the hw struct
5551 * @aq: command buffer containing all data to execute AQ
5552 **/
5553static enum
5554i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5555					  struct i40e_profile_aq_section *aq)
5556{
5557	i40e_status status;
5558	struct i40e_aq_desc desc;
5559	u8 *msg = NULL;
5560	u16 msglen;
5561
5562	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5563	desc.flags |= cpu_to_le16(aq->flags);
5564	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5565
5566	msglen = aq->datalen;
5567	if (msglen) {
5568		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5569						I40E_AQ_FLAG_RD));
5570		if (msglen > I40E_AQ_LARGE_BUF)
5571			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5572		desc.datalen = cpu_to_le16(msglen);
5573		msg = &aq->data[0];
5574	}
5575
5576	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5577
5578	if (status) {
5579		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5580			   "unable to exec DDP AQ opcode %u, error %d\n",
5581			   aq->opcode, status);
5582		return status;
5583	}
5584
5585	/* copy returned desc to aq_buf */
5586	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5587
5588	return 0;
5589}
5590
5591/**
5592 * i40e_validate_profile
5593 * @hw: pointer to the hardware structure
5594 * @profile: pointer to the profile segment of the package to be validated
5595 * @track_id: package tracking id
5596 * @rollback: flag if the profile is for rollback.
5597 *
5598 * Validates supported devices and profile's sections.
5599 */
5600static enum i40e_status_code
5601i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5602		      u32 track_id, bool rollback)
5603{
5604	struct i40e_profile_section_header *sec = NULL;
5605	i40e_status status = 0;
5606	struct i40e_section_table *sec_tbl;
5607	u32 vendor_dev_id;
5608	u32 dev_cnt;
5609	u32 sec_off;
5610	u32 i;
5611
5612	if (track_id == I40E_DDP_TRACKID_INVALID) {
5613		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5614		return I40E_NOT_SUPPORTED;
5615	}
5616
5617	dev_cnt = profile->device_table_count;
5618	for (i = 0; i < dev_cnt; i++) {
5619		vendor_dev_id = profile->device_table[i].vendor_dev_id;
5620		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5621		    hw->device_id == (vendor_dev_id & 0xFFFF))
5622			break;
5623	}
5624	if (dev_cnt && i == dev_cnt) {
5625		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5626			   "Device doesn't support DDP\n");
5627		return I40E_ERR_DEVICE_NOT_SUPPORTED;
5628	}
5629
5630	I40E_SECTION_TABLE(profile, sec_tbl);
5631
5632	/* Validate sections types */
5633	for (i = 0; i < sec_tbl->section_count; i++) {
5634		sec_off = sec_tbl->section_offset[i];
5635		sec = I40E_SECTION_HEADER(profile, sec_off);
5636		if (rollback) {
5637			if (sec->section.type == SECTION_TYPE_MMIO ||
5638			    sec->section.type == SECTION_TYPE_AQ ||
5639			    sec->section.type == SECTION_TYPE_RB_AQ) {
5640				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5641					   "Not a roll-back package\n");
5642				return I40E_NOT_SUPPORTED;
5643			}
5644		} else {
5645			if (sec->section.type == SECTION_TYPE_RB_AQ ||
5646			    sec->section.type == SECTION_TYPE_RB_MMIO) {
5647				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5648					   "Not an original package\n");
5649				return I40E_NOT_SUPPORTED;
5650			}
5651		}
5652	}
5653
5654	return status;
5655}
5656
5657/**
5658 * i40e_write_profile
5659 * @hw: pointer to the hardware structure
5660 * @profile: pointer to the profile segment of the package to be downloaded
5661 * @track_id: package tracking id
5662 *
5663 * Handles the download of a complete package.
5664 */
5665enum i40e_status_code
5666i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5667		   u32 track_id)
5668{
5669	i40e_status status = 0;
5670	struct i40e_section_table *sec_tbl;
5671	struct i40e_profile_section_header *sec = NULL;
5672	struct i40e_profile_aq_section *ddp_aq;
5673	u32 section_size = 0;
5674	u32 offset = 0, info = 0;
5675	u32 sec_off;
5676	u32 i;
5677
5678	status = i40e_validate_profile(hw, profile, track_id, false);
5679	if (status)
5680		return status;
5681
5682	I40E_SECTION_TABLE(profile, sec_tbl);
5683
5684	for (i = 0; i < sec_tbl->section_count; i++) {
5685		sec_off = sec_tbl->section_offset[i];
5686		sec = I40E_SECTION_HEADER(profile, sec_off);
5687		/* Process generic admin command */
5688		if (sec->section.type == SECTION_TYPE_AQ) {
5689			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5690			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5691			if (status) {
5692				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5693					   "Failed to execute aq: section %d, opcode %u\n",
5694					   i, ddp_aq->opcode);
5695				break;
5696			}
5697			sec->section.type = SECTION_TYPE_RB_AQ;
5698		}
5699
5700		/* Skip any non-mmio sections */
5701		if (sec->section.type != SECTION_TYPE_MMIO)
5702			continue;
5703
5704		section_size = sec->section.size +
5705			sizeof(struct i40e_profile_section_header);
5706
5707		/* Write MMIO section */
5708		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5709					   track_id, &offset, &info, NULL);
5710		if (status) {
5711			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5712				   "Failed to write profile: section %d, offset %d, info %d\n",
5713				   i, offset, info);
5714			break;
5715		}
5716	}
5717	return status;
5718}
5719
5720/**
5721 * i40e_rollback_profile
5722 * @hw: pointer to the hardware structure
5723 * @profile: pointer to the profile segment of the package to be removed
5724 * @track_id: package tracking id
5725 *
5726 * Rolls back previously loaded package.
5727 */
5728enum i40e_status_code
5729i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5730		      u32 track_id)
5731{
5732	struct i40e_profile_section_header *sec = NULL;
5733	i40e_status status = 0;
5734	struct i40e_section_table *sec_tbl;
5735	u32 offset = 0, info = 0;
5736	u32 section_size = 0;
5737	u32 sec_off;
5738	int i;
5739
5740	status = i40e_validate_profile(hw, profile, track_id, true);
5741	if (status)
5742		return status;
5743
5744	I40E_SECTION_TABLE(profile, sec_tbl);
5745
5746	/* For rollback write sections in reverse */
5747	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5748		sec_off = sec_tbl->section_offset[i];
5749		sec = I40E_SECTION_HEADER(profile, sec_off);
5750
5751		/* Skip any non-rollback sections */
5752		if (sec->section.type != SECTION_TYPE_RB_MMIO)
5753			continue;
5754
5755		section_size = sec->section.size +
5756			sizeof(struct i40e_profile_section_header);
5757
5758		/* Write roll-back MMIO section */
5759		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5760					   track_id, &offset, &info, NULL);
5761		if (status) {
5762			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5763				   "Failed to write profile: section %d, offset %d, info %d\n",
5764				   i, offset, info);
5765			break;
5766		}
5767	}
5768	return status;
5769}
5770
5771/**
5772 * i40e_add_pinfo_to_list
5773 * @hw: pointer to the hardware structure
5774 * @profile: pointer to the profile segment of the package
5775 * @profile_info_sec: buffer for information section
5776 * @track_id: package tracking id
5777 *
5778 * Register a profile to the list of loaded profiles.
5779 */
5780enum i40e_status_code
5781i40e_add_pinfo_to_list(struct i40e_hw *hw,
5782		       struct i40e_profile_segment *profile,
5783		       u8 *profile_info_sec, u32 track_id)
5784{
5785	i40e_status status = 0;
5786	struct i40e_profile_section_header *sec = NULL;
5787	struct i40e_profile_info *pinfo;
5788	u32 offset = 0, info = 0;
5789
5790	sec = (struct i40e_profile_section_header *)profile_info_sec;
5791	sec->tbl_size = 1;
5792	sec->data_end = sizeof(struct i40e_profile_section_header) +
5793			sizeof(struct i40e_profile_info);
5794	sec->section.type = SECTION_TYPE_INFO;
5795	sec->section.offset = sizeof(struct i40e_profile_section_header);
5796	sec->section.size = sizeof(struct i40e_profile_info);
5797	pinfo = (struct i40e_profile_info *)(profile_info_sec +
5798					     sec->section.offset);
5799	pinfo->track_id = track_id;
5800	pinfo->version = profile->version;
5801	pinfo->op = I40E_DDP_ADD_TRACKID;
5802	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5803
5804	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5805				   track_id, &offset, &info, NULL);
5806
5807	return status;
5808}
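/* Usage sketch (annotation, not part of the original source): loading a DDP
 * package roughly chains the helpers above - find the i40e segment in the
 * package image, write its profile, then register it in the applied-profile
 * list.  Local variable names are illustrative and error handling is omitted:
 *
 *	struct i40e_profile_segment *profile_hdr;
 *	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
 *		     sizeof(struct i40e_profile_info)];
 *
 *	profile_hdr = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	i40e_write_profile(hw, profile_hdr, track_id);
 *	i40e_add_pinfo_to_list(hw, profile_hdr, pinfo_sec, track_id);
 */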
5809
5810/**
5811 * i40e_aq_add_cloud_filters
5812 * @hw: pointer to the hardware structure
5813 * @seid: VSI seid to add cloud filters to
5814 * @filters: Buffer which contains the filters to be added
5815 * @filter_count: number of filters contained in the buffer
5816 *
5817 * Set the cloud filters for a given VSI.  The contents of the
5818 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5819 * of the function.
5820 *
5821 **/
5822enum i40e_status_code
5823i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5824			  struct i40e_aqc_cloud_filters_element_data *filters,
5825			  u8 filter_count)
5826{
5827	struct i40e_aq_desc desc;
5828	struct i40e_aqc_add_remove_cloud_filters *cmd =
5829	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5830	enum i40e_status_code status;
5831	u16 buff_len;
5832
5833	i40e_fill_default_direct_cmd_desc(&desc,
5834					  i40e_aqc_opc_add_cloud_filters);
5835
5836	buff_len = filter_count * sizeof(*filters);
5837	desc.datalen = cpu_to_le16(buff_len);
5838	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5839	cmd->num_filters = filter_count;
5840	cmd->seid = cpu_to_le16(seid);
5841
5842	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5843
5844	return status;
5845}
5846
5847/**
5848 * i40e_aq_add_cloud_filters_bb
5849 * @hw: pointer to the hardware structure
5850 * @seid: VSI seid to add cloud filters to
5851 * @filters: Buffer which contains the filters in big buffer to be added
5852 * @filter_count: number of filters contained in the buffer
5853 *
5854 * Set the big buffer cloud filters for a given VSI.  The contents of the
5855 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5856 * function.
5857 *
5858 **/
5859enum i40e_status_code
5860i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5861			     struct i40e_aqc_cloud_filters_element_bb *filters,
5862			     u8 filter_count)
5863{
5864	struct i40e_aq_desc desc;
5865	struct i40e_aqc_add_remove_cloud_filters *cmd =
5866	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5867	i40e_status status;
5868	u16 buff_len;
5869	int i;
5870
5871	i40e_fill_default_direct_cmd_desc(&desc,
5872					  i40e_aqc_opc_add_cloud_filters);
5873
5874	buff_len = filter_count * sizeof(*filters);
5875	desc.datalen = cpu_to_le16(buff_len);
5876	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5877	cmd->num_filters = filter_count;
5878	cmd->seid = cpu_to_le16(seid);
5879	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5880
5881	for (i = 0; i < filter_count; i++) {
5882		u16 tnl_type;
5883		u32 ti;
5884
5885		tnl_type = (le16_to_cpu(filters[i].element.flags) &
5886			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5887			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5888
5889		/* Due to hardware eccentricities, the VNI for Geneve is shifted
5890		 * one more byte further than normally used for Tenant ID in
5891		 * other tunnel types.
5892		 */
5893		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5894			ti = le32_to_cpu(filters[i].element.tenant_id);
5895			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5896		}
5897	}
5898
5899	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5900
5901	return status;
5902}
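/* Annotation (not part of the original source): the Geneve fix-up in the loop
 * above means that, for example, a tenant_id/VNI of 0x001234 supplied by the
 * caller is sent to firmware as 0x00123400, i.e. shifted left by one byte
 * (value shown purely for illustration).
 */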
5903
5904/**
5905 * i40e_aq_rem_cloud_filters
5906 * @hw: pointer to the hardware structure
5907 * @seid: VSI seid to remove cloud filters from
5908 * @filters: Buffer which contains the filters to be removed
5909 * @filter_count: number of filters contained in the buffer
5910 *
5911 * Remove the cloud filters for a given VSI.  The contents of the
5912 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5913 * of the function.
5914 *
5915 **/
5916enum i40e_status_code
5917i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5918			  struct i40e_aqc_cloud_filters_element_data *filters,
5919			  u8 filter_count)
5920{
5921	struct i40e_aq_desc desc;
5922	struct i40e_aqc_add_remove_cloud_filters *cmd =
5923	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5924	enum i40e_status_code status;
5925	u16 buff_len;
5926
5927	i40e_fill_default_direct_cmd_desc(&desc,
5928					  i40e_aqc_opc_remove_cloud_filters);
5929
5930	buff_len = filter_count * sizeof(*filters);
5931	desc.datalen = cpu_to_le16(buff_len);
5932	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5933	cmd->num_filters = filter_count;
5934	cmd->seid = cpu_to_le16(seid);
5935
5936	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5937
5938	return status;
5939}
5940
5941/**
5942 * i40e_aq_rem_cloud_filters_bb
5943 * @hw: pointer to the hardware structure
5944 * @seid: VSI seid to remove cloud filters from
5945 * @filters: Buffer which contains the filters in big buffer to be removed
5946 * @filter_count: number of filters contained in the buffer
5947 *
5948 * Remove the big buffer cloud filters for a given VSI.  The contents of the
5949 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5950 * function.
5951 *
5952 **/
5953enum i40e_status_code
5954i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5955			     struct i40e_aqc_cloud_filters_element_bb *filters,
5956			     u8 filter_count)
5957{
5958	struct i40e_aq_desc desc;
5959	struct i40e_aqc_add_remove_cloud_filters *cmd =
5960	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5961	i40e_status status;
5962	u16 buff_len;
5963	int i;
5964
5965	i40e_fill_default_direct_cmd_desc(&desc,
5966					  i40e_aqc_opc_remove_cloud_filters);
5967
5968	buff_len = filter_count * sizeof(*filters);
5969	desc.datalen = cpu_to_le16(buff_len);
5970	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5971	cmd->num_filters = filter_count;
5972	cmd->seid = cpu_to_le16(seid);
5973	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5974
5975	for (i = 0; i < filter_count; i++) {
5976		u16 tnl_type;
5977		u32 ti;
5978
5979		tnl_type = (le16_to_cpu(filters[i].element.flags) &
5980			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5981			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5982
5983		/* Due to hardware eccentricities, the VNI for Geneve is shifted
5984		 * one more byte further than normally used for Tenant ID in
5985		 * other tunnel types.
5986		 */
5987		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5988			ti = le32_to_cpu(filters[i].element.tenant_id);
5989			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5990		}
5991	}
5992
5993	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5994
5995	return status;
5996}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2013 - 2021 Intel Corporation. */
   3
   4#include "i40e.h"
   5#include "i40e_type.h"
   6#include "i40e_adminq.h"
   7#include "i40e_prototype.h"
   8#include <linux/avf/virtchnl.h>
   9
  10/**
  11 * i40e_set_mac_type - Sets MAC type
  12 * @hw: pointer to the HW structure
  13 *
  14 * This function sets the mac type of the adapter based on the
  15 * vendor ID and device ID stored in the hw structure.
  16 **/
  17i40e_status i40e_set_mac_type(struct i40e_hw *hw)
  18{
  19	i40e_status status = 0;
  20
  21	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
  22		switch (hw->device_id) {
  23		case I40E_DEV_ID_SFP_XL710:
  24		case I40E_DEV_ID_QEMU:
  25		case I40E_DEV_ID_KX_B:
  26		case I40E_DEV_ID_KX_C:
  27		case I40E_DEV_ID_QSFP_A:
  28		case I40E_DEV_ID_QSFP_B:
  29		case I40E_DEV_ID_QSFP_C:
  30		case I40E_DEV_ID_1G_BASE_T_BC:
  31		case I40E_DEV_ID_5G_BASE_T_BC:
  32		case I40E_DEV_ID_10G_BASE_T:
  33		case I40E_DEV_ID_10G_BASE_T4:
  34		case I40E_DEV_ID_10G_BASE_T_BC:
  35		case I40E_DEV_ID_10G_B:
  36		case I40E_DEV_ID_10G_SFP:
  37		case I40E_DEV_ID_20G_KR2:
  38		case I40E_DEV_ID_20G_KR2_A:
  39		case I40E_DEV_ID_25G_B:
  40		case I40E_DEV_ID_25G_SFP28:
  41		case I40E_DEV_ID_X710_N3000:
  42		case I40E_DEV_ID_XXV710_N3000:
  43			hw->mac.type = I40E_MAC_XL710;
  44			break;
  45		case I40E_DEV_ID_KX_X722:
  46		case I40E_DEV_ID_QSFP_X722:
  47		case I40E_DEV_ID_SFP_X722:
  48		case I40E_DEV_ID_1G_BASE_T_X722:
  49		case I40E_DEV_ID_10G_BASE_T_X722:
  50		case I40E_DEV_ID_SFP_I_X722:
  51		case I40E_DEV_ID_SFP_X722_A:
  52			hw->mac.type = I40E_MAC_X722;
  53			break;
  54		default:
  55			hw->mac.type = I40E_MAC_GENERIC;
  56			break;
  57		}
  58	} else {
  59		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
  60	}
  61
  62	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
  63		  hw->mac.type, status);
  64	return status;
  65}
  66
  67/**
  68 * i40e_aq_str - convert AQ err code to a string
  69 * @hw: pointer to the HW structure
  70 * @aq_err: the AQ error code to convert
  71 **/
  72const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
  73{
  74	switch (aq_err) {
  75	case I40E_AQ_RC_OK:
  76		return "OK";
  77	case I40E_AQ_RC_EPERM:
  78		return "I40E_AQ_RC_EPERM";
  79	case I40E_AQ_RC_ENOENT:
  80		return "I40E_AQ_RC_ENOENT";
  81	case I40E_AQ_RC_ESRCH:
  82		return "I40E_AQ_RC_ESRCH";
  83	case I40E_AQ_RC_EINTR:
  84		return "I40E_AQ_RC_EINTR";
  85	case I40E_AQ_RC_EIO:
  86		return "I40E_AQ_RC_EIO";
  87	case I40E_AQ_RC_ENXIO:
  88		return "I40E_AQ_RC_ENXIO";
  89	case I40E_AQ_RC_E2BIG:
  90		return "I40E_AQ_RC_E2BIG";
  91	case I40E_AQ_RC_EAGAIN:
  92		return "I40E_AQ_RC_EAGAIN";
  93	case I40E_AQ_RC_ENOMEM:
  94		return "I40E_AQ_RC_ENOMEM";
  95	case I40E_AQ_RC_EACCES:
  96		return "I40E_AQ_RC_EACCES";
  97	case I40E_AQ_RC_EFAULT:
  98		return "I40E_AQ_RC_EFAULT";
  99	case I40E_AQ_RC_EBUSY:
 100		return "I40E_AQ_RC_EBUSY";
 101	case I40E_AQ_RC_EEXIST:
 102		return "I40E_AQ_RC_EEXIST";
 103	case I40E_AQ_RC_EINVAL:
 104		return "I40E_AQ_RC_EINVAL";
 105	case I40E_AQ_RC_ENOTTY:
 106		return "I40E_AQ_RC_ENOTTY";
 107	case I40E_AQ_RC_ENOSPC:
 108		return "I40E_AQ_RC_ENOSPC";
 109	case I40E_AQ_RC_ENOSYS:
 110		return "I40E_AQ_RC_ENOSYS";
 111	case I40E_AQ_RC_ERANGE:
 112		return "I40E_AQ_RC_ERANGE";
 113	case I40E_AQ_RC_EFLUSHED:
 114		return "I40E_AQ_RC_EFLUSHED";
 115	case I40E_AQ_RC_BAD_ADDR:
 116		return "I40E_AQ_RC_BAD_ADDR";
 117	case I40E_AQ_RC_EMODE:
 118		return "I40E_AQ_RC_EMODE";
 119	case I40E_AQ_RC_EFBIG:
 120		return "I40E_AQ_RC_EFBIG";
 121	}
 122
 123	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
 124	return hw->err_str;
 125}
 126
 127/**
 128 * i40e_stat_str - convert status err code to a string
 129 * @hw: pointer to the HW structure
 130 * @stat_err: the status error code to convert
 131 **/
 132const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 133{
 134	switch (stat_err) {
 135	case 0:
 136		return "OK";
 137	case I40E_ERR_NVM:
 138		return "I40E_ERR_NVM";
 139	case I40E_ERR_NVM_CHECKSUM:
 140		return "I40E_ERR_NVM_CHECKSUM";
 141	case I40E_ERR_PHY:
 142		return "I40E_ERR_PHY";
 143	case I40E_ERR_CONFIG:
 144		return "I40E_ERR_CONFIG";
 145	case I40E_ERR_PARAM:
 146		return "I40E_ERR_PARAM";
 147	case I40E_ERR_MAC_TYPE:
 148		return "I40E_ERR_MAC_TYPE";
 149	case I40E_ERR_UNKNOWN_PHY:
 150		return "I40E_ERR_UNKNOWN_PHY";
 151	case I40E_ERR_LINK_SETUP:
 152		return "I40E_ERR_LINK_SETUP";
 153	case I40E_ERR_ADAPTER_STOPPED:
 154		return "I40E_ERR_ADAPTER_STOPPED";
 155	case I40E_ERR_INVALID_MAC_ADDR:
 156		return "I40E_ERR_INVALID_MAC_ADDR";
 157	case I40E_ERR_DEVICE_NOT_SUPPORTED:
 158		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
 159	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
 160		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
 161	case I40E_ERR_INVALID_LINK_SETTINGS:
 162		return "I40E_ERR_INVALID_LINK_SETTINGS";
 163	case I40E_ERR_AUTONEG_NOT_COMPLETE:
 164		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
 165	case I40E_ERR_RESET_FAILED:
 166		return "I40E_ERR_RESET_FAILED";
 167	case I40E_ERR_SWFW_SYNC:
 168		return "I40E_ERR_SWFW_SYNC";
 169	case I40E_ERR_NO_AVAILABLE_VSI:
 170		return "I40E_ERR_NO_AVAILABLE_VSI";
 171	case I40E_ERR_NO_MEMORY:
 172		return "I40E_ERR_NO_MEMORY";
 173	case I40E_ERR_BAD_PTR:
 174		return "I40E_ERR_BAD_PTR";
 175	case I40E_ERR_RING_FULL:
 176		return "I40E_ERR_RING_FULL";
 177	case I40E_ERR_INVALID_PD_ID:
 178		return "I40E_ERR_INVALID_PD_ID";
 179	case I40E_ERR_INVALID_QP_ID:
 180		return "I40E_ERR_INVALID_QP_ID";
 181	case I40E_ERR_INVALID_CQ_ID:
 182		return "I40E_ERR_INVALID_CQ_ID";
 183	case I40E_ERR_INVALID_CEQ_ID:
 184		return "I40E_ERR_INVALID_CEQ_ID";
 185	case I40E_ERR_INVALID_AEQ_ID:
 186		return "I40E_ERR_INVALID_AEQ_ID";
 187	case I40E_ERR_INVALID_SIZE:
 188		return "I40E_ERR_INVALID_SIZE";
 189	case I40E_ERR_INVALID_ARP_INDEX:
 190		return "I40E_ERR_INVALID_ARP_INDEX";
 191	case I40E_ERR_INVALID_FPM_FUNC_ID:
 192		return "I40E_ERR_INVALID_FPM_FUNC_ID";
 193	case I40E_ERR_QP_INVALID_MSG_SIZE:
 194		return "I40E_ERR_QP_INVALID_MSG_SIZE";
 195	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
 196		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
 197	case I40E_ERR_INVALID_FRAG_COUNT:
 198		return "I40E_ERR_INVALID_FRAG_COUNT";
 199	case I40E_ERR_QUEUE_EMPTY:
 200		return "I40E_ERR_QUEUE_EMPTY";
 201	case I40E_ERR_INVALID_ALIGNMENT:
 202		return "I40E_ERR_INVALID_ALIGNMENT";
 203	case I40E_ERR_FLUSHED_QUEUE:
 204		return "I40E_ERR_FLUSHED_QUEUE";
 205	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
 206		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
 207	case I40E_ERR_INVALID_IMM_DATA_SIZE:
 208		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
 209	case I40E_ERR_TIMEOUT:
 210		return "I40E_ERR_TIMEOUT";
 211	case I40E_ERR_OPCODE_MISMATCH:
 212		return "I40E_ERR_OPCODE_MISMATCH";
 213	case I40E_ERR_CQP_COMPL_ERROR:
 214		return "I40E_ERR_CQP_COMPL_ERROR";
 215	case I40E_ERR_INVALID_VF_ID:
 216		return "I40E_ERR_INVALID_VF_ID";
 217	case I40E_ERR_INVALID_HMCFN_ID:
 218		return "I40E_ERR_INVALID_HMCFN_ID";
 219	case I40E_ERR_BACKING_PAGE_ERROR:
 220		return "I40E_ERR_BACKING_PAGE_ERROR";
 221	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
 222		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
 223	case I40E_ERR_INVALID_PBLE_INDEX:
 224		return "I40E_ERR_INVALID_PBLE_INDEX";
 225	case I40E_ERR_INVALID_SD_INDEX:
 226		return "I40E_ERR_INVALID_SD_INDEX";
 227	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
 228		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
 229	case I40E_ERR_INVALID_SD_TYPE:
 230		return "I40E_ERR_INVALID_SD_TYPE";
 231	case I40E_ERR_MEMCPY_FAILED:
 232		return "I40E_ERR_MEMCPY_FAILED";
 233	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
 234		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
 235	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
 236		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
 237	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
 238		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
 239	case I40E_ERR_SRQ_ENABLED:
 240		return "I40E_ERR_SRQ_ENABLED";
 241	case I40E_ERR_ADMIN_QUEUE_ERROR:
 242		return "I40E_ERR_ADMIN_QUEUE_ERROR";
 243	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
 244		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
 245	case I40E_ERR_BUF_TOO_SHORT:
 246		return "I40E_ERR_BUF_TOO_SHORT";
 247	case I40E_ERR_ADMIN_QUEUE_FULL:
 248		return "I40E_ERR_ADMIN_QUEUE_FULL";
 249	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
 250		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
 251	case I40E_ERR_BAD_IWARP_CQE:
 252		return "I40E_ERR_BAD_IWARP_CQE";
 253	case I40E_ERR_NVM_BLANK_MODE:
 254		return "I40E_ERR_NVM_BLANK_MODE";
 255	case I40E_ERR_NOT_IMPLEMENTED:
 256		return "I40E_ERR_NOT_IMPLEMENTED";
 257	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
 258		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
 259	case I40E_ERR_DIAG_TEST_FAILED:
 260		return "I40E_ERR_DIAG_TEST_FAILED";
 261	case I40E_ERR_NOT_READY:
 262		return "I40E_ERR_NOT_READY";
 263	case I40E_NOT_SUPPORTED:
 264		return "I40E_NOT_SUPPORTED";
 265	case I40E_ERR_FIRMWARE_API_VERSION:
 266		return "I40E_ERR_FIRMWARE_API_VERSION";
 267	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
 268		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
 269	}
 270
 271	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
 272	return hw->err_str;
 273}
 274
 275/**
 276 * i40e_debug_aq
  277 * @hw: pointer to the hw struct
 278 * @mask: debug mask
 279 * @desc: pointer to admin queue descriptor
 280 * @buffer: pointer to command buffer
 281 * @buf_len: max length of buffer
 282 *
 283 * Dumps debug log about adminq command with descriptor contents.
 284 **/
 285void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 286		   void *buffer, u16 buf_len)
 287{
 288	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
 289	u32 effective_mask = hw->debug_mask & mask;
 290	char prefix[27];
 291	u16 len;
 292	u8 *buf = (u8 *)buffer;
 293
 294	if (!effective_mask || !desc)
 295		return;
 296
 297	len = le16_to_cpu(aq_desc->datalen);
 298
 299	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 300		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
 301		   le16_to_cpu(aq_desc->opcode),
 302		   le16_to_cpu(aq_desc->flags),
 303		   le16_to_cpu(aq_desc->datalen),
 304		   le16_to_cpu(aq_desc->retval));
 305	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 306		   "\tcookie (h,l) 0x%08X 0x%08X\n",
 307		   le32_to_cpu(aq_desc->cookie_high),
 308		   le32_to_cpu(aq_desc->cookie_low));
 309	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 310		   "\tparam (0,1)  0x%08X 0x%08X\n",
 311		   le32_to_cpu(aq_desc->params.internal.param0),
 312		   le32_to_cpu(aq_desc->params.internal.param1));
 313	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
 314		   "\taddr (h,l)   0x%08X 0x%08X\n",
 315		   le32_to_cpu(aq_desc->params.external.addr_high),
 316		   le32_to_cpu(aq_desc->params.external.addr_low));
 317
 318	if (buffer && buf_len != 0 && len != 0 &&
 319	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
 320		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
 321		if (buf_len < len)
 322			len = buf_len;
 323
 324		snprintf(prefix, sizeof(prefix),
 325			 "i40e %02x:%02x.%x: \t0x",
 326			 hw->bus.bus_id,
 327			 hw->bus.device,
 328			 hw->bus.func);
 329
 330		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
 331			       16, 1, buf, len, false);
 332	}
 333}
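/* Usage sketch (annotation, not part of the original source): callers pass
 * the descriptor and data buffer of a command together with one of the masks
 * used above; output only appears for bits that are also set in
 * hw->debug_mask:
 *
 *	i40e_debug_aq(hw, I40E_DEBUG_AQ_DESCRIPTOR, &desc, buff, buff_size);
 */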
 334
 335/**
 336 * i40e_check_asq_alive
 337 * @hw: pointer to the hw struct
 338 *
 339 * Returns true if Queue is enabled else false.
 340 **/
 341bool i40e_check_asq_alive(struct i40e_hw *hw)
 342{
 343	if (hw->aq.asq.len)
 344		return !!(rd32(hw, hw->aq.asq.len) &
 345			  I40E_PF_ATQLEN_ATQENABLE_MASK);
 346	else
 347		return false;
 348}
 349
 350/**
 351 * i40e_aq_queue_shutdown
 352 * @hw: pointer to the hw struct
 353 * @unloading: is the driver unloading itself
 354 *
 355 * Tell the Firmware that we're shutting down the AdminQ and whether
 356 * or not the driver is unloading as well.
 357 **/
 358i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
 359					     bool unloading)
 360{
 361	struct i40e_aq_desc desc;
 362	struct i40e_aqc_queue_shutdown *cmd =
 363		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
 364	i40e_status status;
 365
 366	i40e_fill_default_direct_cmd_desc(&desc,
 367					  i40e_aqc_opc_queue_shutdown);
 368
 369	if (unloading)
 370		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
 371	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 372
 373	return status;
 374}
 375
 376/**
 377 * i40e_aq_get_set_rss_lut
 378 * @hw: pointer to the hardware structure
 379 * @vsi_id: vsi fw index
 380 * @pf_lut: for PF table set true, for VSI table set false
 381 * @lut: pointer to the lut buffer provided by the caller
 382 * @lut_size: size of the lut buffer
 383 * @set: set true to set the table, false to get the table
 384 *
 385 * Internal function to get or set RSS look up table
 386 **/
 387static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
 388					   u16 vsi_id, bool pf_lut,
 389					   u8 *lut, u16 lut_size,
 390					   bool set)
 391{
 392	i40e_status status;
 393	struct i40e_aq_desc desc;
 394	struct i40e_aqc_get_set_rss_lut *cmd_resp =
 395		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
 396
 397	if (set)
 398		i40e_fill_default_direct_cmd_desc(&desc,
 399						  i40e_aqc_opc_set_rss_lut);
 400	else
 401		i40e_fill_default_direct_cmd_desc(&desc,
 402						  i40e_aqc_opc_get_rss_lut);
 403
 404	/* Indirect command */
 405	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
 406	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
 407
 408	cmd_resp->vsi_id =
 409			cpu_to_le16((u16)((vsi_id <<
 410					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
 411					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
 412	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
 413
 414	if (pf_lut)
 415		cmd_resp->flags |= cpu_to_le16((u16)
 416					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
 417					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 418					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 419	else
 420		cmd_resp->flags |= cpu_to_le16((u16)
 421					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
 422					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 423					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 424
 425	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
 426
 427	return status;
 428}
 429
 430/**
 431 * i40e_aq_get_rss_lut
 432 * @hw: pointer to the hardware structure
 433 * @vsi_id: vsi fw index
 434 * @pf_lut: for PF table set true, for VSI table set false
 435 * @lut: pointer to the lut buffer provided by the caller
 436 * @lut_size: size of the lut buffer
 437 *
 438 * get the RSS lookup table, PF or VSI type
 439 **/
 440i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
 441				bool pf_lut, u8 *lut, u16 lut_size)
 442{
 443	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
 444				       false);
 445}
 446
 447/**
 448 * i40e_aq_set_rss_lut
 449 * @hw: pointer to the hardware structure
 450 * @vsi_id: vsi fw index
 451 * @pf_lut: for PF table set true, for VSI table set false
 452 * @lut: pointer to the lut buffer provided by the caller
 453 * @lut_size: size of the lut buffer
 454 *
 455 * set the RSS lookup table, PF or VSI type
 456 **/
 457i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
 458				bool pf_lut, u8 *lut, u16 lut_size)
 459{
 460	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
 461}
 462
 463/**
 464 * i40e_aq_get_set_rss_key
 465 * @hw: pointer to the hw struct
 466 * @vsi_id: vsi fw index
 467 * @key: pointer to key info struct
 468 * @set: set true to set the key, false to get the key
 469 *
  470 * get or set the RSS key per VSI
 471 **/
 472static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
 473				      u16 vsi_id,
 474				      struct i40e_aqc_get_set_rss_key_data *key,
 475				      bool set)
 476{
 477	i40e_status status;
 478	struct i40e_aq_desc desc;
 479	struct i40e_aqc_get_set_rss_key *cmd_resp =
 480			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
 481	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
 482
 483	if (set)
 484		i40e_fill_default_direct_cmd_desc(&desc,
 485						  i40e_aqc_opc_set_rss_key);
 486	else
 487		i40e_fill_default_direct_cmd_desc(&desc,
 488						  i40e_aqc_opc_get_rss_key);
 489
 490	/* Indirect command */
 491	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
 492	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
 493
 494	cmd_resp->vsi_id =
 495			cpu_to_le16((u16)((vsi_id <<
 496					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
 497					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
 498	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
 499
 500	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
 501
 502	return status;
 503}
 504
 505/**
 506 * i40e_aq_get_rss_key
 507 * @hw: pointer to the hw struct
 508 * @vsi_id: vsi fw index
 509 * @key: pointer to key info struct
 510 *
 511 **/
 512i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
 513				u16 vsi_id,
 514				struct i40e_aqc_get_set_rss_key_data *key)
 515{
 516	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
 517}
 518
 519/**
 520 * i40e_aq_set_rss_key
 521 * @hw: pointer to the hw struct
 522 * @vsi_id: vsi fw index
 523 * @key: pointer to key info struct
 524 *
 525 * set the RSS key per VSI
 526 **/
 527i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
 528				u16 vsi_id,
 529				struct i40e_aqc_get_set_rss_key_data *key)
 530{
 531	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
 532}
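/* Usage sketch (annotation, not part of the original source): RSS for a VSI
 * is typically configured by programming the hash key first and then the
 * lookup table through the wrappers above.  The buffer size and the fact
 * that both buffers are pre-filled by the caller are assumptions of this
 * sketch:
 *
 *	struct i40e_aqc_get_set_rss_key_data key;   (filled by the caller)
 *	u8 lut[128];                                (illustrative size)
 *
 *	i40e_aq_set_rss_key(hw, vsi_id, &key);
 *	i40e_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));
 */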
 533
 534/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 535 * hardware to a bit-field that can be used by SW to more easily determine the
 536 * packet type.
 537 *
 538 * Macros are used to shorten the table lines and make this table human
 539 * readable.
 540 *
 541 * We store the PTYPE in the top byte of the bit field - this is just so that
 542 * we can check that the table doesn't have a row missing, as the index into
 543 * the table should be the PTYPE.
 544 *
 545 * Typical work flow:
 546 *
 547 * IF NOT i40e_ptype_lookup[ptype].known
 548 * THEN
 549 *      Packet is unknown
 550 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 551 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
 552 * ELSE
 553 *      Use the enum i40e_rx_l2_ptype to decode the packet type
 554 * ENDIF
 555 */
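/* Decode sketch (annotation, not part of the original source): following the
 * work flow above, a receive-path consumer might look up a ptype like this;
 * only fields named in that comment are used and rx_ptype is illustrative:
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[rx_ptype];
 *
 *	if (!decoded.known)
 *		return;                 (unknown packet type)
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
 *		... inspect tunnel and inner protocol fields ...
 *	else
 *		... decode as an L2 ptype ...
 */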
 556
 557/* macro to make the table lines short, use explicit indexing with [PTYPE] */
 558#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
 559	[PTYPE] = { \
 560		1, \
 561		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
 562		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
 563		I40E_RX_PTYPE_##OUTER_FRAG, \
 564		I40E_RX_PTYPE_TUNNEL_##T, \
 565		I40E_RX_PTYPE_TUNNEL_END_##TE, \
 566		I40E_RX_PTYPE_##TEF, \
 567		I40E_RX_PTYPE_INNER_PROT_##I, \
 568		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
 569
 570#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 571
 572/* shorter macros makes the table fit but are terse */
 573#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
 574#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
 575#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC
 576
 577/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
 578struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
 579	/* L2 Packet types */
 580	I40E_PTT_UNUSED_ENTRY(0),
 581	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 582	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
 583	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 584	I40E_PTT_UNUSED_ENTRY(4),
 585	I40E_PTT_UNUSED_ENTRY(5),
 586	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 587	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 588	I40E_PTT_UNUSED_ENTRY(8),
 589	I40E_PTT_UNUSED_ENTRY(9),
 590	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 591	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
 592	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 593	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 594	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 595	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 596	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 597	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 598	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 599	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 600	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 601	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
 602
 603	/* Non Tunneled IPv4 */
 604	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
 605	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
 606	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
 607	I40E_PTT_UNUSED_ENTRY(25),
 608	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
 609	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
 610	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
 611
 612	/* IPv4 --> IPv4 */
 613	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
 614	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
 615	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
 616	I40E_PTT_UNUSED_ENTRY(32),
 617	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
 618	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
 619	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
 620
 621	/* IPv4 --> IPv6 */
 622	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
 623	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
 624	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
 625	I40E_PTT_UNUSED_ENTRY(39),
 626	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
 627	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
 628	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
 629
 630	/* IPv4 --> GRE/NAT */
 631	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
 632
 633	/* IPv4 --> GRE/NAT --> IPv4 */
 634	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
 635	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
 636	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
 637	I40E_PTT_UNUSED_ENTRY(47),
 638	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
 639	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
 640	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
 641
 642	/* IPv4 --> GRE/NAT --> IPv6 */
 643	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
 644	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
 645	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
 646	I40E_PTT_UNUSED_ENTRY(54),
 647	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
 648	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
 649	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
 650
 651	/* IPv4 --> GRE/NAT --> MAC */
 652	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
 653
 654	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
 655	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
 656	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
 657	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
 658	I40E_PTT_UNUSED_ENTRY(62),
 659	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
 660	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
 661	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
 662
 663	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
 664	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
 665	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
 666	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
 667	I40E_PTT_UNUSED_ENTRY(69),
 668	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
 669	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
 670	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
 671
 672	/* IPv4 --> GRE/NAT --> MAC/VLAN */
 673	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
 674
 675	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
 676	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
 677	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
 678	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
 679	I40E_PTT_UNUSED_ENTRY(77),
 680	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
 681	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
 682	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
 683
 684	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
 685	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
 686	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
 687	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
 688	I40E_PTT_UNUSED_ENTRY(84),
 689	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
 690	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
 691	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
 692
 693	/* Non Tunneled IPv6 */
 694	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
 695	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
 696	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
 697	I40E_PTT_UNUSED_ENTRY(91),
 698	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
 699	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
 700	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
 701
 702	/* IPv6 --> IPv4 */
 703	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
 704	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
 705	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
 706	I40E_PTT_UNUSED_ENTRY(98),
 707	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
 708	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
 709	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
 710
 711	/* IPv6 --> IPv6 */
 712	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
 713	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
 714	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
 715	I40E_PTT_UNUSED_ENTRY(105),
 716	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
 717	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
 718	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
 719
 720	/* IPv6 --> GRE/NAT */
 721	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
 722
 723	/* IPv6 --> GRE/NAT -> IPv4 */
 724	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
 725	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
 726	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
 727	I40E_PTT_UNUSED_ENTRY(113),
 728	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
 729	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
 730	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
 731
 732	/* IPv6 --> GRE/NAT -> IPv6 */
 733	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
 734	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
 735	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
 736	I40E_PTT_UNUSED_ENTRY(120),
 737	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
 738	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
 739	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
 740
 741	/* IPv6 --> GRE/NAT -> MAC */
 742	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
 743
 744	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
 745	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
 746	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
 747	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
 748	I40E_PTT_UNUSED_ENTRY(128),
 749	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
 750	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
 751	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
 752
 753	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
 754	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
 755	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
 756	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
 757	I40E_PTT_UNUSED_ENTRY(135),
 758	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
 759	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
 760	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
 761
 762	/* IPv6 --> GRE/NAT -> MAC/VLAN */
 763	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
 764
 765	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
 766	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
 767	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
 768	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
 769	I40E_PTT_UNUSED_ENTRY(143),
 770	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
 771	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
 772	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
 773
 774	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
 775	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
 776	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
 777	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
 778	I40E_PTT_UNUSED_ENTRY(150),
 779	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
 780	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
 781	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
 782
 783	/* unused entries */
 784	[154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 785};
 786
 787/**
 788 * i40e_init_shared_code - Initialize the shared code
 789 * @hw: pointer to hardware structure
 790 *
 791 * This assigns the MAC type and PHY code and inits the NVM.
 792 * Does not touch the hardware. This function must be called prior to any
 793 * other function in the shared code. The i40e_hw structure should be
 794 * memset to 0 prior to calling this function.  The following fields in
 795 * hw structure should be filled in prior to calling this function:
 796 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 797 * subsystem_vendor_id, and revision_id
 798 **/
 799i40e_status i40e_init_shared_code(struct i40e_hw *hw)
 800{
 801	i40e_status status = 0;
 802	u32 port, ari, func_rid;
 803
 804	i40e_set_mac_type(hw);
 805
 806	switch (hw->mac.type) {
 807	case I40E_MAC_XL710:
 808	case I40E_MAC_X722:
 809		break;
 810	default:
 811		return I40E_ERR_DEVICE_NOT_SUPPORTED;
 812	}
 813
 814	hw->phy.get_link_info = true;
 815
 816	/* Determine port number and PF number */
 817	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
 818					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
 819	hw->port = (u8)port;
 820	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
 821						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
 822	func_rid = rd32(hw, I40E_PF_FUNC_RID);
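	/* With ARI (Alternative Routing-ID Interpretation) the full 8-bit
	 * function number of the Requester ID identifies the PF; without
	 * ARI only the low 3 bits do.
	 */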
 823	if (ari)
 824		hw->pf_id = (u8)(func_rid & 0xff);
 825	else
 826		hw->pf_id = (u8)(func_rid & 0x7);
 827
 828	status = i40e_init_nvm(hw);
 829	return status;
 830}
 831
 832/**
 833 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 834 * @hw: pointer to the hw struct
 835 * @flags: a return indicator of what addresses were added to the addr store
 836 * @addrs: the requestor's mac addr store
 837 * @cmd_details: pointer to command details structure or NULL
 838 **/
 839static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
 840				   u16 *flags,
 841				   struct i40e_aqc_mac_address_read_data *addrs,
 842				   struct i40e_asq_cmd_details *cmd_details)
 843{
 844	struct i40e_aq_desc desc;
 845	struct i40e_aqc_mac_address_read *cmd_data =
 846		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
 847	i40e_status status;
 848
 849	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
 850	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
 851
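	/* The addresses themselves are returned in the indirect buffer;
	 * command_flags in the descriptor reports which entries are valid.
	 */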
 852	status = i40e_asq_send_command(hw, &desc, addrs,
 853				       sizeof(*addrs), cmd_details);
 854	*flags = le16_to_cpu(cmd_data->command_flags);
 855
 856	return status;
 857}
 858
 859/**
 860 * i40e_aq_mac_address_write - Change the MAC addresses
 861 * @hw: pointer to the hw struct
 862 * @flags: indicates which MAC to be written
 863 * @mac_addr: address to write
 864 * @cmd_details: pointer to command details structure or NULL
 865 **/
 866i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
 867				    u16 flags, u8 *mac_addr,
 868				    struct i40e_asq_cmd_details *cmd_details)
 869{
 870	struct i40e_aq_desc desc;
 871	struct i40e_aqc_mac_address_write *cmd_data =
 872		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
 873	i40e_status status;
 874
 875	i40e_fill_default_direct_cmd_desc(&desc,
 876					  i40e_aqc_opc_mac_address_write);
 877	cmd_data->command_flags = cpu_to_le16(flags);
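	/* Pack the 6-byte MAC address: bytes 0-1 into the SAH word and
	 * bytes 2-5 into the SAL dword, most significant byte first.
	 */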
 878	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
 879	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
 880					((u32)mac_addr[3] << 16) |
 881					((u32)mac_addr[4] << 8) |
 882					mac_addr[5]);
 883
 884	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 885
 886	return status;
 887}
 888
 889/**
 890 * i40e_get_mac_addr - get MAC address
 891 * @hw: pointer to the HW structure
 892 * @mac_addr: pointer to MAC address
 893 *
 894 * Reads the adapter's MAC address from register
 895 **/
 896i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 897{
 898	struct i40e_aqc_mac_address_read_data addrs;
 899	i40e_status status;
 900	u16 flags = 0;
 901
 902	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
 903
 904	if (flags & I40E_AQC_LAN_ADDR_VALID)
 905		ether_addr_copy(mac_addr, addrs.pf_lan_mac);
 906
 907	return status;
 908}
 909
 910/**
 911 * i40e_get_port_mac_addr - get Port MAC address
 912 * @hw: pointer to the HW structure
 913 * @mac_addr: pointer to Port MAC address
 914 *
 915 * Reads the adapter's Port MAC address
 916 **/
 917i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 918{
 919	struct i40e_aqc_mac_address_read_data addrs;
 920	i40e_status status;
 921	u16 flags = 0;
 922
 923	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
 924	if (status)
 925		return status;
 926
 927	if (flags & I40E_AQC_PORT_ADDR_VALID)
 928		ether_addr_copy(mac_addr, addrs.port_mac);
 929	else
 930		status = I40E_ERR_INVALID_MAC_ADDR;
 931
 932	return status;
 933}
 934
 935/**
 936 * i40e_pre_tx_queue_cfg - pre tx queue configure
 937 * @hw: pointer to the HW structure
 938 * @queue: target PF queue index
 939 * @enable: state change request
 940 *
 941 * Handles hw requirement to indicate intention to enable
 942 * or disable target queue.
 943 **/
 944void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
 945{
 946	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
 947	u32 reg_block = 0;
 948	u32 reg_val;
 949
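	/* Each GLLAN_TXPRE_QDIS register covers 128 queues, so split the
	 * absolute queue index into a register block and an offset.
	 */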
 950	if (abs_queue_idx >= 128) {
 951		reg_block = abs_queue_idx / 128;
 952		abs_queue_idx %= 128;
 953	}
 954
 955	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
 956	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
 957	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
 958
 959	if (enable)
 960		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
 961	else
 962		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
 963
 964	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
 965}
 966
 967/**
 968 *  i40e_read_pba_string - Reads part number string from EEPROM
 969 *  @hw: pointer to hardware structure
 970 *  @pba_num: stores the part number string from the EEPROM
 971 *  @pba_num_size: part number string buffer length
 972 *
 973 *  Reads the part number string from the EEPROM.
 974 **/
 975i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
 976				 u32 pba_num_size)
 977{
 978	i40e_status status = 0;
 979	u16 pba_word = 0;
 980	u16 pba_size = 0;
 981	u16 pba_ptr = 0;
 982	u16 i = 0;
 983
 984	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
 985	if (status || (pba_word != 0xFAFA)) {
 986		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
 987		return status;
 988	}
 989
 990	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
 991	if (status) {
 992		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
 993		return status;
 994	}
 995
 996	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
 997	if (status) {
 998		hw_dbg(hw, "Failed to read PBA Block size.\n");
 999		return status;
1000	}
1001
1002	/* Subtract one to get PBA word count (PBA Size word is included in
1003	 * total size)
1004	 */
1005	pba_size--;
1006	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1007		hw_dbg(hw, "Buffer too small for PBA data.\n");
1008		return I40E_ERR_PARAM;
1009	}
1010
1011	for (i = 0; i < pba_size; i++) {
1012		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1013		if (status) {
1014			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1015			return status;
1016		}
1017
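		/* Each NVM word carries two ASCII characters of the PBA
		 * string, high byte first.
		 */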
1018		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1019		pba_num[(i * 2) + 1] = pba_word & 0xFF;
1020	}
1021	pba_num[(pba_size * 2)] = '\0';
1022
1023	return status;
1024}
1025
1026/**
1027 * i40e_get_media_type - Gets media type
1028 * @hw: pointer to the hardware structure
1029 **/
1030static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1031{
1032	enum i40e_media_type media;
1033
1034	switch (hw->phy.link_info.phy_type) {
1035	case I40E_PHY_TYPE_10GBASE_SR:
1036	case I40E_PHY_TYPE_10GBASE_LR:
1037	case I40E_PHY_TYPE_1000BASE_SX:
1038	case I40E_PHY_TYPE_1000BASE_LX:
1039	case I40E_PHY_TYPE_40GBASE_SR4:
1040	case I40E_PHY_TYPE_40GBASE_LR4:
1041	case I40E_PHY_TYPE_25GBASE_LR:
1042	case I40E_PHY_TYPE_25GBASE_SR:
1043		media = I40E_MEDIA_TYPE_FIBER;
1044		break;
1045	case I40E_PHY_TYPE_100BASE_TX:
1046	case I40E_PHY_TYPE_1000BASE_T:
1047	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1048	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1049	case I40E_PHY_TYPE_10GBASE_T:
1050		media = I40E_MEDIA_TYPE_BASET;
1051		break;
1052	case I40E_PHY_TYPE_10GBASE_CR1_CU:
1053	case I40E_PHY_TYPE_40GBASE_CR4_CU:
1054	case I40E_PHY_TYPE_10GBASE_CR1:
1055	case I40E_PHY_TYPE_40GBASE_CR4:
1056	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1057	case I40E_PHY_TYPE_40GBASE_AOC:
1058	case I40E_PHY_TYPE_10GBASE_AOC:
1059	case I40E_PHY_TYPE_25GBASE_CR:
1060	case I40E_PHY_TYPE_25GBASE_AOC:
1061	case I40E_PHY_TYPE_25GBASE_ACC:
1062		media = I40E_MEDIA_TYPE_DA;
1063		break;
1064	case I40E_PHY_TYPE_1000BASE_KX:
1065	case I40E_PHY_TYPE_10GBASE_KX4:
1066	case I40E_PHY_TYPE_10GBASE_KR:
1067	case I40E_PHY_TYPE_40GBASE_KR4:
1068	case I40E_PHY_TYPE_20GBASE_KR2:
1069	case I40E_PHY_TYPE_25GBASE_KR:
1070		media = I40E_MEDIA_TYPE_BACKPLANE;
1071		break;
1072	case I40E_PHY_TYPE_SGMII:
1073	case I40E_PHY_TYPE_XAUI:
1074	case I40E_PHY_TYPE_XFI:
1075	case I40E_PHY_TYPE_XLAUI:
1076	case I40E_PHY_TYPE_XLPPI:
1077	default:
1078		media = I40E_MEDIA_TYPE_UNKNOWN;
1079		break;
1080	}
1081
1082	return media;
1083}
1084
1085/**
1086 * i40e_poll_globr - Poll for Global Reset completion
1087 * @hw: pointer to the hardware structure
1088 * @retry_limit: how many times to retry before failure
1089 **/
1090static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1091				   u32 retry_limit)
1092{
1093	u32 cnt, reg = 0;
1094
1095	for (cnt = 0; cnt < retry_limit; cnt++) {
1096		reg = rd32(hw, I40E_GLGEN_RSTAT);
1097		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1098			return 0;
1099		msleep(100);
1100	}
1101
1102	hw_dbg(hw, "Global reset failed.\n");
1103	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1104
1105	return I40E_ERR_RESET_FAILED;
1106}
1107
1108#define I40E_PF_RESET_WAIT_COUNT_A0	200
1109#define I40E_PF_RESET_WAIT_COUNT	200
1110/**
1111 * i40e_pf_reset - Reset the PF
1112 * @hw: pointer to the hardware structure
1113 *
1114 * Assuming someone else has triggered a global reset,
1115 * ensure the global reset is complete and then reset the PF
1116 **/
1117i40e_status i40e_pf_reset(struct i40e_hw *hw)
1118{
1119	u32 cnt = 0;
1120	u32 cnt1 = 0;
1121	u32 reg = 0;
1122	u32 grst_del;
1123
1124	/* Poll for Global Reset steady state in case of recent GRST.
1125	 * The grst delay value is in 100ms units, and we'll wait a
1126	 * couple counts longer to be sure we don't just miss the end.
1127	 */
1128	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1129		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1130		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1131
1132	/* It can take up to 15 secs for GRST steady state.
1133	 * Bump it to 16 secs max to be safe.
1134	 */
1135	grst_del = grst_del * 20;
1136
1137	for (cnt = 0; cnt < grst_del; cnt++) {
1138		reg = rd32(hw, I40E_GLGEN_RSTAT);
1139		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1140			break;
1141		msleep(100);
1142	}
1143	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1144		hw_dbg(hw, "Global reset polling failed to complete.\n");
1145		return I40E_ERR_RESET_FAILED;
1146	}
1147
1148	/* Now Wait for the FW to be ready */
1149	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1150		reg = rd32(hw, I40E_GLNVM_ULD);
1151		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1152			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1153		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1154			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1155			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1156			break;
1157		}
1158		usleep_range(10000, 20000);
1159	}
1160	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1161		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1162		hw_dbg(hw, "wait for FW Reset complete timed out\n");
1163		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1164		return I40E_ERR_RESET_FAILED;
1165	}
1166
1167	/* If there was a Global Reset in progress when we got here,
1168	 * we don't need to do the PF Reset
1169	 */
1170	if (!cnt) {
1171		u32 reg2 = 0;
1172		if (hw->revision_id == 0)
1173			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1174		else
1175			cnt = I40E_PF_RESET_WAIT_COUNT;
1176		reg = rd32(hw, I40E_PFGEN_CTRL);
1177		wr32(hw, I40E_PFGEN_CTRL,
1178		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1179		for (; cnt; cnt--) {
1180			reg = rd32(hw, I40E_PFGEN_CTRL);
1181			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1182				break;
1183			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1184			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1185				break;
1186			usleep_range(1000, 2000);
1187		}
1188		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1189			if (i40e_poll_globr(hw, grst_del))
1190				return I40E_ERR_RESET_FAILED;
1191		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1192			hw_dbg(hw, "PF reset polling failed to complete.\n");
1193			return I40E_ERR_RESET_FAILED;
1194		}
1195	}
1196
1197	i40e_clear_pxe_mode(hw);
1198
1199	return 0;
1200}
1201
1202/**
1203 * i40e_clear_hw - clear out any left over hw state
1204 * @hw: pointer to the hw struct
1205 *
1206 * Clear queues and interrupts, typically called at init time,
1207 * but after the capabilities have been found so we know how many
1208 * queues and msix vectors have been allocated.
1209 **/
1210void i40e_clear_hw(struct i40e_hw *hw)
1211{
1212	u32 num_queues, base_queue;
1213	u32 num_pf_int;
1214	u32 num_vf_int;
1215	u32 num_vfs;
1216	u32 i, j;
1217	u32 val;
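	/* 0x7FF written to a FIRSTQ_INDX field marks the end of the
	 * interrupt linked list.
	 */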
1218	u32 eol = 0x7ff;
1219
1220	/* get number of interrupts, queues, and VFs */
1221	val = rd32(hw, I40E_GLPCI_CNF2);
1222	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1223		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1224	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1225		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1226
1227	val = rd32(hw, I40E_PFLAN_QALLOC);
1228	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1229		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1230	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1231	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1232	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1233		num_queues = (j - base_queue) + 1;
1234	else
1235		num_queues = 0;
1236
1237	val = rd32(hw, I40E_PF_VT_PFALLOC);
1238	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1239	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1240	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1241	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1242	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1243		num_vfs = (j - i) + 1;
1244	else
1245		num_vfs = 0;
1246
1247	/* stop all the interrupts */
1248	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1249	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1250	for (i = 0; i < num_pf_int - 2; i++)
1251		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1252
1253	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1254	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1255	wr32(hw, I40E_PFINT_LNKLST0, val);
1256	for (i = 0; i < num_pf_int - 2; i++)
1257		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1258	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1259	for (i = 0; i < num_vfs; i++)
1260		wr32(hw, I40E_VPINT_LNKLST0(i), val);
1261	for (i = 0; i < num_vf_int - 2; i++)
1262		wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1263
1264	/* warn the HW of the coming Tx disables */
1265	for (i = 0; i < num_queues; i++) {
1266		u32 abs_queue_idx = base_queue + i;
1267		u32 reg_block = 0;
1268
1269		if (abs_queue_idx >= 128) {
1270			reg_block = abs_queue_idx / 128;
1271			abs_queue_idx %= 128;
1272		}
1273
1274		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1275		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1276		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1277		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1278
1279		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1280	}
1281	udelay(400);
1282
1283	/* stop all the queues */
1284	for (i = 0; i < num_queues; i++) {
1285		wr32(hw, I40E_QINT_TQCTL(i), 0);
1286		wr32(hw, I40E_QTX_ENA(i), 0);
1287		wr32(hw, I40E_QINT_RQCTL(i), 0);
1288		wr32(hw, I40E_QRX_ENA(i), 0);
1289	}
1290
1291	/* short wait for all queue disables to settle */
1292	udelay(50);
1293}
1294
1295/**
1296 * i40e_clear_pxe_mode - clear pxe operations mode
1297 * @hw: pointer to the hw struct
1298 *
1299 * Make sure all PXE mode settings are cleared, including things
1300 * like descriptor fetch/write-back mode.
1301 **/
1302void i40e_clear_pxe_mode(struct i40e_hw *hw)
1303{
1304	u32 reg;
1305
1306	if (i40e_check_asq_alive(hw))
1307		i40e_aq_clear_pxe_mode(hw, NULL);
1308
1309	/* Clear single descriptor fetch/write-back mode */
1310	reg = rd32(hw, I40E_GLLAN_RCTL_0);
1311
1312	if (hw->revision_id == 0) {
1313		/* As a work around clear PXE_MODE instead of setting it */
1314		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1315	} else {
1316		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1317	}
1318}
1319
1320/**
1321 * i40e_led_is_mine - helper to find matching led
1322 * @hw: pointer to the hw struct
1323 * @idx: index into GPIO registers
1324 *
1325 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1326 */
1327static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1328{
1329	u32 gpio_val = 0;
1330	u32 port;
1331
1332	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1333	    !hw->func_caps.led[idx])
1334		return 0;
1335	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1336	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1337		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1338
1339	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1340	 * if it is not our port then ignore
1341	 */
1342	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1343	    (port != hw->port))
1344		return 0;
1345
1346	return gpio_val;
1347}
1348
1349#define I40E_FW_LED BIT(4)
1350#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1351			     I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1352
1353#define I40E_LED0 22
1354
1355#define I40E_PIN_FUNC_SDP 0x0
1356#define I40E_PIN_FUNC_LED 0x1
1357
1358/**
1359 * i40e_led_get - return current on/off mode
1360 * @hw: pointer to the hw struct
1361 *
1362 * The value returned is the 'mode' field as defined in the
1363 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1364 * values are variations of possible behaviors relating to
1365 * blink, link, and wire.
1366 **/
1367u32 i40e_led_get(struct i40e_hw *hw)
1368{
1369	u32 mode = 0;
1370	int i;
1371
1372	/* as per the documentation GPIO 22-29 are the LED
1373	 * GPIO pins named LED0..LED7
1374	 */
1375	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1376		u32 gpio_val = i40e_led_is_mine(hw, i);
1377
1378		if (!gpio_val)
1379			continue;
1380
1381		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1382			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1383		break;
1384	}
1385
1386	return mode;
1387}
1388
1389/**
1390 * i40e_led_set - set new on/off mode
1391 * @hw: pointer to the hw struct
1392 * @mode: 0=off, 0xf=on (else see manual for mode details)
1393 * @blink: true if the LED should blink when on, false if steady
1394 *
1395 * If this function is used to turn on the blink, it should also
1396 * be used to disable the blink when restoring the original state.
1397 **/
1398void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1399{
1400	int i;
1401
1402	if (mode & ~I40E_LED_MODE_VALID) {
1403		hw_dbg(hw, "invalid mode passed in %X\n", mode);
1404		return;
1405	}
1406
1407	/* as per the documentation GPIO 22-29 are the LED
1408	 * GPIO pins named LED0..LED7
1409	 */
1410	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1411		u32 gpio_val = i40e_led_is_mine(hw, i);
1412
1413		if (!gpio_val)
1414			continue;
1415
1416		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1417			u32 pin_func = 0;
1418
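			/* On X710-T*L devices the pin either acts as an SDP
			 * under firmware control or as a driver-controlled LED.
			 */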
1419			if (mode & I40E_FW_LED)
1420				pin_func = I40E_PIN_FUNC_SDP;
1421			else
1422				pin_func = I40E_PIN_FUNC_LED;
1423
1424			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1425			gpio_val |= ((pin_func <<
1426				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1427				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1428		}
1429		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1430		/* this & is a bit of paranoia, but serves as a range check */
1431		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1432			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1433
1434		if (blink)
1435			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1436		else
1437			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1438
1439		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1440		break;
1441	}
1442}
1443
1444/* Admin command wrappers */
1445
1446/**
1447 * i40e_aq_get_phy_capabilities
1448 * @hw: pointer to the hw struct
1449 * @abilities: structure for PHY capabilities to be filled
1450 * @qualified_modules: report Qualified Modules
1451 * @report_init: report init capabilities (active are default)
1452 * @cmd_details: pointer to command details structure or NULL
1453 *
1454 * Returns the various PHY abilities supported on the Port.
1455 **/
1456i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1457			bool qualified_modules, bool report_init,
1458			struct i40e_aq_get_phy_abilities_resp *abilities,
1459			struct i40e_asq_cmd_details *cmd_details)
1460{
1461	struct i40e_aq_desc desc;
1462	i40e_status status;
1463	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1464	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1465
1466	if (!abilities)
1467		return I40E_ERR_PARAM;
1468
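	/* Retry while the firmware answers EAGAIN, sleeping 1-2 ms per
	 * attempt, up to I40E_MAX_PHY_TIMEOUT attempts.
	 */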
1469	do {
1470		i40e_fill_default_direct_cmd_desc(&desc,
1471					       i40e_aqc_opc_get_phy_abilities);
1472
1473		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1474		if (abilities_size > I40E_AQ_LARGE_BUF)
1475			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1476
1477		if (qualified_modules)
1478			desc.params.external.param0 |=
1479			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1480
1481		if (report_init)
1482			desc.params.external.param0 |=
1483			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1484
1485		status = i40e_asq_send_command(hw, &desc, abilities,
1486					       abilities_size, cmd_details);
1487
1488		switch (hw->aq.asq_last_status) {
1489		case I40E_AQ_RC_EIO:
1490			status = I40E_ERR_UNKNOWN_PHY;
1491			break;
1492		case I40E_AQ_RC_EAGAIN:
1493			usleep_range(1000, 2000);
1494			total_delay++;
1495			status = I40E_ERR_TIMEOUT;
1496			break;
1497		/* also covers I40E_AQ_RC_OK */
1498		default:
1499			break;
1500		}
1501
1502	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1503		(total_delay < max_delay));
1504
1505	if (status)
1506		return status;
1507
1508	if (report_init) {
1509		if (hw->mac.type ==  I40E_MAC_XL710 &&
1510		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1511		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1512			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1513		} else {
1514			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1515			hw->phy.phy_types |=
1516					((u64)abilities->phy_type_ext << 32);
1517		}
1518	}
1519
1520	return status;
1521}
1522
1523/**
1524 * i40e_aq_set_phy_config
1525 * @hw: pointer to the hw struct
1526 * @config: structure with PHY configuration to be set
1527 * @cmd_details: pointer to command details structure or NULL
1528 *
1529 * Set the various PHY configuration parameters
1530 * supported on the Port. One or more of the Set PHY config parameters may be
1531 * ignored in an MFP mode as the PF may not have the privilege to set some
1532 * of the PHY Config parameters. This status will be indicated by the
1533 * command response.
1534 **/
1535enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1536				struct i40e_aq_set_phy_config *config,
1537				struct i40e_asq_cmd_details *cmd_details)
1538{
1539	struct i40e_aq_desc desc;
1540	struct i40e_aq_set_phy_config *cmd =
1541			(struct i40e_aq_set_phy_config *)&desc.params.raw;
1542	enum i40e_status_code status;
1543
1544	if (!config)
1545		return I40E_ERR_PARAM;
1546
1547	i40e_fill_default_direct_cmd_desc(&desc,
1548					  i40e_aqc_opc_set_phy_config);
1549
1550	*cmd = *config;
1551
1552	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1553
1554	return status;
1555}
1556
1557static noinline_for_stack enum i40e_status_code
1558i40e_set_fc_status(struct i40e_hw *hw,
1559		   struct i40e_aq_get_phy_abilities_resp *abilities,
1560		   bool atomic_restart)
1561{
1562	struct i40e_aq_set_phy_config config;
1563	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1564	u8 pause_mask = 0x0;
1565
1566	switch (fc_mode) {
1567	case I40E_FC_FULL:
1568		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1569		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1570		break;
1571	case I40E_FC_RX_PAUSE:
1572		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1573		break;
1574	case I40E_FC_TX_PAUSE:
1575		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1576		break;
1577	default:
1578		break;
1579	}
1580
1581	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1582	/* clear the old pause settings */
1583	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1584			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1585	/* set the new abilities */
1586	config.abilities |= pause_mask;
1587	/* If the abilities have not changed, there is nothing to do */
1588	if (config.abilities == abilities->abilities)
1589		return 0;
1590
1591	/* Auto restart link so settings take effect */
1592	if (atomic_restart)
1593		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1594	/* Copy over all the old settings */
1595	config.phy_type = abilities->phy_type;
1596	config.phy_type_ext = abilities->phy_type_ext;
1597	config.link_speed = abilities->link_speed;
1598	config.eee_capability = abilities->eee_capability;
1599	config.eeer = abilities->eeer_val;
1600	config.low_power_ctrl = abilities->d3_lpan;
1601	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1602			    I40E_AQ_PHY_FEC_CONFIG_MASK;
1603
1604	return i40e_aq_set_phy_config(hw, &config, NULL);
1605}
1606
1607/**
1608 * i40e_set_fc
1609 * @hw: pointer to the hw struct
1610 * @aq_failures: buffer to return AdminQ failure information
1611 * @atomic_restart: whether to enable atomic link restart
1612 *
1613 * Set the requested flow control mode using set_phy_config.
1614 **/
1615enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1616				  bool atomic_restart)
1617{
1618	struct i40e_aq_get_phy_abilities_resp abilities;
1619	enum i40e_status_code status;
1620
1621	*aq_failures = 0x0;
1622
1623	/* Get the current phy config */
1624	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1625					      NULL);
1626	if (status) {
1627		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1628		return status;
1629	}
1630
1631	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1632	if (status)
1633		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1634
1635	/* Update the link info */
1636	status = i40e_update_link_info(hw);
1637	if (status) {
1638		/* Wait a little bit (on 40G cards it sometimes takes a really
1639		 * long time for link to come back from the atomic reset)
1640		 * and try once more
1641		 */
1642		msleep(1000);
1643		status = i40e_update_link_info(hw);
1644	}
1645	if (status)
1646		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1647
1648	return status;
1649}
1650
1651/**
1652 * i40e_aq_clear_pxe_mode
1653 * @hw: pointer to the hw struct
1654 * @cmd_details: pointer to command details structure or NULL
1655 *
1656 * Tell the firmware that the driver is taking over from PXE
1657 **/
1658i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1659				struct i40e_asq_cmd_details *cmd_details)
1660{
1661	i40e_status status;
1662	struct i40e_aq_desc desc;
1663	struct i40e_aqc_clear_pxe *cmd =
1664		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
1665
1666	i40e_fill_default_direct_cmd_desc(&desc,
1667					  i40e_aqc_opc_clear_pxe_mode);
1668
1669	cmd->rx_cnt = 0x2;
1670
1671	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1672
1673	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1674
1675	return status;
1676}
1677
1678/**
1679 * i40e_aq_set_link_restart_an
1680 * @hw: pointer to the hw struct
1681 * @enable_link: if true: enable link, if false: disable link
1682 * @cmd_details: pointer to command details structure or NULL
1683 *
1684 * Sets up the link and restarts the Auto-Negotiation over the link.
1685 **/
1686i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1687					bool enable_link,
1688					struct i40e_asq_cmd_details *cmd_details)
1689{
1690	struct i40e_aq_desc desc;
1691	struct i40e_aqc_set_link_restart_an *cmd =
1692		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1693	i40e_status status;
1694
1695	i40e_fill_default_direct_cmd_desc(&desc,
1696					  i40e_aqc_opc_set_link_restart_an);
1697
1698	cmd->command = I40E_AQ_PHY_RESTART_AN;
1699	if (enable_link)
1700		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1701	else
1702		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1703
1704	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1705
1706	return status;
1707}
1708
1709/**
1710 * i40e_aq_get_link_info
1711 * @hw: pointer to the hw struct
1712 * @enable_lse: enable/disable LinkStatusEvent reporting
1713 * @link: pointer to link status structure - optional
1714 * @cmd_details: pointer to command details structure or NULL
1715 *
1716 * Returns the link status of the adapter.
1717 **/
1718i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1719				bool enable_lse, struct i40e_link_status *link,
1720				struct i40e_asq_cmd_details *cmd_details)
1721{
1722	struct i40e_aq_desc desc;
1723	struct i40e_aqc_get_link_status *resp =
1724		(struct i40e_aqc_get_link_status *)&desc.params.raw;
1725	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1726	i40e_status status;
1727	bool tx_pause, rx_pause;
1728	u16 command_flags;
1729
1730	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1731
1732	if (enable_lse)
1733		command_flags = I40E_AQ_LSE_ENABLE;
1734	else
1735		command_flags = I40E_AQ_LSE_DISABLE;
1736	resp->command_flags = cpu_to_le16(command_flags);
1737
1738	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1739
1740	if (status)
1741		goto aq_get_link_info_exit;
1742
1743	/* save off old link status information */
1744	hw->phy.link_info_old = *hw_link_info;
1745
1746	/* update link status */
1747	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1748	hw->phy.media_type = i40e_get_media_type(hw);
1749	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1750	hw_link_info->link_info = resp->link_info;
1751	hw_link_info->an_info = resp->an_info;
1752	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1753						 I40E_AQ_CONFIG_FEC_RS_ENA);
1754	hw_link_info->ext_info = resp->ext_info;
1755	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1756	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1757	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1758
1759	/* update fc info */
1760	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1761	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1762	if (tx_pause && rx_pause)
1763		hw->fc.current_mode = I40E_FC_FULL;
1764	else if (tx_pause)
1765		hw->fc.current_mode = I40E_FC_TX_PAUSE;
1766	else if (rx_pause)
1767		hw->fc.current_mode = I40E_FC_RX_PAUSE;
1768	else
1769		hw->fc.current_mode = I40E_FC_NONE;
1770
1771	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1772		hw_link_info->crc_enable = true;
1773	else
1774		hw_link_info->crc_enable = false;
1775
1776	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1777		hw_link_info->lse_enable = true;
1778	else
1779		hw_link_info->lse_enable = false;
1780
1781	if ((hw->mac.type == I40E_MAC_XL710) &&
1782	    (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1783	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1784		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1785
1786	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1787	    hw->mac.type != I40E_MAC_X722) {
1788		__le32 tmp;
1789
1790		memcpy(&tmp, resp->link_type, sizeof(tmp));
1791		hw->phy.phy_types = le32_to_cpu(tmp);
1792		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1793	}
1794
1795	/* save link status information */
1796	if (link)
1797		*link = *hw_link_info;
1798
1799	/* flag cleared so helper functions don't call AQ again */
1800	hw->phy.get_link_info = false;
1801
1802aq_get_link_info_exit:
1803	return status;
1804}
1805
1806/**
1807 * i40e_aq_set_phy_int_mask
1808 * @hw: pointer to the hw struct
1809 * @mask: interrupt mask to be set
1810 * @cmd_details: pointer to command details structure or NULL
1811 *
1812 * Set link interrupt mask.
1813 **/
1814i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1815				     u16 mask,
1816				     struct i40e_asq_cmd_details *cmd_details)
1817{
1818	struct i40e_aq_desc desc;
1819	struct i40e_aqc_set_phy_int_mask *cmd =
1820		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1821	i40e_status status;
1822
1823	i40e_fill_default_direct_cmd_desc(&desc,
1824					  i40e_aqc_opc_set_phy_int_mask);
1825
1826	cmd->event_mask = cpu_to_le16(mask);
1827
1828	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1829
1830	return status;
1831}
1832
1833/**
1834 * i40e_aq_set_mac_loopback
1835 * @hw: pointer to the HW struct
1836 * @ena_lpbk: Enable or Disable loopback
1837 * @cmd_details: pointer to command details structure or NULL
1838 *
1839 * Enable/disable loopback on a given port
1840 */
1841i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
1842				     struct i40e_asq_cmd_details *cmd_details)
1843{
1844	struct i40e_aq_desc desc;
1845	struct i40e_aqc_set_lb_mode *cmd =
1846		(struct i40e_aqc_set_lb_mode *)&desc.params.raw;
1847
1848	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
1849	if (ena_lpbk) {
1850		if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
1851			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
1852		else
1853			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
1854	}
1855
1856	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1857}
1858
1859/**
1860 * i40e_aq_set_phy_debug
1861 * @hw: pointer to the hw struct
1862 * @cmd_flags: debug command flags
1863 * @cmd_details: pointer to command details structure or NULL
1864 *
1865 * Reset the external PHY.
1866 **/
1867i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1868				  struct i40e_asq_cmd_details *cmd_details)
1869{
1870	struct i40e_aq_desc desc;
1871	struct i40e_aqc_set_phy_debug *cmd =
1872		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1873	i40e_status status;
1874
1875	i40e_fill_default_direct_cmd_desc(&desc,
1876					  i40e_aqc_opc_set_phy_debug);
1877
1878	cmd->command_flags = cmd_flags;
1879
1880	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1881
1882	return status;
1883}
1884
1885/**
1886 * i40e_is_aq_api_ver_ge
1887 * @aq: pointer to AdminQ info containing HW API version to compare
1888 * @maj: API major value
1889 * @min: API minor value
1890 *
1891 * Check whether the current HW API version is greater than or equal to the provided one.
1892 **/
1893static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1894				  u16 min)
1895{
1896	return (aq->api_maj_ver > maj ||
1897		(aq->api_maj_ver == maj && aq->api_min_ver >= min));
1898}
1899
1900/**
1901 * i40e_aq_add_vsi
1902 * @hw: pointer to the hw struct
1903 * @vsi_ctx: pointer to a vsi context struct
1904 * @cmd_details: pointer to command details structure or NULL
1905 *
1906 * Add a VSI context to the hardware.
1907**/
1908i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1909				struct i40e_vsi_context *vsi_ctx,
1910				struct i40e_asq_cmd_details *cmd_details)
1911{
1912	struct i40e_aq_desc desc;
1913	struct i40e_aqc_add_get_update_vsi *cmd =
1914		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1915	struct i40e_aqc_add_get_update_vsi_completion *resp =
1916		(struct i40e_aqc_add_get_update_vsi_completion *)
1917		&desc.params.raw;
1918	i40e_status status;
1919
1920	i40e_fill_default_direct_cmd_desc(&desc,
1921					  i40e_aqc_opc_add_vsi);
1922
1923	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1924	cmd->connection_type = vsi_ctx->connection_type;
1925	cmd->vf_id = vsi_ctx->vf_num;
1926	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1927
1928	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1929
1930	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
1931					      sizeof(vsi_ctx->info),
1932					      cmd_details, true);
1933
1934	if (status)
1935		goto aq_add_vsi_exit;
1936
1937	vsi_ctx->seid = le16_to_cpu(resp->seid);
1938	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1939	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1940	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1941
1942aq_add_vsi_exit:
1943	return status;
1944}
1945
1946/**
1947 * i40e_aq_set_default_vsi
1948 * @hw: pointer to the hw struct
1949 * @seid: vsi number
1950 * @cmd_details: pointer to command details structure or NULL
1951 **/
1952i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
1953				    u16 seid,
1954				    struct i40e_asq_cmd_details *cmd_details)
1955{
1956	struct i40e_aq_desc desc;
1957	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1958		(struct i40e_aqc_set_vsi_promiscuous_modes *)
1959		&desc.params.raw;
1960	i40e_status status;
1961
1962	i40e_fill_default_direct_cmd_desc(&desc,
1963					  i40e_aqc_opc_set_vsi_promiscuous_modes);
1964
1965	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1966	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1967	cmd->seid = cpu_to_le16(seid);
1968
1969	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1970
1971	return status;
1972}
1973
1974/**
1975 * i40e_aq_clear_default_vsi
1976 * @hw: pointer to the hw struct
1977 * @seid: vsi number
1978 * @cmd_details: pointer to command details structure or NULL
1979 **/
1980i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
1981				      u16 seid,
1982				      struct i40e_asq_cmd_details *cmd_details)
1983{
1984	struct i40e_aq_desc desc;
1985	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1986		(struct i40e_aqc_set_vsi_promiscuous_modes *)
1987		&desc.params.raw;
1988	i40e_status status;
1989
1990	i40e_fill_default_direct_cmd_desc(&desc,
1991					  i40e_aqc_opc_set_vsi_promiscuous_modes);
1992
1993	cmd->promiscuous_flags = cpu_to_le16(0);
1994	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1995	cmd->seid = cpu_to_le16(seid);
1996
1997	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1998
1999	return status;
2000}
2001
2002/**
2003 * i40e_aq_set_vsi_unicast_promiscuous
2004 * @hw: pointer to the hw struct
2005 * @seid: vsi number
2006 * @set: set unicast promiscuous enable/disable
2007 * @cmd_details: pointer to command details structure or NULL
2008 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
2009 **/
2010i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
2011				u16 seid, bool set,
2012				struct i40e_asq_cmd_details *cmd_details,
2013				bool rx_only_promisc)
2014{
2015	struct i40e_aq_desc desc;
2016	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2017		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2018	i40e_status status;
2019	u16 flags = 0;
2020
2021	i40e_fill_default_direct_cmd_desc(&desc,
2022					i40e_aqc_opc_set_vsi_promiscuous_modes);
2023
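	/* On firmware API 1.5 and later, unicast promiscuous can be limited
	 * to received traffic only, so egress traffic is not mirrored back
	 * to this VSI.
	 */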
2024	if (set) {
2025		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2026		if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2027			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2028	}
2029
2030	cmd->promiscuous_flags = cpu_to_le16(flags);
2031
2032	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2033	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2034		cmd->valid_flags |=
2035			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2036
2037	cmd->seid = cpu_to_le16(seid);
2038	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2039
2040	return status;
2041}
2042
2043/**
2044 * i40e_aq_set_vsi_multicast_promiscuous
2045 * @hw: pointer to the hw struct
2046 * @seid: vsi number
2047 * @set: set multicast promiscuous enable/disable
2048 * @cmd_details: pointer to command details structure or NULL
2049 **/
2050i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2051				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2052{
2053	struct i40e_aq_desc desc;
2054	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2055		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2056	i40e_status status;
2057	u16 flags = 0;
2058
2059	i40e_fill_default_direct_cmd_desc(&desc,
2060					i40e_aqc_opc_set_vsi_promiscuous_modes);
2061
2062	if (set)
2063		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2064
2065	cmd->promiscuous_flags = cpu_to_le16(flags);
2066
2067	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2068
2069	cmd->seid = cpu_to_le16(seid);
2070	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2071
2072	return status;
2073}
2074
2075/**
2076 * i40e_aq_set_vsi_mc_promisc_on_vlan
2077 * @hw: pointer to the hw struct
2078 * @seid: vsi number
2079 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
2080 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2081 * @cmd_details: pointer to command details structure or NULL
2082 **/
2083enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2084							 u16 seid, bool enable,
2085							 u16 vid,
2086				struct i40e_asq_cmd_details *cmd_details)
2087{
2088	struct i40e_aq_desc desc;
2089	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2090		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2091	enum i40e_status_code status;
2092	u16 flags = 0;
2093
2094	i40e_fill_default_direct_cmd_desc(&desc,
2095					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2096
2097	if (enable)
2098		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2099
2100	cmd->promiscuous_flags = cpu_to_le16(flags);
2101	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2102	cmd->seid = cpu_to_le16(seid);
2103	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2104
2105	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
2106					      cmd_details, true);
2107
2108	return status;
2109}
2110
2111/**
2112 * i40e_aq_set_vsi_uc_promisc_on_vlan
2113 * @hw: pointer to the hw struct
2114 * @seid: vsi number
2115 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2116 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2117 * @cmd_details: pointer to command details structure or NULL
2118 **/
2119enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2120							 u16 seid, bool enable,
2121							 u16 vid,
2122				struct i40e_asq_cmd_details *cmd_details)
2123{
2124	struct i40e_aq_desc desc;
2125	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2126		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2127	enum i40e_status_code status;
2128	u16 flags = 0;
2129
2130	i40e_fill_default_direct_cmd_desc(&desc,
2131					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2132
2133	if (enable) {
2134		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2135		if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2136			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2137	}
2138
2139	cmd->promiscuous_flags = cpu_to_le16(flags);
2140	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2141	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2142		cmd->valid_flags |=
2143			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2144	cmd->seid = cpu_to_le16(seid);
2145	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2146
2147	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
2148					      cmd_details, true);
2149
2150	return status;
2151}
2152
2153/**
2154 * i40e_aq_set_vsi_bc_promisc_on_vlan
2155 * @hw: pointer to the hw struct
2156 * @seid: vsi number
2157 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2158 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2159 * @cmd_details: pointer to command details structure or NULL
2160 **/
2161i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2162				u16 seid, bool enable, u16 vid,
2163				struct i40e_asq_cmd_details *cmd_details)
2164{
2165	struct i40e_aq_desc desc;
2166	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2167		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2168	i40e_status status;
2169	u16 flags = 0;
2170
2171	i40e_fill_default_direct_cmd_desc(&desc,
2172					i40e_aqc_opc_set_vsi_promiscuous_modes);
2173
2174	if (enable)
2175		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2176
2177	cmd->promiscuous_flags = cpu_to_le16(flags);
2178	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2179	cmd->seid = cpu_to_le16(seid);
2180	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2181
2182	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2183
2184	return status;
2185}
2186
2187/**
2188 * i40e_aq_set_vsi_broadcast
2189 * @hw: pointer to the hw struct
2190 * @seid: vsi number
2191 * @set_filter: true to set filter, false to clear filter
2192 * @cmd_details: pointer to command details structure or NULL
2193 *
2194 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2195 **/
2196i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2197				u16 seid, bool set_filter,
2198				struct i40e_asq_cmd_details *cmd_details)
2199{
2200	struct i40e_aq_desc desc;
2201	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2202		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2203	i40e_status status;
2204
2205	i40e_fill_default_direct_cmd_desc(&desc,
2206					i40e_aqc_opc_set_vsi_promiscuous_modes);
2207
2208	if (set_filter)
2209		cmd->promiscuous_flags
2210			    |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2211	else
2212		cmd->promiscuous_flags
2213			    &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2214
2215	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2216	cmd->seid = cpu_to_le16(seid);
2217	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2218
2219	return status;
2220}
2221
2222/**
2223 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2224 * @hw: pointer to the hw struct
2225 * @seid: vsi number
2226 * @enable: enable/disable VLAN promiscuous mode for the VSI
2227 * @cmd_details: pointer to command details structure or NULL
2228 **/
2229i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2230				       u16 seid, bool enable,
2231				       struct i40e_asq_cmd_details *cmd_details)
2232{
2233	struct i40e_aq_desc desc;
2234	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2235		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2236	i40e_status status;
2237	u16 flags = 0;
2238
2239	i40e_fill_default_direct_cmd_desc(&desc,
2240					i40e_aqc_opc_set_vsi_promiscuous_modes);
2241	if (enable)
2242		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2243
2244	cmd->promiscuous_flags = cpu_to_le16(flags);
2245	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2246	cmd->seid = cpu_to_le16(seid);
2247
2248	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2249
2250	return status;
2251}
2252
2253/**
2254 * i40e_aq_get_vsi_params - get VSI configuration info
2255 * @hw: pointer to the hw struct
2256 * @vsi_ctx: pointer to a vsi context struct
2257 * @cmd_details: pointer to command details structure or NULL
2258 **/
2259i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2260				struct i40e_vsi_context *vsi_ctx,
2261				struct i40e_asq_cmd_details *cmd_details)
2262{
2263	struct i40e_aq_desc desc;
2264	struct i40e_aqc_add_get_update_vsi *cmd =
2265		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2266	struct i40e_aqc_add_get_update_vsi_completion *resp =
2267		(struct i40e_aqc_add_get_update_vsi_completion *)
2268		&desc.params.raw;
2269	i40e_status status;
2270
2271	i40e_fill_default_direct_cmd_desc(&desc,
2272					  i40e_aqc_opc_get_vsi_parameters);
2273
2274	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2275
2276	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2277
2278	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2279				    sizeof(vsi_ctx->info), NULL);
2280
2281	if (status)
2282		goto aq_get_vsi_params_exit;
2283
2284	vsi_ctx->seid = le16_to_cpu(resp->seid);
2285	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2286	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2287	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2288
2289aq_get_vsi_params_exit:
2290	return status;
2291}
2292
2293/**
2294 * i40e_aq_update_vsi_params
2295 * @hw: pointer to the hw struct
2296 * @vsi_ctx: pointer to a vsi context struct
2297 * @cmd_details: pointer to command details structure or NULL
2298 *
2299 * Update a VSI context.
2300 **/
2301i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2302				struct i40e_vsi_context *vsi_ctx,
2303				struct i40e_asq_cmd_details *cmd_details)
2304{
2305	struct i40e_aq_desc desc;
2306	struct i40e_aqc_add_get_update_vsi *cmd =
2307		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2308	struct i40e_aqc_add_get_update_vsi_completion *resp =
2309		(struct i40e_aqc_add_get_update_vsi_completion *)
2310		&desc.params.raw;
2311	i40e_status status;
2312
2313	i40e_fill_default_direct_cmd_desc(&desc,
2314					  i40e_aqc_opc_update_vsi_parameters);
2315	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2316
2317	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2318
2319	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
2320					      sizeof(vsi_ctx->info),
2321					      cmd_details, true);
2322
2323	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2324	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2325
2326	return status;
2327}
2328
2329/**
2330 * i40e_aq_get_switch_config
2331 * @hw: pointer to the hardware structure
2332 * @buf: pointer to the result buffer
2333 * @buf_size: length of input buffer
2334 * @start_seid: seid to start for the report, 0 == beginning
2335 * @cmd_details: pointer to command details structure or NULL
2336 *
2337 * Fill the buf with switch configuration returned from AdminQ command
2338 **/
2339i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2340				struct i40e_aqc_get_switch_config_resp *buf,
2341				u16 buf_size, u16 *start_seid,
2342				struct i40e_asq_cmd_details *cmd_details)
2343{
2344	struct i40e_aq_desc desc;
2345	struct i40e_aqc_switch_seid *scfg =
2346		(struct i40e_aqc_switch_seid *)&desc.params.raw;
2347	i40e_status status;
2348
2349	i40e_fill_default_direct_cmd_desc(&desc,
2350					  i40e_aqc_opc_get_switch_config);
2351	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2352	if (buf_size > I40E_AQ_LARGE_BUF)
2353		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2354	scfg->seid = cpu_to_le16(*start_seid);
2355
2356	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2357	*start_seid = le16_to_cpu(scfg->seid);
2358
2359	return status;
2360}
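
/* Usage sketch (hypothetical caller): walk the complete switch
 * configuration.  Firmware writes the SEID to continue from back into
 * *start_seid, and a value of 0 means the report is complete.  The buffer
 * size and the processing step are placeholders.
 */
static i40e_status i40e_example_walk_switch_config(struct i40e_hw *hw)
{
	struct i40e_aqc_get_switch_config_resp *buf;
	u16 next_seid = 0;
	i40e_status status;

	buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!buf)
		return I40E_ERR_NO_MEMORY;

	do {
		status = i40e_aq_get_switch_config(hw, buf, I40E_AQ_LARGE_BUF,
						   &next_seid, NULL);
		if (status)
			break;
		/* ...walk the elements reported in buf here... */
	} while (next_seid != 0);

	kfree(buf);
	return status;
}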
2361
2362/**
2363 * i40e_aq_set_switch_config
2364 * @hw: pointer to the hardware structure
2365 * @flags: bit flag values to set
2366 * @valid_flags: which bit flags to set
2367 * @mode: cloud filter mode
2369 * @cmd_details: pointer to command details structure or NULL
2370 *
2371 * Set switch configuration bits
2372 **/
2373enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2374						u16 flags,
2375						u16 valid_flags, u8 mode,
2376				struct i40e_asq_cmd_details *cmd_details)
2377{
2378	struct i40e_aq_desc desc;
2379	struct i40e_aqc_set_switch_config *scfg =
2380		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
2381	enum i40e_status_code status;
2382
2383	i40e_fill_default_direct_cmd_desc(&desc,
2384					  i40e_aqc_opc_set_switch_config);
2385	scfg->flags = cpu_to_le16(flags);
2386	scfg->valid_flags = cpu_to_le16(valid_flags);
2387	scfg->mode = mode;
2388	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2389		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2390		scfg->first_tag = cpu_to_le16(hw->first_tag);
2391		scfg->second_tag = cpu_to_le16(hw->second_tag);
2392	}
2393	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2394
2395	return status;
2396}
2397
2398/**
2399 * i40e_aq_get_firmware_version
2400 * @hw: pointer to the hw struct
2401 * @fw_major_version: firmware major version
2402 * @fw_minor_version: firmware minor version
2403 * @fw_build: firmware build number
2404 * @api_major_version: Admin Queue API major version
2405 * @api_minor_version: Admin Queue API minor version
2406 * @cmd_details: pointer to command details structure or NULL
2407 *
2408 * Get the firmware version from the admin queue commands
2409 **/
2410i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2411				u16 *fw_major_version, u16 *fw_minor_version,
2412				u32 *fw_build,
2413				u16 *api_major_version, u16 *api_minor_version,
2414				struct i40e_asq_cmd_details *cmd_details)
2415{
2416	struct i40e_aq_desc desc;
2417	struct i40e_aqc_get_version *resp =
2418		(struct i40e_aqc_get_version *)&desc.params.raw;
2419	i40e_status status;
2420
2421	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2422
2423	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2424
2425	if (!status) {
2426		if (fw_major_version)
2427			*fw_major_version = le16_to_cpu(resp->fw_major);
2428		if (fw_minor_version)
2429			*fw_minor_version = le16_to_cpu(resp->fw_minor);
2430		if (fw_build)
2431			*fw_build = le32_to_cpu(resp->fw_build);
2432		if (api_major_version)
2433			*api_major_version = le16_to_cpu(resp->api_major);
2434		if (api_minor_version)
2435			*api_minor_version = le16_to_cpu(resp->api_minor);
2436	}
2437
2438	return status;
2439}
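
/* Usage sketch: query and log the firmware and AdminQ API versions.  Any
 * of the out parameters may be NULL when the caller does not need them.
 */
static void i40e_example_log_fw_version(struct i40e_hw *hw)
{
	u16 fw_maj, fw_min, api_maj, api_min;
	u32 fw_build;

	if (!i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min, &fw_build,
					  &api_maj, &api_min, NULL))
		hw_dbg(hw, "fw %hu.%hu build %u, api %hu.%hu\n",
		       fw_maj, fw_min, fw_build, api_maj, api_min);
}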
2440
2441/**
2442 * i40e_aq_send_driver_version
2443 * @hw: pointer to the hw struct
2444 * @dv: driver version information (major, minor, build, subbuild and driver string)
2445 * @cmd_details: pointer to command details structure or NULL
2446 *
2447 * Send the driver version to the firmware
2448 **/
2449i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2450				struct i40e_driver_version *dv,
2451				struct i40e_asq_cmd_details *cmd_details)
2452{
2453	struct i40e_aq_desc desc;
2454	struct i40e_aqc_driver_version *cmd =
2455		(struct i40e_aqc_driver_version *)&desc.params.raw;
2456	i40e_status status;
2457	u16 len;
2458
2459	if (dv == NULL)
2460		return I40E_ERR_PARAM;
2461
2462	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2463
2464	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2465	cmd->driver_major_ver = dv->major_version;
2466	cmd->driver_minor_ver = dv->minor_version;
2467	cmd->driver_build_ver = dv->build_version;
2468	cmd->driver_subbuild_ver = dv->subbuild_version;
2469
2470	len = 0;
2471	while (len < sizeof(dv->driver_string) &&
2472	       (dv->driver_string[len] < 0x80) &&
2473	       dv->driver_string[len])
2474		len++;
2475	status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2476				       len, cmd_details);
2477
2478	return status;
2479}
2480
2481/**
2482 * i40e_get_link_status - get status of the HW network link
2483 * @hw: pointer to the hw struct
2484 * @link_up: pointer to bool (true/false = linkup/linkdown)
2485 *
2486 * Sets *link_up to true if the link is up, false if it is down.
2487 * The value of *link_up is invalid if the returned status is non-zero.
2488 *
2489 * Side effect: LinkStatusEvent reporting becomes enabled
2490 **/
2491i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2492{
2493	i40e_status status = 0;
2494
2495	if (hw->phy.get_link_info) {
2496		status = i40e_update_link_info(hw);
2497
2498		if (status)
2499			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2500				   status);
2501	}
2502
2503	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2504
2505	return status;
2506}
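
/* Usage sketch: poll the current link state.  As noted above, link_up is
 * only meaningful when the call itself succeeds, so a query failure is
 * treated here as link down.
 */
static bool i40e_example_link_is_up(struct i40e_hw *hw)
{
	bool link_up = false;

	if (i40e_get_link_status(hw, &link_up))
		return false;

	return link_up;
}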
2507
2508/**
2509 * i40e_update_link_info - update status of the HW network link
2510 * @hw: pointer to the hw struct
2511 **/
2512noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2513{
2514	struct i40e_aq_get_phy_abilities_resp abilities;
2515	i40e_status status = 0;
2516
2517	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2518	if (status)
2519		return status;
2520
2521	/* extra checking needed to ensure link info to user is timely */
2522	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2523	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2524	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2525		status = i40e_aq_get_phy_capabilities(hw, false, false,
2526						      &abilities, NULL);
2527		if (status)
2528			return status;
2529
2530		if (abilities.fec_cfg_curr_mod_ext_info &
2531		    I40E_AQ_ENABLE_FEC_AUTO)
2532			hw->phy.link_info.req_fec_info =
2533				(I40E_AQ_REQUEST_FEC_KR |
2534				 I40E_AQ_REQUEST_FEC_RS);
2535		else
2536			hw->phy.link_info.req_fec_info =
2537				abilities.fec_cfg_curr_mod_ext_info &
2538				(I40E_AQ_REQUEST_FEC_KR |
2539				 I40E_AQ_REQUEST_FEC_RS);
2540
2541		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2542		       sizeof(hw->phy.link_info.module_type));
2543	}
2544
2545	return status;
2546}
2547
2548/**
2549 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2550 * @hw: pointer to the hw struct
2551 * @uplink_seid: the MAC or other gizmo SEID
2552 * @downlink_seid: the VSI SEID
2553 * @enabled_tc: bitmap of TCs to be enabled
2554 * @default_port: true for default port VSI, false for control port
2555 * @veb_seid: pointer to where to put the resulting VEB SEID
2556 * @enable_stats: true to turn on VEB stats
2557 * @cmd_details: pointer to command details structure or NULL
2558 *
2559 * This asks the FW to add a VEB between the uplink and downlink
2560 * elements.  If the uplink SEID is 0, this will be a floating VEB.
2561 **/
2562i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2563				u16 downlink_seid, u8 enabled_tc,
2564				bool default_port, u16 *veb_seid,
2565				bool enable_stats,
2566				struct i40e_asq_cmd_details *cmd_details)
2567{
2568	struct i40e_aq_desc desc;
2569	struct i40e_aqc_add_veb *cmd =
2570		(struct i40e_aqc_add_veb *)&desc.params.raw;
2571	struct i40e_aqc_add_veb_completion *resp =
2572		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2573	i40e_status status;
2574	u16 veb_flags = 0;
2575
2576	/* SEIDs need to either both be set or both be 0 for floating VEB */
2577	if (!!uplink_seid != !!downlink_seid)
2578		return I40E_ERR_PARAM;
2579
2580	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2581
2582	cmd->uplink_seid = cpu_to_le16(uplink_seid);
2583	cmd->downlink_seid = cpu_to_le16(downlink_seid);
2584	cmd->enable_tcs = enabled_tc;
2585	if (!uplink_seid)
2586		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2587	if (default_port)
2588		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2589	else
2590		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2591
2592	/* reverse logic here: set the bitflag to disable the stats */
2593	if (!enable_stats)
2594		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2595
2596	cmd->veb_flags = cpu_to_le16(veb_flags);
2597
2598	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2599
2600	if (!status && veb_seid)
2601		*veb_seid = le16_to_cpu(resp->veb_seid);
2602
2603	return status;
2604}
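
/* Usage sketch: create a VEB between a MAC uplink and a VSI downlink and
 * capture the SEID firmware assigns to it.  The TC bitmap (TC0 only),
 * non-default port type and enabled statistics are illustrative choices.
 */
static i40e_status i40e_example_add_veb(struct i40e_hw *hw, u16 uplink_seid,
					u16 vsi_seid, u16 *veb_seid)
{
	return i40e_aq_add_veb(hw, uplink_seid, vsi_seid, BIT(0),
			       false, veb_seid, true, NULL);
}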
2605
2606/**
2607 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2608 * @hw: pointer to the hw struct
2609 * @veb_seid: the SEID of the VEB to query
2610 * @switch_id: the uplink switch id
2611 * @floating: set to true if the VEB is floating
2612 * @statistic_index: index of the stats counter block for this VEB
2613 * @vebs_used: number of VEBs used by the function
2614 * @vebs_free: total VEBs not reserved by any function
2615 * @cmd_details: pointer to command details structure or NULL
2616 *
2617 * This retrieves the parameters for a particular VEB, specified by
2618 * veb_seid, and returns them to the caller.
2619 **/
2620i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2621				u16 veb_seid, u16 *switch_id,
2622				bool *floating, u16 *statistic_index,
2623				u16 *vebs_used, u16 *vebs_free,
2624				struct i40e_asq_cmd_details *cmd_details)
2625{
2626	struct i40e_aq_desc desc;
2627	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2628		(struct i40e_aqc_get_veb_parameters_completion *)
2629		&desc.params.raw;
2630	i40e_status status;
2631
2632	if (veb_seid == 0)
2633		return I40E_ERR_PARAM;
2634
2635	i40e_fill_default_direct_cmd_desc(&desc,
2636					  i40e_aqc_opc_get_veb_parameters);
2637	cmd_resp->seid = cpu_to_le16(veb_seid);
2638
2639	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2640	if (status)
2641		goto get_veb_exit;
2642
2643	if (switch_id)
2644		*switch_id = le16_to_cpu(cmd_resp->switch_id);
2645	if (statistic_index)
2646		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2647	if (vebs_used)
2648		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2649	if (vebs_free)
2650		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2651	if (floating) {
2652		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2653
2654		if (flags & I40E_AQC_ADD_VEB_FLOATING)
2655			*floating = true;
2656		else
2657			*floating = false;
2658	}
2659
2660get_veb_exit:
2661	return status;
2662}
2663
2664/**
2665 * i40e_prepare_add_macvlan
2666 * @mv_list: list of macvlans to be added
2667 * @desc: pointer to AQ descriptor structure
2668 * @count: length of the list
2669 * @seid: VSI for the mac address
2670 *
2671 * Internal helper function that prepares the add macvlan request
2672 * and returns the buffer size.
2673 **/
2674static u16
2675i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
2676			 struct i40e_aq_desc *desc, u16 count, u16 seid)
2677{
2678	struct i40e_aqc_macvlan *cmd =
2679		(struct i40e_aqc_macvlan *)&desc->params.raw;
2680	u16 buf_size;
2681	int i;
2682
2683	buf_size = count * sizeof(*mv_list);
2684
2685	/* prep the rest of the request */
2686	i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
2687	cmd->num_addresses = cpu_to_le16(count);
2688	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2689	cmd->seid[1] = 0;
2690	cmd->seid[2] = 0;
2691
2692	for (i = 0; i < count; i++)
2693		if (is_multicast_ether_addr(mv_list[i].mac_addr))
2694			mv_list[i].flags |=
2695			       cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2696
2697	desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2698	if (buf_size > I40E_AQ_LARGE_BUF)
2699		desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2700
2701	return buf_size;
2702}
2703
2704/**
2705 * i40e_aq_add_macvlan
2706 * @hw: pointer to the hw struct
2707 * @seid: VSI for the mac address
2708 * @mv_list: list of macvlans to be added
2709 * @count: length of the list
2710 * @cmd_details: pointer to command details structure or NULL
2711 *
2712 * Add MAC/VLAN addresses to the HW filtering
2713 **/
2714i40e_status
2715i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2716		    struct i40e_aqc_add_macvlan_element_data *mv_list,
2717		    u16 count, struct i40e_asq_cmd_details *cmd_details)
2718{
2719	struct i40e_aq_desc desc;
2720	u16 buf_size;
2721
2722	if (count == 0 || !mv_list || !hw)
2723		return I40E_ERR_PARAM;
2724
2725	buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2726
2727	return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2728					    cmd_details, true);
2729}
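
/* Usage sketch: add a single unicast MAC filter on a VSI.  The
 * perfect-match flag and ether_addr_copy() come from the wider driver and
 * netdev code and are assumed to be available here.
 */
static i40e_status i40e_example_add_mac_filter(struct i40e_hw *hw,
					       u16 vsi_seid, const u8 *mac)
{
	struct i40e_aqc_add_macvlan_element_data elem;

	memset(&elem, 0, sizeof(elem));
	ether_addr_copy(elem.mac_addr, mac);
	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);

	return i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
}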
2730
2731/**
2732 * i40e_aq_add_macvlan_v2
2733 * @hw: pointer to the hw struct
2734 * @seid: VSI for the mac address
2735 * @mv_list: list of macvlans to be added
2736 * @count: length of the list
2737 * @cmd_details: pointer to command details structure or NULL
2738 * @aq_status: pointer to Admin Queue status return value
2739 *
2740 * Add MAC/VLAN addresses to the HW filtering.
2741 * The _v2 version returns the last Admin Queue status in aq_status
2742 * to avoid race conditions in access to hw->aq.asq_last_status.
2743 * It also calls _v2 versions of asq_send_command functions to
2744 * get the aq_status on the stack.
2745 **/
2746i40e_status
2747i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
2748		       struct i40e_aqc_add_macvlan_element_data *mv_list,
2749		       u16 count, struct i40e_asq_cmd_details *cmd_details,
2750		       enum i40e_admin_queue_err *aq_status)
2751{
2752	struct i40e_aq_desc desc;
2753	u16 buf_size;
2754
2755	if (count == 0 || !mv_list || !hw)
2756		return I40E_ERR_PARAM;
2757
2758	buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2759
2760	return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2761					       cmd_details, true, aq_status);
2762}
2763
2764/**
2765 * i40e_aq_remove_macvlan
2766 * @hw: pointer to the hw struct
2767 * @seid: VSI for the mac address
2768 * @mv_list: list of macvlans to be removed
2769 * @count: length of the list
2770 * @cmd_details: pointer to command details structure or NULL
2771 *
2772 * Remove MAC/VLAN addresses from the HW filtering
2773 **/
2774i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2775			struct i40e_aqc_remove_macvlan_element_data *mv_list,
2776			u16 count, struct i40e_asq_cmd_details *cmd_details)
2777{
2778	struct i40e_aq_desc desc;
2779	struct i40e_aqc_macvlan *cmd =
2780		(struct i40e_aqc_macvlan *)&desc.params.raw;
2781	i40e_status status;
2782	u16 buf_size;
2783
2784	if (count == 0 || !mv_list || !hw)
2785		return I40E_ERR_PARAM;
2786
2787	buf_size = count * sizeof(*mv_list);
2788
2789	/* prep the rest of the request */
2790	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2791	cmd->num_addresses = cpu_to_le16(count);
2792	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2793	cmd->seid[1] = 0;
2794	cmd->seid[2] = 0;
2795
2796	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2797	if (buf_size > I40E_AQ_LARGE_BUF)
2798		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2799
2800	status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2801					      cmd_details, true);
2802
2803	return status;
2804}
2805
2806/**
2807 * i40e_aq_remove_macvlan_v2
2808 * @hw: pointer to the hw struct
2809 * @seid: VSI for the mac address
2810 * @mv_list: list of macvlans to be removed
2811 * @count: length of the list
2812 * @cmd_details: pointer to command details structure or NULL
2813 * @aq_status: pointer to Admin Queue status return value
2814 *
2815 * Remove MAC/VLAN addresses from the HW filtering.
2816 * The _v2 version returns the last Admin Queue status in aq_status
2817 * to avoid race conditions in access to hw->aq.asq_last_status.
2818 * It also calls _v2 versions of asq_send_command functions to
2819 * get the aq_status on the stack.
2820 **/
2821i40e_status
2822i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
2823			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
2824			  u16 count, struct i40e_asq_cmd_details *cmd_details,
2825			  enum i40e_admin_queue_err *aq_status)
2826{
2827	struct i40e_aqc_macvlan *cmd;
2828	struct i40e_aq_desc desc;
2829	u16 buf_size;
2830
2831	if (count == 0 || !mv_list || !hw)
2832		return I40E_ERR_PARAM;
2833
2834	buf_size = count * sizeof(*mv_list);
2835
2836	/* prep the rest of the request */
2837	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2838	cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
2839	cmd->num_addresses = cpu_to_le16(count);
2840	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2841	cmd->seid[1] = 0;
2842	cmd->seid[2] = 0;
2843
2844	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2845	if (buf_size > I40E_AQ_LARGE_BUF)
2846		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2847
2848	return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2849						 cmd_details, true, aq_status);
2850}
2851
2852/**
2853 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2854 * @hw: pointer to the hw struct
2855 * @opcode: AQ opcode for add or delete mirror rule
2856 * @sw_seid: Switch SEID (to which rule refers)
2857 * @rule_type: Rule Type (ingress/egress/VLAN)
2858 * @id: Destination VSI SEID or Rule ID
2859 * @count: length of the list
2860 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2861 * @cmd_details: pointer to command details structure or NULL
2862 * @rule_id: Rule ID returned from FW
2863 * @rules_used: Number of rules used in internal switch
2864 * @rules_free: Number of rules free in internal switch
2865 *
2866 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2867 * VEBs/VEPA elements only
2868 **/
2869static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2870				u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2871				u16 count, __le16 *mr_list,
2872				struct i40e_asq_cmd_details *cmd_details,
2873				u16 *rule_id, u16 *rules_used, u16 *rules_free)
2874{
2875	struct i40e_aq_desc desc;
2876	struct i40e_aqc_add_delete_mirror_rule *cmd =
2877		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2878	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2879	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2880	i40e_status status;
2881	u16 buf_size;
2882
2883	buf_size = count * sizeof(*mr_list);
2884
2885	/* prep the rest of the request */
2886	i40e_fill_default_direct_cmd_desc(&desc, opcode);
2887	cmd->seid = cpu_to_le16(sw_seid);
2888	cmd->rule_type = cpu_to_le16(rule_type &
2889				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
2890	cmd->num_entries = cpu_to_le16(count);
2891	/* Dest VSI for add, rule_id for delete */
2892	cmd->destination = cpu_to_le16(id);
2893	if (mr_list) {
2894		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2895						I40E_AQ_FLAG_RD));
2896		if (buf_size > I40E_AQ_LARGE_BUF)
2897			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2898	}
2899
2900	status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2901				       cmd_details);
2902	if (!status ||
2903	    hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2904		if (rule_id)
2905			*rule_id = le16_to_cpu(resp->rule_id);
2906		if (rules_used)
2907			*rules_used = le16_to_cpu(resp->mirror_rules_used);
2908		if (rules_free)
2909			*rules_free = le16_to_cpu(resp->mirror_rules_free);
2910	}
2911	return status;
2912}
2913
2914/**
2915 * i40e_aq_add_mirrorrule - add a mirror rule
2916 * @hw: pointer to the hw struct
2917 * @sw_seid: Switch SEID (to which rule refers)
2918 * @rule_type: Rule Type (ingress/egress/VLAN)
2919 * @dest_vsi: SEID of VSI to which packets will be mirrored
2920 * @count: length of the list
2921 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2922 * @cmd_details: pointer to command details structure or NULL
2923 * @rule_id: Rule ID returned from FW
2924 * @rules_used: Number of rules used in internal switch
2925 * @rules_free: Number of rules free in internal switch
2926 *
2927 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2928 **/
2929i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2930			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2931			struct i40e_asq_cmd_details *cmd_details,
2932			u16 *rule_id, u16 *rules_used, u16 *rules_free)
2933{
2934	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2935	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2936		if (count == 0 || !mr_list)
2937			return I40E_ERR_PARAM;
2938	}
2939
2940	return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2941				  rule_type, dest_vsi, count, mr_list,
2942				  cmd_details, rule_id, rules_used, rules_free);
2943}
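
/* Usage sketch: mirror all ingress traffic on a switching element to a
 * destination VSI.  For the ALL_INGRESS rule type no SEID/VLAN list is
 * required, so count is 0 and mr_list is NULL, which the check above
 * explicitly allows.
 */
static i40e_status i40e_example_mirror_ingress(struct i40e_hw *hw, u16 sw_seid,
					       u16 dest_vsi_seid, u16 *rule_id)
{
	u16 rules_used, rules_free;

	return i40e_aq_add_mirrorrule(hw, sw_seid,
				      I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
				      dest_vsi_seid, 0, NULL, NULL,
				      rule_id, &rules_used, &rules_free);
}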
2944
2945/**
2946 * i40e_aq_delete_mirrorrule - delete a mirror rule
2947 * @hw: pointer to the hw struct
2948 * @sw_seid: Switch SEID (to which rule refers)
2949 * @rule_type: Rule Type (ingress/egress/VLAN)
2950 * @count: length of the list
2951 * @rule_id: Rule ID that is returned in the receive desc as part of
2952 *		add_mirrorrule.
2953 * @mr_list: list of mirrored VLAN IDs to be removed
2954 * @cmd_details: pointer to command details structure or NULL
2955 * @rules_used: Number of rules used in internal switch
2956 * @rules_free: Number of rules free in internal switch
2957 *
2958 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2959 **/
2960i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2961			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2962			struct i40e_asq_cmd_details *cmd_details,
2963			u16 *rules_used, u16 *rules_free)
2964{
2965	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2966	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2967		/* count and mr_list shall be valid for rule_type INGRESS VLAN
2968		 * mirroring. For other rule_type values, count and mr_list
2969		 * should not matter.
2970		 */
2971		if (count == 0 || !mr_list)
2972			return I40E_ERR_PARAM;
2973	}
2974
2975	return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2976				  rule_type, rule_id, count, mr_list,
2977				  cmd_details, NULL, rules_used, rules_free);
2978}
2979
2980/**
2981 * i40e_aq_send_msg_to_vf
2982 * @hw: pointer to the hardware structure
2983 * @vfid: VF id to send msg
2984 * @v_opcode: opcodes for VF-PF communication
2985 * @v_retval: return error code
2986 * @msg: pointer to the msg buffer
2987 * @msglen: msg length
2988 * @cmd_details: pointer to command details
2989 *
2990 * Send a message to the specified VF
2991 **/
2992i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2993				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2994				struct i40e_asq_cmd_details *cmd_details)
2995{
2996	struct i40e_aq_desc desc;
2997	struct i40e_aqc_pf_vf_message *cmd =
2998		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2999	i40e_status status;
3000
3001	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
3002	cmd->id = cpu_to_le32(vfid);
3003	desc.cookie_high = cpu_to_le32(v_opcode);
3004	desc.cookie_low = cpu_to_le32(v_retval);
3005	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
3006	if (msglen) {
3007		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
3008						I40E_AQ_FLAG_RD));
3009		if (msglen > I40E_AQ_LARGE_BUF)
3010			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3011		desc.datalen = cpu_to_le16(msglen);
3012	}
3013	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
3014
3015	return status;
3016}
3017
3018/**
3019 * i40e_aq_debug_read_register
3020 * @hw: pointer to the hw struct
3021 * @reg_addr: register address
3022 * @reg_val: register value
3023 * @cmd_details: pointer to command details structure or NULL
3024 *
3025 * Read the register using the admin queue commands
3026 **/
3027i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
3028				u32 reg_addr, u64 *reg_val,
3029				struct i40e_asq_cmd_details *cmd_details)
3030{
3031	struct i40e_aq_desc desc;
3032	struct i40e_aqc_debug_reg_read_write *cmd_resp =
3033		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3034	i40e_status status;
3035
3036	if (reg_val == NULL)
3037		return I40E_ERR_PARAM;
3038
3039	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
3040
3041	cmd_resp->address = cpu_to_le32(reg_addr);
3042
3043	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3044
3045	if (!status) {
3046		*reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
3047			   (u64)le32_to_cpu(cmd_resp->value_low);
3048	}
3049
3050	return status;
3051}
3052
3053/**
3054 * i40e_aq_debug_write_register
3055 * @hw: pointer to the hw struct
3056 * @reg_addr: register address
3057 * @reg_val: register value
3058 * @cmd_details: pointer to command details structure or NULL
3059 *
3060 * Write to a register using the admin queue commands
3061 **/
3062i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
3063					u32 reg_addr, u64 reg_val,
3064					struct i40e_asq_cmd_details *cmd_details)
3065{
3066	struct i40e_aq_desc desc;
3067	struct i40e_aqc_debug_reg_read_write *cmd =
3068		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3069	i40e_status status;
3070
3071	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
3072
3073	cmd->address = cpu_to_le32(reg_addr);
3074	cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
3075	cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
3076
3077	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3078
3079	return status;
3080}
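
/* Usage sketch: read-modify-write a register through the AdminQ debug
 * commands instead of MMIO.  The register address and bit mask are
 * placeholders supplied by the caller.
 */
static i40e_status i40e_example_set_reg_bits(struct i40e_hw *hw, u32 reg,
					     u64 bits)
{
	i40e_status status;
	u64 val;

	status = i40e_aq_debug_read_register(hw, reg, &val, NULL);
	if (status)
		return status;

	return i40e_aq_debug_write_register(hw, reg, val | bits, NULL);
}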
3081
3082/**
3083 * i40e_aq_request_resource
3084 * @hw: pointer to the hw struct
3085 * @resource: resource id
3086 * @access: access type
3087 * @sdp_number: resource number
3088 * @timeout: the maximum time in ms that the driver may hold the resource
3089 * @cmd_details: pointer to command details structure or NULL
3090 *
3091 * requests common resource using the admin queue commands
3092 **/
3093i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3094				enum i40e_aq_resources_ids resource,
3095				enum i40e_aq_resource_access_type access,
3096				u8 sdp_number, u64 *timeout,
3097				struct i40e_asq_cmd_details *cmd_details)
3098{
3099	struct i40e_aq_desc desc;
3100	struct i40e_aqc_request_resource *cmd_resp =
3101		(struct i40e_aqc_request_resource *)&desc.params.raw;
3102	i40e_status status;
3103
3104	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3105
3106	cmd_resp->resource_id = cpu_to_le16(resource);
3107	cmd_resp->access_type = cpu_to_le16(access);
3108	cmd_resp->resource_number = cpu_to_le32(sdp_number);
3109
3110	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3111	/* The completion specifies the maximum time in ms that the driver
3112	 * may hold the resource in the Timeout field.
3113	 * If the resource is held by someone else, the command completes with
3114	 * busy return value and the timeout field indicates the maximum time
3115	 * the current owner of the resource has to free it.
3116	 */
3117	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3118		*timeout = le32_to_cpu(cmd_resp->timeout);
3119
3120	return status;
3121}
3122
3123/**
3124 * i40e_aq_release_resource
3125 * @hw: pointer to the hw struct
3126 * @resource: resource id
3127 * @sdp_number: resource number
3128 * @cmd_details: pointer to command details structure or NULL
3129 *
3130 * release common resource using the admin queue commands
3131 **/
3132i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3133				enum i40e_aq_resources_ids resource,
3134				u8 sdp_number,
3135				struct i40e_asq_cmd_details *cmd_details)
3136{
3137	struct i40e_aq_desc desc;
3138	struct i40e_aqc_request_resource *cmd =
3139		(struct i40e_aqc_request_resource *)&desc.params.raw;
3140	i40e_status status;
3141
3142	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3143
3144	cmd->resource_id = cpu_to_le16(resource);
3145	cmd->resource_number = cpu_to_le32(sdp_number);
3146
3147	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3148
3149	return status;
3150}
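
/* Usage sketch of the ownership protocol described above: try to take the
 * NVM resource for reading and, if another function owns it, wait for the
 * advertised timeout before retrying once.  The resource id is taken from
 * the AdminQ resource-id enum; the single retry and the 1 s cap on the
 * wait are illustrative only.  Pair a successful request with
 * i40e_aq_release_resource() when done.
 */
static i40e_status i40e_example_take_nvm(struct i40e_hw *hw)
{
	u64 timeout = 0;
	i40e_status status;

	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
					  I40E_RESOURCE_READ, 0, &timeout,
					  NULL);
	if (!status)
		return 0;

	if (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY && timeout) {
		msleep(min_t(u32, timeout, 1000));
		status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
						  I40E_RESOURCE_READ, 0,
						  &timeout, NULL);
	}

	return status;
}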
3151
3152/**
3153 * i40e_aq_read_nvm
3154 * @hw: pointer to the hw struct
3155 * @module_pointer: module pointer location in words from the NVM beginning
3156 * @offset: byte offset from the module beginning
3157 * @length: length of the section to be read (in bytes from the offset)
3158 * @data: command buffer (size [bytes] = length)
3159 * @last_command: tells if this is the last command in a series
3160 * @cmd_details: pointer to command details structure or NULL
3161 *
3162 * Read the NVM using the admin queue commands
3163 **/
3164i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3165				u32 offset, u16 length, void *data,
3166				bool last_command,
3167				struct i40e_asq_cmd_details *cmd_details)
3168{
3169	struct i40e_aq_desc desc;
3170	struct i40e_aqc_nvm_update *cmd =
3171		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3172	i40e_status status;
3173
3174	/* The highest byte of the offset must be zero. */
3175	if (offset & 0xFF000000) {
3176		status = I40E_ERR_PARAM;
3177		goto i40e_aq_read_nvm_exit;
3178	}
3179
3180	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3181
3182	/* If this is the last command in a series, set the proper flag. */
3183	if (last_command)
3184		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3185	cmd->module_pointer = module_pointer;
3186	cmd->offset = cpu_to_le32(offset);
3187	cmd->length = cpu_to_le16(length);
3188
3189	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3190	if (length > I40E_AQ_LARGE_BUF)
3191		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3192
3193	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3194
3195i40e_aq_read_nvm_exit:
3196	return status;
3197}
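
/* Usage sketch mirroring the OCP word read further below: take the NVM
 * semaphore via the driver's i40e_acquire_nvm() helper, read a block from
 * a module, and release the semaphore.  Module pointer, offset and length
 * are placeholders chosen by the caller.
 */
static i40e_status i40e_example_read_nvm_block(struct i40e_hw *hw, u8 module,
					       u32 offset, u16 len, void *buf)
{
	i40e_status status;

	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status)
		return status;

	status = i40e_aq_read_nvm(hw, module, offset, len, buf, true, NULL);
	i40e_release_nvm(hw);

	return status;
}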
3198
3199/**
3200 * i40e_aq_erase_nvm
3201 * @hw: pointer to the hw struct
3202 * @module_pointer: module pointer location in words from the NVM beginning
3203 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3204 * @length: length of the section to be erased (expressed in 4 KB)
3205 * @last_command: tells if this is the last command in a series
3206 * @cmd_details: pointer to command details structure or NULL
3207 *
3208 * Erase the NVM sector using the admin queue commands
3209 **/
3210i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3211			      u32 offset, u16 length, bool last_command,
3212			      struct i40e_asq_cmd_details *cmd_details)
3213{
3214	struct i40e_aq_desc desc;
3215	struct i40e_aqc_nvm_update *cmd =
3216		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3217	i40e_status status;
3218
3219	/* The highest byte of the offset must be zero. */
3220	if (offset & 0xFF000000) {
3221		status = I40E_ERR_PARAM;
3222		goto i40e_aq_erase_nvm_exit;
3223	}
3224
3225	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3226
3227	/* If this is the last command in a series, set the proper flag. */
3228	if (last_command)
3229		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3230	cmd->module_pointer = module_pointer;
3231	cmd->offset = cpu_to_le32(offset);
3232	cmd->length = cpu_to_le16(length);
3233
3234	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3235
3236i40e_aq_erase_nvm_exit:
3237	return status;
3238}
3239
3240/**
3241 * i40e_parse_discover_capabilities
3242 * @hw: pointer to the hw struct
3243 * @buff: pointer to a buffer containing device/function capability records
3244 * @cap_count: number of capability records in the list
3245 * @list_type_opc: type of capabilities list to parse
3246 *
3247 * Parse the device/function capabilities list.
3248 **/
3249static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3250				     u32 cap_count,
3251				     enum i40e_admin_queue_opc list_type_opc)
3252{
3253	struct i40e_aqc_list_capabilities_element_resp *cap;
3254	u32 valid_functions, num_functions;
3255	u32 number, logical_id, phys_id;
3256	struct i40e_hw_capabilities *p;
3257	u16 id, ocp_cfg_word0;
3258	i40e_status status;
3259	u8 major_rev;
3260	u32 i = 0;
3261
3262	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3263
3264	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3265		p = &hw->dev_caps;
3266	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3267		p = &hw->func_caps;
3268	else
3269		return;
3270
3271	for (i = 0; i < cap_count; i++, cap++) {
3272		id = le16_to_cpu(cap->id);
3273		number = le32_to_cpu(cap->number);
3274		logical_id = le32_to_cpu(cap->logical_id);
3275		phys_id = le32_to_cpu(cap->phys_id);
3276		major_rev = cap->major_rev;
3277
3278		switch (id) {
3279		case I40E_AQ_CAP_ID_SWITCH_MODE:
3280			p->switch_mode = number;
3281			break;
3282		case I40E_AQ_CAP_ID_MNG_MODE:
3283			p->management_mode = number;
3284			if (major_rev > 1) {
3285				p->mng_protocols_over_mctp = logical_id;
3286				i40e_debug(hw, I40E_DEBUG_INIT,
3287					   "HW Capability: Protocols over MCTP = %d\n",
3288					   p->mng_protocols_over_mctp);
3289			} else {
3290				p->mng_protocols_over_mctp = 0;
3291			}
3292			break;
3293		case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3294			p->npar_enable = number;
3295			break;
3296		case I40E_AQ_CAP_ID_OS2BMC_CAP:
3297			p->os2bmc = number;
3298			break;
3299		case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3300			p->valid_functions = number;
3301			break;
3302		case I40E_AQ_CAP_ID_SRIOV:
3303			if (number == 1)
3304				p->sr_iov_1_1 = true;
3305			break;
3306		case I40E_AQ_CAP_ID_VF:
3307			p->num_vfs = number;
3308			p->vf_base_id = logical_id;
3309			break;
3310		case I40E_AQ_CAP_ID_VMDQ:
3311			if (number == 1)
3312				p->vmdq = true;
3313			break;
3314		case I40E_AQ_CAP_ID_8021QBG:
3315			if (number == 1)
3316				p->evb_802_1_qbg = true;
3317			break;
3318		case I40E_AQ_CAP_ID_8021QBR:
3319			if (number == 1)
3320				p->evb_802_1_qbh = true;
3321			break;
3322		case I40E_AQ_CAP_ID_VSI:
3323			p->num_vsis = number;
3324			break;
3325		case I40E_AQ_CAP_ID_DCB:
3326			if (number == 1) {
3327				p->dcb = true;
3328				p->enabled_tcmap = logical_id;
3329				p->maxtc = phys_id;
3330			}
3331			break;
3332		case I40E_AQ_CAP_ID_FCOE:
3333			if (number == 1)
3334				p->fcoe = true;
3335			break;
3336		case I40E_AQ_CAP_ID_ISCSI:
3337			if (number == 1)
3338				p->iscsi = true;
3339			break;
3340		case I40E_AQ_CAP_ID_RSS:
3341			p->rss = true;
3342			p->rss_table_size = number;
3343			p->rss_table_entry_width = logical_id;
3344			break;
3345		case I40E_AQ_CAP_ID_RXQ:
3346			p->num_rx_qp = number;
3347			p->base_queue = phys_id;
3348			break;
3349		case I40E_AQ_CAP_ID_TXQ:
3350			p->num_tx_qp = number;
3351			p->base_queue = phys_id;
3352			break;
3353		case I40E_AQ_CAP_ID_MSIX:
3354			p->num_msix_vectors = number;
3355			i40e_debug(hw, I40E_DEBUG_INIT,
3356				   "HW Capability: MSIX vector count = %d\n",
3357				   p->num_msix_vectors);
3358			break;
3359		case I40E_AQ_CAP_ID_VF_MSIX:
3360			p->num_msix_vectors_vf = number;
3361			break;
3362		case I40E_AQ_CAP_ID_FLEX10:
3363			if (major_rev == 1) {
3364				if (number == 1) {
3365					p->flex10_enable = true;
3366					p->flex10_capable = true;
3367				}
3368			} else {
3369				/* Capability revision >= 2 */
3370				if (number & 1)
3371					p->flex10_enable = true;
3372				if (number & 2)
3373					p->flex10_capable = true;
3374			}
3375			p->flex10_mode = logical_id;
3376			p->flex10_status = phys_id;
3377			break;
3378		case I40E_AQ_CAP_ID_CEM:
3379			if (number == 1)
3380				p->mgmt_cem = true;
3381			break;
3382		case I40E_AQ_CAP_ID_IWARP:
3383			if (number == 1)
3384				p->iwarp = true;
3385			break;
3386		case I40E_AQ_CAP_ID_LED:
3387			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3388				p->led[phys_id] = true;
3389			break;
3390		case I40E_AQ_CAP_ID_SDP:
3391			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3392				p->sdp[phys_id] = true;
3393			break;
3394		case I40E_AQ_CAP_ID_MDIO:
3395			if (number == 1) {
3396				p->mdio_port_num = phys_id;
3397				p->mdio_port_mode = logical_id;
3398			}
3399			break;
3400		case I40E_AQ_CAP_ID_1588:
3401			if (number == 1)
3402				p->ieee_1588 = true;
3403			break;
3404		case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3405			p->fd = true;
3406			p->fd_filters_guaranteed = number;
3407			p->fd_filters_best_effort = logical_id;
3408			break;
3409		case I40E_AQ_CAP_ID_WSR_PROT:
3410			p->wr_csr_prot = (u64)number;
3411			p->wr_csr_prot |= (u64)logical_id << 32;
3412			break;
3413		case I40E_AQ_CAP_ID_NVM_MGMT:
3414			if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3415				p->sec_rev_disabled = true;
3416			if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3417				p->update_disabled = true;
3418			break;
3419		default:
3420			break;
3421		}
3422	}
3423
3424	if (p->fcoe)
3425		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3426
3427	/* Software override ensuring FCoE is disabled if npar or mfp
3428	 * mode is enabled, because it is not supported in these modes.
3429	 */
3430	if (p->npar_enable || p->flex10_enable)
3431		p->fcoe = false;
3432
3433	/* count the enabled ports (aka the "not disabled" ports) */
3434	hw->num_ports = 0;
3435	for (i = 0; i < 4; i++) {
3436		u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3437		u64 port_cfg = 0;
3438
3439		/* use AQ read to get the physical register offset instead
3440		 * of the port relative offset
3441		 */
3442		i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3443		if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3444			hw->num_ports++;
3445	}
3446
3447	/* OCP cards case: if a mezz is removed the Ethernet port is at
3448	 * disabled state in PRTGEN_CNF register. Additional NVM read is
3449	 * needed in order to check if we are dealing with OCP card.
3450	 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
3451	 * physical ports results in wrong partition id calculation and thus
3452	 * not supporting WoL.
3453	 */
3454	if (hw->mac.type == I40E_MAC_X722) {
3455		if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3456			status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3457						  2 * I40E_SR_OCP_CFG_WORD0,
3458						  sizeof(ocp_cfg_word0),
3459						  &ocp_cfg_word0, true, NULL);
3460			if (!status &&
3461			    (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3462				hw->num_ports = 4;
3463			i40e_release_nvm(hw);
3464		}
3465	}
3466
3467	valid_functions = p->valid_functions;
3468	num_functions = 0;
3469	while (valid_functions) {
3470		if (valid_functions & 1)
3471			num_functions++;
3472		valid_functions >>= 1;
3473	}
3474
3475	/* partition id is 1-based, and functions are evenly spread
3476	 * across the ports as partitions
3477	 */
3478	if (hw->num_ports != 0) {
3479		hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3480		hw->num_partitions = num_functions / hw->num_ports;
3481	}
3482
3483	/* additional HW specific goodies that might
3484	 * someday be HW version specific
3485	 */
3486	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3487}
3488
3489/**
3490 * i40e_aq_discover_capabilities
3491 * @hw: pointer to the hw struct
3492 * @buff: a virtual buffer to hold the capabilities
3493 * @buff_size: Size of the virtual buffer
3494 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3495 * @list_type_opc: capabilities type to discover - pass in the command opcode
3496 * @cmd_details: pointer to command details structure or NULL
3497 *
3498 * Get the device capabilities descriptions from the firmware
3499 **/
3500i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3501				void *buff, u16 buff_size, u16 *data_size,
3502				enum i40e_admin_queue_opc list_type_opc,
3503				struct i40e_asq_cmd_details *cmd_details)
3504{
3505	struct i40e_aqc_list_capabilites *cmd;
3506	struct i40e_aq_desc desc;
3507	i40e_status status = 0;
3508
3509	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3510
3511	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3512		list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3513		status = I40E_ERR_PARAM;
3514		goto exit;
3515	}
3516
3517	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3518
3519	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3520	if (buff_size > I40E_AQ_LARGE_BUF)
3521		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3522
3523	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3524	*data_size = le16_to_cpu(desc.datalen);
3525
3526	if (status)
3527		goto exit;
3528
3529	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3530					 list_type_opc);
3531
3532exit:
3533	return status;
3534}
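
/* Usage sketch of the resize-and-retry pattern implied by the @data_size
 * description above: when firmware answers ENOMEM, data_size carries the
 * buffer size it actually needs, so reallocate and ask again.  The
 * initial buffer size is arbitrary.
 */
static i40e_status i40e_example_get_func_caps(struct i40e_hw *hw)
{
	u16 buff_size = 1024, data_size = 0;
	i40e_status status;
	void *buff;

	do {
		buff = kzalloc(buff_size, GFP_KERNEL);
		if (!buff)
			return I40E_ERR_NO_MEMORY;

		status = i40e_aq_discover_capabilities(hw, buff, buff_size,
					&data_size,
					i40e_aqc_opc_list_func_capabilities,
					NULL);
		kfree(buff);

		/* on ENOMEM, data_size is the size firmware needs */
		buff_size = data_size;
	} while (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM);

	return status;
}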
3535
3536/**
3537 * i40e_aq_update_nvm
3538 * @hw: pointer to the hw struct
3539 * @module_pointer: module pointer location in words from the NVM beginning
3540 * @offset: byte offset from the module beginning
3541 * @length: length of the section to be written (in bytes from the offset)
3542 * @data: command buffer (size [bytes] = length)
3543 * @last_command: tells if this is the last command in a series
3544 * @preservation_flags: Preservation mode flags
3545 * @cmd_details: pointer to command details structure or NULL
3546 *
3547 * Update the NVM using the admin queue commands
3548 **/
3549i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3550			       u32 offset, u16 length, void *data,
3551				bool last_command, u8 preservation_flags,
3552			       struct i40e_asq_cmd_details *cmd_details)
3553{
3554	struct i40e_aq_desc desc;
3555	struct i40e_aqc_nvm_update *cmd =
3556		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3557	i40e_status status;
3558
3559	/* The highest byte of the offset must be zero. */
3560	if (offset & 0xFF000000) {
3561		status = I40E_ERR_PARAM;
3562		goto i40e_aq_update_nvm_exit;
3563	}
3564
3565	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3566
3567	/* If this is the last command in a series, set the proper flag. */
3568	if (last_command)
3569		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3570	if (hw->mac.type == I40E_MAC_X722) {
3571		if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3572			cmd->command_flags |=
3573				(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3574				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3575		else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3576			cmd->command_flags |=
3577				(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3578				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3579	}
3580	cmd->module_pointer = module_pointer;
3581	cmd->offset = cpu_to_le32(offset);
3582	cmd->length = cpu_to_le16(length);
3583
3584	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3585	if (length > I40E_AQ_LARGE_BUF)
3586		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3587
3588	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3589
3590i40e_aq_update_nvm_exit:
3591	return status;
3592}
3593
3594/**
3595 * i40e_aq_rearrange_nvm
3596 * @hw: pointer to the hw struct
3597 * @rearrange_nvm: defines direction of rearrangement
3598 * @cmd_details: pointer to command details structure or NULL
3599 *
3600 * Rearrange NVM structure, available only for transition FW
3601 **/
3602i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3603				  u8 rearrange_nvm,
3604				  struct i40e_asq_cmd_details *cmd_details)
3605{
3606	struct i40e_aqc_nvm_update *cmd;
3607	i40e_status status;
3608	struct i40e_aq_desc desc;
3609
3610	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3611
3612	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3613
3614	rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3615			 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3616
3617	if (!rearrange_nvm) {
3618		status = I40E_ERR_PARAM;
3619		goto i40e_aq_rearrange_nvm_exit;
3620	}
3621
3622	cmd->command_flags |= rearrange_nvm;
3623	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3624
3625i40e_aq_rearrange_nvm_exit:
3626	return status;
3627}
3628
3629/**
3630 * i40e_aq_get_lldp_mib
3631 * @hw: pointer to the hw struct
3632 * @bridge_type: type of bridge requested
3633 * @mib_type: Local, Remote or both Local and Remote MIBs
3634 * @buff: pointer to a user supplied buffer to store the MIB block
3635 * @buff_size: size of the buffer (in bytes)
3636 * @local_len: length of the returned Local LLDP MIB
3637 * @remote_len: length of the returned Remote LLDP MIB
3638 * @cmd_details: pointer to command details structure or NULL
3639 *
3640 * Requests the complete LLDP MIB (entire packet).
3641 **/
3642i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3643				u8 mib_type, void *buff, u16 buff_size,
3644				u16 *local_len, u16 *remote_len,
3645				struct i40e_asq_cmd_details *cmd_details)
3646{
3647	struct i40e_aq_desc desc;
3648	struct i40e_aqc_lldp_get_mib *cmd =
3649		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3650	struct i40e_aqc_lldp_get_mib *resp =
3651		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3652	i40e_status status;
3653
3654	if (buff_size == 0 || !buff)
3655		return I40E_ERR_PARAM;
3656
3657	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3658	/* Indirect Command */
3659	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3660
3661	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3662	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3663		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3664
3665	desc.datalen = cpu_to_le16(buff_size);
3666
3667	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3668	if (buff_size > I40E_AQ_LARGE_BUF)
3669		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3670
3671	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3672	if (!status) {
3673		if (local_len != NULL)
3674			*local_len = le16_to_cpu(resp->local_len);
3675		if (remote_len != NULL)
3676			*remote_len = le16_to_cpu(resp->remote_len);
3677	}
3678
3679	return status;
3680}
3681
3682/**
3683 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3684 * @hw: pointer to the hw struct
3685 * @mib_type: Local, Remote or both Local and Remote MIBs
3686 * @buff: pointer to a user supplied buffer to store the MIB block
3687 * @buff_size: size of the buffer (in bytes)
3688 * @cmd_details: pointer to command details structure or NULL
3689 *
3690 * Set the LLDP MIB.
3691 **/
3692enum i40e_status_code
3693i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3694		     u8 mib_type, void *buff, u16 buff_size,
3695		     struct i40e_asq_cmd_details *cmd_details)
3696{
3697	struct i40e_aqc_lldp_set_local_mib *cmd;
3698	enum i40e_status_code status;
3699	struct i40e_aq_desc desc;
3700
3701	cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3702	if (buff_size == 0 || !buff)
3703		return I40E_ERR_PARAM;
3704
3705	i40e_fill_default_direct_cmd_desc(&desc,
3706					  i40e_aqc_opc_lldp_set_local_mib);
3707	/* Indirect Command */
3708	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3709	if (buff_size > I40E_AQ_LARGE_BUF)
3710		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3711	desc.datalen = cpu_to_le16(buff_size);
3712
3713	cmd->type = mib_type;
3714	cmd->length = cpu_to_le16(buff_size);
3715	cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3716	cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3717
3718	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3719	return status;
3720}
3721
3722/**
3723 * i40e_aq_cfg_lldp_mib_change_event
3724 * @hw: pointer to the hw struct
3725 * @enable_update: Enable or Disable event posting
3726 * @cmd_details: pointer to command details structure or NULL
3727 *
3728 * Enable or Disable posting of an event on ARQ when LLDP MIB
3729 * associated with the interface changes
3730 **/
3731i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3732				bool enable_update,
3733				struct i40e_asq_cmd_details *cmd_details)
3734{
3735	struct i40e_aq_desc desc;
3736	struct i40e_aqc_lldp_update_mib *cmd =
3737		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3738	i40e_status status;
3739
3740	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3741
3742	if (!enable_update)
3743		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3744
3745	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3746
3747	return status;
3748}
3749
3750/**
3751 * i40e_aq_restore_lldp
3752 * @hw: pointer to the hw struct
3753 * @setting: pointer to factory setting variable or NULL
3754 * @restore: True if factory settings should be restored
3755 * @cmd_details: pointer to command details structure or NULL
3756 *
3757 * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
3758 * only return the factory setting in the AQ response.
3759 **/
3760enum i40e_status_code
3761i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3762		     struct i40e_asq_cmd_details *cmd_details)
3763{
3764	struct i40e_aq_desc desc;
3765	struct i40e_aqc_lldp_restore *cmd =
3766		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
3767	i40e_status status;
3768
3769	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3770		i40e_debug(hw, I40E_DEBUG_ALL,
3771			   "Restore LLDP not supported by current FW version.\n");
3772		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3773	}
3774
3775	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3776
3777	if (restore)
3778		cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3779
3780	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3781
3782	if (setting)
3783		*setting = cmd->command & 1;
3784
3785	return status;
3786}
3787
3788/**
3789 * i40e_aq_stop_lldp
3790 * @hw: pointer to the hw struct
3791 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3792 * @persist: True if stop of LLDP should be persistent across power cycles
3793 * @cmd_details: pointer to command details structure or NULL
3794 *
3795 * Stop or Shutdown the embedded LLDP Agent
3796 **/
3797i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3798				bool persist,
3799				struct i40e_asq_cmd_details *cmd_details)
3800{
3801	struct i40e_aq_desc desc;
3802	struct i40e_aqc_lldp_stop *cmd =
3803		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
3804	i40e_status status;
3805
3806	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3807
3808	if (shutdown_agent)
3809		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3810
3811	if (persist) {
3812		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3813			cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3814		else
3815			i40e_debug(hw, I40E_DEBUG_ALL,
3816				   "Persistent Stop LLDP not supported by current FW version.\n");
3817	}
3818
3819	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3820
3821	return status;
3822}
3823
3824/**
3825 * i40e_aq_start_lldp
3826 * @hw: pointer to the hw struct
3827 * @persist: True if start of LLDP should be persistent across power cycles
3828 * @cmd_details: pointer to command details structure or NULL
3829 *
3830 * Start the embedded LLDP Agent on all ports.
3831 **/
3832i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3833			       struct i40e_asq_cmd_details *cmd_details)
3834{
3835	struct i40e_aq_desc desc;
3836	struct i40e_aqc_lldp_start *cmd =
3837		(struct i40e_aqc_lldp_start *)&desc.params.raw;
3838	i40e_status status;
3839
3840	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3841
3842	cmd->command = I40E_AQ_LLDP_AGENT_START;
3843
3844	if (persist) {
3845		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3846			cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3847		else
3848			i40e_debug(hw, I40E_DEBUG_ALL,
3849				   "Persistent Start LLDP not supported by current FW version.\n");
3850	}
3851
3852	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3853
3854	return status;
3855}
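
/* Usage sketch: hand LLDP duties back and forth between firmware and the
 * host.  Stopping with shutdown_agent=true fully shuts the agent down;
 * persistence is requested here only when starting, and the helpers above
 * already fall back gracefully if the firmware cannot persist the change.
 */
static void i40e_example_toggle_fw_lldp(struct i40e_hw *hw, bool enable)
{
	if (enable)
		i40e_aq_start_lldp(hw, true, NULL);
	else
		i40e_aq_stop_lldp(hw, true, false, NULL);
}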
3856
3857/**
3858 * i40e_aq_set_dcb_parameters
3859 * @hw: pointer to the hw struct
3860 * @dcb_enable: True if DCB configuration needs to be applied
3861 * @cmd_details: pointer to command details structure or NULL
3862 *
3863 **/
3864enum i40e_status_code
3865i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3866			   struct i40e_asq_cmd_details *cmd_details)
3867{
3868	struct i40e_aq_desc desc;
3869	struct i40e_aqc_set_dcb_parameters *cmd =
3870		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3871	i40e_status status;
3872
3873	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3874		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3875
3876	i40e_fill_default_direct_cmd_desc(&desc,
3877					  i40e_aqc_opc_set_dcb_parameters);
3878
3879	if (dcb_enable) {
3880		cmd->valid_flags = I40E_DCB_VALID;
3881		cmd->command = I40E_AQ_DCB_SET_AGENT;
3882	}
3883	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3884
3885	return status;
3886}
3887
3888/**
3889 * i40e_aq_get_cee_dcb_config
3890 * @hw: pointer to the hw struct
3891 * @buff: response buffer that stores CEE operational configuration
3892 * @buff_size: size of the buffer passed
3893 * @cmd_details: pointer to command details structure or NULL
3894 *
3895 * Get CEE DCBX mode operational configuration from firmware
3896 **/
3897i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3898				       void *buff, u16 buff_size,
3899				       struct i40e_asq_cmd_details *cmd_details)
3900{
3901	struct i40e_aq_desc desc;
3902	i40e_status status;
3903
3904	if (buff_size == 0 || !buff)
3905		return I40E_ERR_PARAM;
3906
3907	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3908
3909	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3910	status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3911				       cmd_details);
3912
3913	return status;
3914}
3915
3916/**
3917 * i40e_aq_add_udp_tunnel
3918 * @hw: pointer to the hw struct
3919 * @udp_port: the UDP port to add in Host byte order
3920 * @protocol_index: protocol index type
3921 * @filter_index: pointer to filter index
3922 * @cmd_details: pointer to command details structure or NULL
3923 *
3924 * Note: Firmware expects the udp_port value to be in Little Endian format,
3925 * and this function will call cpu_to_le16 to convert from Host byte order to
3926 * Little Endian order.
3927 **/
3928i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3929				u16 udp_port, u8 protocol_index,
3930				u8 *filter_index,
3931				struct i40e_asq_cmd_details *cmd_details)
3932{
3933	struct i40e_aq_desc desc;
3934	struct i40e_aqc_add_udp_tunnel *cmd =
3935		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3936	struct i40e_aqc_del_udp_tunnel_completion *resp =
3937		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3938	i40e_status status;
3939
3940	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3941
3942	cmd->udp_port = cpu_to_le16(udp_port);
3943	cmd->protocol_type = protocol_index;
3944
3945	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3946
3947	if (!status && filter_index)
3948		*filter_index = resp->index;
3949
3950	return status;
3951}
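
/* Usage sketch: register a VXLAN destination UDP port with the hardware
 * parser and keep the returned filter index so the filter can later be
 * removed with i40e_aq_del_udp_tunnel().  The tunnel-type constant is
 * taken from the AdminQ command definitions and is an assumption here.
 */
static i40e_status i40e_example_add_vxlan_port(struct i40e_hw *hw, u16 port,
					       u8 *filter_index)
{
	return i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
				      filter_index, NULL);
}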
3952
3953/**
3954 * i40e_aq_del_udp_tunnel
3955 * @hw: pointer to the hw struct
3956 * @index: filter index
3957 * @cmd_details: pointer to command details structure or NULL
3958 **/
3959i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3960				struct i40e_asq_cmd_details *cmd_details)
3961{
3962	struct i40e_aq_desc desc;
3963	struct i40e_aqc_remove_udp_tunnel *cmd =
3964		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3965	i40e_status status;
3966
3967	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3968
3969	cmd->index = index;
3970
3971	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3972
3973	return status;
3974}
3975
3976/**
3977 * i40e_aq_delete_element - Delete switch element
3978 * @hw: pointer to the hw struct
3979 * @seid: the SEID to delete from the switch
3980 * @cmd_details: pointer to command details structure or NULL
3981 *
3982 * This deletes a switch element from the switch.
3983 **/
3984i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3985				struct i40e_asq_cmd_details *cmd_details)
3986{
3987	struct i40e_aq_desc desc;
3988	struct i40e_aqc_switch_seid *cmd =
3989		(struct i40e_aqc_switch_seid *)&desc.params.raw;
3990	i40e_status status;
3991
3992	if (seid == 0)
3993		return I40E_ERR_PARAM;
3994
3995	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3996
3997	cmd->seid = cpu_to_le16(seid);
3998
3999	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
4000					      cmd_details, true);
4001
4002	return status;
4003}
4004
4005/**
4006 * i40e_aq_dcb_updated - DCB Updated Command
4007 * @hw: pointer to the hw struct
4008 * @cmd_details: pointer to command details structure or NULL
4009 *
4010 * EMP will return when the shared RPB settings have been
4011 * recomputed and modified. The retval field in the descriptor
4012 * will be set to 0 when RPB is modified.
4013 **/
4014i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
4015				struct i40e_asq_cmd_details *cmd_details)
4016{
4017	struct i40e_aq_desc desc;
4018	i40e_status status;
4019
4020	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
4021
4022	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4023
4024	return status;
4025}
4026
4027/**
4028 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
4029 * @hw: pointer to the hw struct
4030 * @seid: seid for the physical port/switching component/vsi
4031 * @buff: Indirect buffer to hold data parameters and response
4032 * @buff_size: Indirect buffer size
4033 * @opcode: Tx scheduler AQ command opcode
4034 * @cmd_details: pointer to command details structure or NULL
4035 *
4036 * Generic command handler for Tx scheduler AQ commands
4037 **/
4038static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
4039				void *buff, u16 buff_size,
4040				 enum i40e_admin_queue_opc opcode,
4041				struct i40e_asq_cmd_details *cmd_details)
4042{
4043	struct i40e_aq_desc desc;
4044	struct i40e_aqc_tx_sched_ind *cmd =
4045		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4046	i40e_status status;
4047	bool cmd_param_flag = false;
4048
4049	switch (opcode) {
4050	case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
4051	case i40e_aqc_opc_configure_vsi_tc_bw:
4052	case i40e_aqc_opc_enable_switching_comp_ets:
4053	case i40e_aqc_opc_modify_switching_comp_ets:
4054	case i40e_aqc_opc_disable_switching_comp_ets:
4055	case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
4056	case i40e_aqc_opc_configure_switching_comp_bw_config:
4057		cmd_param_flag = true;
4058		break;
4059	case i40e_aqc_opc_query_vsi_bw_config:
4060	case i40e_aqc_opc_query_vsi_ets_sla_config:
4061	case i40e_aqc_opc_query_switching_comp_ets_config:
4062	case i40e_aqc_opc_query_port_ets_config:
4063	case i40e_aqc_opc_query_switching_comp_bw_config:
4064		cmd_param_flag = false;
4065		break;
4066	default:
4067		return I40E_ERR_PARAM;
4068	}
4069
4070	i40e_fill_default_direct_cmd_desc(&desc, opcode);
4071
4072	/* Indirect command */
4073	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4074	if (cmd_param_flag)
4075		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4076	if (buff_size > I40E_AQ_LARGE_BUF)
4077		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4078
4079	desc.datalen = cpu_to_le16(buff_size);
4080
4081	cmd->vsi_seid = cpu_to_le16(seid);
4082
4083	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4084
4085	return status;
4086}
4087
4088/**
4089 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
4090 * @hw: pointer to the hw struct
4091 * @seid: VSI seid
4092 * @credit: BW limit credits (0 = disabled)
4093 * @max_credit: Max BW limit credits
4094 * @cmd_details: pointer to command details structure or NULL
4095 **/
4096i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
4097				u16 seid, u16 credit, u8 max_credit,
4098				struct i40e_asq_cmd_details *cmd_details)
4099{
4100	struct i40e_aq_desc desc;
4101	struct i40e_aqc_configure_vsi_bw_limit *cmd =
4102		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
4103	i40e_status status;
4104
4105	i40e_fill_default_direct_cmd_desc(&desc,
4106					  i40e_aqc_opc_configure_vsi_bw_limit);
4107
4108	cmd->vsi_seid = cpu_to_le16(seid);
4109	cmd->credit = cpu_to_le16(credit);
4110	cmd->max_credit = max_credit;
4111
4112	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4113
4114	return status;
4115}
4116
4117/**
4118 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
4119 * @hw: pointer to the hw struct
4120 * @seid: VSI seid
4121 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
4122 * @cmd_details: pointer to command details structure or NULL
4123 **/
4124i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
4125			u16 seid,
4126			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
4127			struct i40e_asq_cmd_details *cmd_details)
4128{
4129	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4130				    i40e_aqc_opc_configure_vsi_tc_bw,
4131				    cmd_details);
4132}
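
/*
 * Illustrative sketch, not part of the upstream driver: split a VSI's
 * bandwidth evenly across two traffic classes.  The tc_valid_bits and
 * tc_bw_credits field names mirror their use elsewhere in the driver and
 * are assumptions here.
 */
static inline i40e_status i40e_example_even_tc_split(struct i40e_hw *hw,
						     u16 vsi_seid)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {0};

	bw_data.tc_valid_bits = 0x3;		/* TC0 and TC1 enabled */
	bw_data.tc_bw_credits[0] = 50;		/* relative credits */
	bw_data.tc_bw_credits[1] = 50;

	return i40e_aq_config_vsi_tc_bw(hw, vsi_seid, &bw_data, NULL);
}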
4133
4134/**
4135 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4136 * @hw: pointer to the hw struct
4137 * @seid: seid of the switching component connected to Physical Port
4138 * @ets_data: Buffer holding ETS parameters
4139 * @opcode: Tx scheduler AQ command opcode
4140 * @cmd_details: pointer to command details structure or NULL
4141 **/
4142i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4143		u16 seid,
4144		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4145		enum i40e_admin_queue_opc opcode,
4146		struct i40e_asq_cmd_details *cmd_details)
4147{
4148	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4149				    sizeof(*ets_data), opcode, cmd_details);
4150}
4151
4152/**
4153 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4154 * @hw: pointer to the hw struct
4155 * @seid: seid of the switching component
4156 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4157 * @cmd_details: pointer to command details structure or NULL
4158 **/
4159i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4160	u16 seid,
4161	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4162	struct i40e_asq_cmd_details *cmd_details)
4163{
4164	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4165			    i40e_aqc_opc_configure_switching_comp_bw_config,
4166			    cmd_details);
4167}
4168
4169/**
4170 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4171 * @hw: pointer to the hw struct
4172 * @seid: seid of the VSI
4173 * @bw_data: Buffer to hold VSI BW configuration
4174 * @cmd_details: pointer to command details structure or NULL
4175 **/
4176i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4177			u16 seid,
4178			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4179			struct i40e_asq_cmd_details *cmd_details)
4180{
4181	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4182				    i40e_aqc_opc_query_vsi_bw_config,
4183				    cmd_details);
4184}
4185
4186/**
4187 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4188 * @hw: pointer to the hw struct
4189 * @seid: seid of the VSI
4190 * @bw_data: Buffer to hold VSI BW configuration per TC
4191 * @cmd_details: pointer to command details structure or NULL
4192 **/
4193i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4194			u16 seid,
4195			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4196			struct i40e_asq_cmd_details *cmd_details)
4197{
4198	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4199				    i40e_aqc_opc_query_vsi_ets_sla_config,
4200				    cmd_details);
4201}
4202
4203/**
4204 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4205 * @hw: pointer to the hw struct
4206 * @seid: seid of the switching component
4207 * @bw_data: Buffer to hold switching component's per TC BW config
4208 * @cmd_details: pointer to command details structure or NULL
4209 **/
4210i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4211		u16 seid,
4212		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4213		struct i40e_asq_cmd_details *cmd_details)
4214{
4215	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4216				   i40e_aqc_opc_query_switching_comp_ets_config,
4217				   cmd_details);
4218}
4219
4220/**
4221 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4222 * @hw: pointer to the hw struct
4223 * @seid: seid of the VSI or switching component connected to Physical Port
4224 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4225 * @cmd_details: pointer to command details structure or NULL
4226 **/
4227i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4228			u16 seid,
4229			struct i40e_aqc_query_port_ets_config_resp *bw_data,
4230			struct i40e_asq_cmd_details *cmd_details)
4231{
4232	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4233				    i40e_aqc_opc_query_port_ets_config,
4234				    cmd_details);
4235}
4236
4237/**
4238 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4239 * @hw: pointer to the hw struct
4240 * @seid: seid of the switching component
4241 * @bw_data: Buffer to hold switching component's BW configuration
4242 * @cmd_details: pointer to command details structure or NULL
4243 **/
4244i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4245		u16 seid,
4246		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4247		struct i40e_asq_cmd_details *cmd_details)
4248{
4249	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4250				    i40e_aqc_opc_query_switching_comp_bw_config,
4251				    cmd_details);
4252}
4253
4254/**
4255 * i40e_validate_filter_settings
4256 * @hw: pointer to the hardware structure
4257 * @settings: Filter control settings
4258 *
4259 * Check and validate the filter control settings passed.
4260 * The function checks for the valid filter/context sizes being
4261 * passed for FCoE and PE.
4262 *
4263 * Returns 0 if the values passed are valid and within
4264 * range; otherwise it returns an error.
4265 **/
4266static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4267				struct i40e_filter_control_settings *settings)
4268{
4269	u32 fcoe_cntx_size, fcoe_filt_size;
4270	u32 fcoe_fmax;
4271	u32 val;
4272
4273	/* Validate FCoE settings passed */
4274	switch (settings->fcoe_filt_num) {
4275	case I40E_HASH_FILTER_SIZE_1K:
4276	case I40E_HASH_FILTER_SIZE_2K:
4277	case I40E_HASH_FILTER_SIZE_4K:
4278	case I40E_HASH_FILTER_SIZE_8K:
4279	case I40E_HASH_FILTER_SIZE_16K:
4280	case I40E_HASH_FILTER_SIZE_32K:
4281		fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4282		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4283		break;
4284	default:
4285		return I40E_ERR_PARAM;
4286	}
4287
4288	switch (settings->fcoe_cntx_num) {
4289	case I40E_DMA_CNTX_SIZE_512:
4290	case I40E_DMA_CNTX_SIZE_1K:
4291	case I40E_DMA_CNTX_SIZE_2K:
4292	case I40E_DMA_CNTX_SIZE_4K:
4293		fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4294		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4295		break;
4296	default:
4297		return I40E_ERR_PARAM;
4298	}
4299
4300	/* Validate PE settings passed */
4301	switch (settings->pe_filt_num) {
4302	case I40E_HASH_FILTER_SIZE_1K:
4303	case I40E_HASH_FILTER_SIZE_2K:
4304	case I40E_HASH_FILTER_SIZE_4K:
4305	case I40E_HASH_FILTER_SIZE_8K:
4306	case I40E_HASH_FILTER_SIZE_16K:
4307	case I40E_HASH_FILTER_SIZE_32K:
4308	case I40E_HASH_FILTER_SIZE_64K:
4309	case I40E_HASH_FILTER_SIZE_128K:
4310	case I40E_HASH_FILTER_SIZE_256K:
4311	case I40E_HASH_FILTER_SIZE_512K:
4312	case I40E_HASH_FILTER_SIZE_1M:
4313		break;
4314	default:
4315		return I40E_ERR_PARAM;
4316	}
4317
4318	switch (settings->pe_cntx_num) {
4319	case I40E_DMA_CNTX_SIZE_512:
4320	case I40E_DMA_CNTX_SIZE_1K:
4321	case I40E_DMA_CNTX_SIZE_2K:
4322	case I40E_DMA_CNTX_SIZE_4K:
4323	case I40E_DMA_CNTX_SIZE_8K:
4324	case I40E_DMA_CNTX_SIZE_16K:
4325	case I40E_DMA_CNTX_SIZE_32K:
4326	case I40E_DMA_CNTX_SIZE_64K:
4327	case I40E_DMA_CNTX_SIZE_128K:
4328	case I40E_DMA_CNTX_SIZE_256K:
4329		break;
4330	default:
4331		return I40E_ERR_PARAM;
4332	}
4333
4334	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4335	val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4336	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4337		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4338	if (fcoe_filt_size + fcoe_cntx_size >  fcoe_fmax)
4339		return I40E_ERR_INVALID_SIZE;
4340
4341	return 0;
4342}
4343
4344/**
4345 * i40e_set_filter_control
4346 * @hw: pointer to the hardware structure
4347 * @settings: Filter control settings
4348 *
4349 * Set the Queue Filters for PE/FCoE and enable filters required
4350 * for a single PF. It is expected that these settings are programmed
4351 * at the driver initialization time.
4352 **/
4353i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4354				struct i40e_filter_control_settings *settings)
4355{
4356	i40e_status ret = 0;
4357	u32 hash_lut_size = 0;
4358	u32 val;
4359
4360	if (!settings)
4361		return I40E_ERR_PARAM;
4362
4363	/* Validate the input settings */
4364	ret = i40e_validate_filter_settings(hw, settings);
4365	if (ret)
4366		return ret;
4367
4368	/* Read the PF Queue Filter control register */
4369	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4370
4371	/* Program required PE hash buckets for the PF */
4372	val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4373	val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4374		I40E_PFQF_CTL_0_PEHSIZE_MASK;
4375	/* Program required PE contexts for the PF */
4376	val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4377	val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4378		I40E_PFQF_CTL_0_PEDSIZE_MASK;
4379
4380	/* Program required FCoE hash buckets for the PF */
4381	val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4382	val |= ((u32)settings->fcoe_filt_num <<
4383			I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4384		I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4385	/* Program required FCoE DDP contexts for the PF */
4386	val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4387	val |= ((u32)settings->fcoe_cntx_num <<
4388			I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4389		I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4390
4391	/* Program Hash LUT size for the PF */
4392	val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4393	if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4394		hash_lut_size = 1;
4395	val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4396		I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4397
4398	/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4399	if (settings->enable_fdir)
4400		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4401	if (settings->enable_ethtype)
4402		val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4403	if (settings->enable_macvlan)
4404		val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4405
4406	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4407
4408	return 0;
4409}
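
/*
 * Illustrative sketch, not part of the upstream driver: the filter control
 * settings are normally filled in once at probe time, before any queues are
 * brought up, using the same enum values validated above.
 */
static inline i40e_status i40e_example_init_filter_control(struct i40e_hw *hw)
{
	struct i40e_filter_control_settings settings = {0};

	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	settings.enable_fdir = true;
	settings.enable_ethtype = true;
	settings.enable_macvlan = true;

	return i40e_set_filter_control(hw, &settings);
}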
4410
4411/**
4412 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4413 * @hw: pointer to the hw struct
4414 * @mac_addr: MAC address to use in the filter
4415 * @ethtype: Ethertype to use in the filter
4416 * @flags: Flags that need to be applied to the filter
4417 * @vsi_seid: seid of the control VSI
4418 * @queue: VSI queue number to send the packet to
4419 * @is_add: Add control packet filter if True else remove
4420 * @stats: Structure to hold information on control filter counts
4421 * @cmd_details: pointer to command details structure or NULL
4422 *
4423 * This command adds or removes a control packet filter for a control VSI.
4424 * On success it also updates the used and free perfect filter counts in
4425 * the stats member.
4426 **/
4427i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4428				u8 *mac_addr, u16 ethtype, u16 flags,
4429				u16 vsi_seid, u16 queue, bool is_add,
4430				struct i40e_control_filter_stats *stats,
4431				struct i40e_asq_cmd_details *cmd_details)
4432{
4433	struct i40e_aq_desc desc;
4434	struct i40e_aqc_add_remove_control_packet_filter *cmd =
4435		(struct i40e_aqc_add_remove_control_packet_filter *)
4436		&desc.params.raw;
4437	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4438		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
4439		&desc.params.raw;
4440	i40e_status status;
4441
4442	if (vsi_seid == 0)
4443		return I40E_ERR_PARAM;
4444
4445	if (is_add) {
4446		i40e_fill_default_direct_cmd_desc(&desc,
4447				i40e_aqc_opc_add_control_packet_filter);
4448		cmd->queue = cpu_to_le16(queue);
4449	} else {
4450		i40e_fill_default_direct_cmd_desc(&desc,
4451				i40e_aqc_opc_remove_control_packet_filter);
4452	}
4453
4454	if (mac_addr)
4455		ether_addr_copy(cmd->mac, mac_addr);
4456
4457	cmd->etype = cpu_to_le16(ethtype);
4458	cmd->flags = cpu_to_le16(flags);
4459	cmd->seid = cpu_to_le16(vsi_seid);
4460
4461	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4462
4463	if (!status && stats) {
4464		stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4465		stats->etype_used = le16_to_cpu(resp->etype_used);
4466		stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4467		stats->etype_free = le16_to_cpu(resp->etype_free);
4468	}
4469
4470	return status;
4471}
4472
4473/**
4474 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4475 * @hw: pointer to the hw struct
4476 * @seid: VSI seid on which to add the ethertype filter
4477 **/
4478void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4479						    u16 seid)
4480{
4481#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4482	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4483		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4484		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4485	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4486	i40e_status status;
4487
4488	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4489						       seid, 0, true, NULL,
4490						       NULL);
4491	if (status)
4492		hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4493}
4494
4495/**
4496 * i40e_aq_alternate_read
4497 * @hw: pointer to the hardware structure
4498 * @reg_addr0: address of first dword to be read
4499 * @reg_val0: pointer for data read from 'reg_addr0'
4500 * @reg_addr1: address of second dword to be read
4501 * @reg_val1: pointer for data read from 'reg_addr1'
4502 *
4503 * Read one or two dwords from alternate structure. Fields are indicated
4504 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4505 * is not passed then only register at 'reg_addr0' is read.
4506 *
4507 **/
4508static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4509					  u32 reg_addr0, u32 *reg_val0,
4510					  u32 reg_addr1, u32 *reg_val1)
4511{
4512	struct i40e_aq_desc desc;
4513	struct i40e_aqc_alternate_write *cmd_resp =
4514		(struct i40e_aqc_alternate_write *)&desc.params.raw;
4515	i40e_status status;
4516
4517	if (!reg_val0)
4518		return I40E_ERR_PARAM;
4519
4520	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4521	cmd_resp->address0 = cpu_to_le32(reg_addr0);
4522	cmd_resp->address1 = cpu_to_le32(reg_addr1);
4523
4524	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4525
4526	if (!status) {
4527		*reg_val0 = le32_to_cpu(cmd_resp->data0);
4528
4529		if (reg_val1)
4530			*reg_val1 = le32_to_cpu(cmd_resp->data1);
4531	}
4532
4533	return status;
4534}
4535
4536/**
4537 * i40e_aq_suspend_port_tx
4538 * @hw: pointer to the hardware structure
4539 * @seid: port seid
4540 * @cmd_details: pointer to command details structure or NULL
4541 *
4542 * Suspend port's Tx traffic
4543 **/
4544i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4545				    struct i40e_asq_cmd_details *cmd_details)
4546{
4547	struct i40e_aqc_tx_sched_ind *cmd;
4548	struct i40e_aq_desc desc;
4549	i40e_status status;
4550
4551	cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4552	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4553	cmd->vsi_seid = cpu_to_le16(seid);
4554	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4555
4556	return status;
4557}
4558
4559/**
4560 * i40e_aq_resume_port_tx
4561 * @hw: pointer to the hardware structure
4562 * @cmd_details: pointer to command details structure or NULL
4563 *
4564 * Resume port's Tx traffic
4565 **/
4566i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4567				   struct i40e_asq_cmd_details *cmd_details)
4568{
4569	struct i40e_aq_desc desc;
4570	i40e_status status;
4571
4572	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4573
4574	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4575
4576	return status;
4577}
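
/*
 * Illustrative sketch, not part of the upstream driver: DCB reconfiguration
 * usually brackets scheduler updates with a suspend/resume pair so no Tx
 * traffic flows while the ETS tables are rewritten.
 */
static inline i40e_status i40e_example_quiesce_port_tx(struct i40e_hw *hw,
						       u16 port_seid)
{
	i40e_status status;

	status = i40e_aq_suspend_port_tx(hw, port_seid, NULL);
	if (status)
		return status;

	/* ... reprogram ETS / bandwidth limits here ... */

	return i40e_aq_resume_port_tx(hw, NULL);
}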
4578
4579/**
4580 * i40e_set_pci_config_data - store PCI bus info
4581 * @hw: pointer to hardware structure
4582 * @link_status: the link status word from PCI config space
4583 *
4584 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4585 **/
4586void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4587{
4588	hw->bus.type = i40e_bus_type_pci_express;
4589
4590	switch (link_status & PCI_EXP_LNKSTA_NLW) {
4591	case PCI_EXP_LNKSTA_NLW_X1:
4592		hw->bus.width = i40e_bus_width_pcie_x1;
4593		break;
4594	case PCI_EXP_LNKSTA_NLW_X2:
4595		hw->bus.width = i40e_bus_width_pcie_x2;
4596		break;
4597	case PCI_EXP_LNKSTA_NLW_X4:
4598		hw->bus.width = i40e_bus_width_pcie_x4;
4599		break;
4600	case PCI_EXP_LNKSTA_NLW_X8:
4601		hw->bus.width = i40e_bus_width_pcie_x8;
4602		break;
4603	default:
4604		hw->bus.width = i40e_bus_width_unknown;
4605		break;
4606	}
4607
4608	switch (link_status & PCI_EXP_LNKSTA_CLS) {
4609	case PCI_EXP_LNKSTA_CLS_2_5GB:
4610		hw->bus.speed = i40e_bus_speed_2500;
4611		break;
4612	case PCI_EXP_LNKSTA_CLS_5_0GB:
4613		hw->bus.speed = i40e_bus_speed_5000;
4614		break;
4615	case PCI_EXP_LNKSTA_CLS_8_0GB:
4616		hw->bus.speed = i40e_bus_speed_8000;
4617		break;
4618	default:
4619		hw->bus.speed = i40e_bus_speed_unknown;
4620		break;
4621	}
4622}
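
/*
 * Illustrative sketch, not part of the upstream driver: the link status word
 * comes straight from PCIe config space, typically via
 * pcie_capability_read_word() on the function's pci_dev.
 */
static inline void i40e_example_record_pci_info(struct i40e_hw *hw,
						struct pci_dev *pdev)
{
	u16 link_status = 0;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);
}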
4623
4624/**
4625 * i40e_aq_debug_dump
4626 * @hw: pointer to the hardware structure
4627 * @cluster_id: specific cluster to dump
4628 * @table_id: table id within cluster
4629 * @start_index: index of line in the block to read
4630 * @buff_size: dump buffer size
4631 * @buff: dump buffer
4632 * @ret_buff_size: actual buffer size returned
4633 * @ret_next_table: next block to read
4634 * @ret_next_index: next index to read
4635 * @cmd_details: pointer to command details structure or NULL
4636 *
4637 * Dump internal FW/HW data for debug purposes.
4638 *
4639 **/
4640i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4641			       u8 table_id, u32 start_index, u16 buff_size,
4642			       void *buff, u16 *ret_buff_size,
4643			       u8 *ret_next_table, u32 *ret_next_index,
4644			       struct i40e_asq_cmd_details *cmd_details)
4645{
4646	struct i40e_aq_desc desc;
4647	struct i40e_aqc_debug_dump_internals *cmd =
4648		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4649	struct i40e_aqc_debug_dump_internals *resp =
4650		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4651	i40e_status status;
4652
4653	if (buff_size == 0 || !buff)
4654		return I40E_ERR_PARAM;
4655
4656	i40e_fill_default_direct_cmd_desc(&desc,
4657					  i40e_aqc_opc_debug_dump_internals);
4658	/* Indirect Command */
4659	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4660	if (buff_size > I40E_AQ_LARGE_BUF)
4661		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4662
4663	cmd->cluster_id = cluster_id;
4664	cmd->table_id = table_id;
4665	cmd->idx = cpu_to_le32(start_index);
4666
4667	desc.datalen = cpu_to_le16(buff_size);
4668
4669	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4670	if (!status) {
4671		if (ret_buff_size)
4672			*ret_buff_size = le16_to_cpu(desc.datalen);
4673		if (ret_next_table)
4674			*ret_next_table = resp->table_id;
4675		if (ret_next_index)
4676			*ret_next_index = le32_to_cpu(resp->idx);
4677	}
4678
4679	return status;
4680}
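
/*
 * Illustrative sketch, not part of the upstream driver: dump one block of a
 * firmware cluster; the out parameters report how much data came back and
 * where the next read should start.  The cluster/table ids are placeholders.
 */
static inline i40e_status i40e_example_dump_block(struct i40e_hw *hw,
						  void *buf, u16 buf_len)
{
	u16 bytes = 0;
	u8 next_table = 0;
	u32 next_index = 0;

	return i40e_aq_debug_dump(hw, 0x1, 0x0, 0, buf_len, buf,
				  &bytes, &next_table, &next_index, NULL);
}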
4681
4682/**
4683 * i40e_read_bw_from_alt_ram
4684 * @hw: pointer to the hardware structure
4685 * @max_bw: pointer for max_bw read
4686 * @min_bw: pointer for min_bw read
4687 * @min_valid: pointer for bool that is true if min_bw is a valid value
4688 * @max_valid: pointer for bool that is true if max_bw is a valid value
4689 *
4690 * Read bandwidth from the alternate RAM for the given PF
4691 **/
4692i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4693				      u32 *max_bw, u32 *min_bw,
4694				      bool *min_valid, bool *max_valid)
4695{
4696	i40e_status status;
4697	u32 max_bw_addr, min_bw_addr;
4698
4699	/* Calculate the address of the min/max bw registers */
4700	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4701		      I40E_ALT_STRUCT_MAX_BW_OFFSET +
4702		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4703	min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4704		      I40E_ALT_STRUCT_MIN_BW_OFFSET +
4705		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4706
4707	/* Read the bandwidths from alt ram */
4708	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4709					min_bw_addr, min_bw);
4710
4711	if (*min_bw & I40E_ALT_BW_VALID_MASK)
4712		*min_valid = true;
4713	else
4714		*min_valid = false;
4715
4716	if (*max_bw & I40E_ALT_BW_VALID_MASK)
4717		*max_valid = true;
4718	else
4719		*max_valid = false;
4720
4721	return status;
4722}
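
/*
 * Illustrative sketch, not part of the upstream driver: only trust the
 * alternate-RAM bandwidth values when the matching valid bit was set.
 */
static inline void i40e_example_get_partition_bw(struct i40e_hw *hw,
						 u32 *max_bw, u32 *min_bw)
{
	bool min_valid = false, max_valid = false;

	if (i40e_read_bw_from_alt_ram(hw, max_bw, min_bw,
				      &min_valid, &max_valid))
		return;

	if (!min_valid)
		*min_bw = 0;
	if (!max_valid)
		*max_bw = 0;
}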
4723
4724/**
4725 * i40e_aq_configure_partition_bw
4726 * @hw: pointer to the hardware structure
4727 * @bw_data: Buffer holding valid PFs and BW limits
4728 * @cmd_details: pointer to command details
4729 *
4730 * Configure the partitions' guaranteed/max BW
4731 **/
4732i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4733			struct i40e_aqc_configure_partition_bw_data *bw_data,
4734			struct i40e_asq_cmd_details *cmd_details)
4735{
4736	i40e_status status;
4737	struct i40e_aq_desc desc;
4738	u16 bwd_size = sizeof(*bw_data);
4739
4740	i40e_fill_default_direct_cmd_desc(&desc,
4741					  i40e_aqc_opc_configure_partition_bw);
4742
4743	/* Indirect command */
4744	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4745	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4746
4747	if (bwd_size > I40E_AQ_LARGE_BUF)
4748		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4749
4750	desc.datalen = cpu_to_le16(bwd_size);
4751
4752	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4753				       cmd_details);
4754
4755	return status;
4756}
4757
4758/**
4759 * i40e_read_phy_register_clause22
4760 * @hw: pointer to the HW structure
4761 * @reg: register address in the page
4762 * @phy_addr: PHY address on MDIO interface
4763 * @value: PHY register value
4764 *
4765 * Reads specified PHY register value
4766 **/
4767i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4768					    u16 reg, u8 phy_addr, u16 *value)
4769{
4770	i40e_status status = I40E_ERR_TIMEOUT;
4771	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4772	u32 command = 0;
4773	u16 retry = 1000;
4774
4775	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4776		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4777		  (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4778		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4779		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4780	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4781	do {
4782		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4783		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4784			status = 0;
4785			break;
4786		}
4787		udelay(10);
4788		retry--;
4789	} while (retry);
4790
4791	if (status) {
4792		i40e_debug(hw, I40E_DEBUG_PHY,
4793			   "PHY: Can't write command to external PHY.\n");
4794	} else {
4795		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4796		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4797			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4798	}
4799
4800	return status;
4801}
4802
4803/**
4804 * i40e_write_phy_register_clause22
4805 * @hw: pointer to the HW structure
4806 * @reg: register address in the page
4807 * @phy_addr: PHY address on MDIO interface
4808 * @value: PHY register value
4809 *
4810 * Writes specified PHY register value
4811 **/
4812i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4813					     u16 reg, u8 phy_addr, u16 value)
4814{
4815	i40e_status status = I40E_ERR_TIMEOUT;
4816	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4817	u32 command  = 0;
4818	u16 retry = 1000;
4819
4820	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4821	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4822
4823	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4824		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4825		  (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4826		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4827		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4828
4829	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4830	do {
4831		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4832		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4833			status = 0;
4834			break;
4835		}
4836		udelay(10);
4837		retry--;
4838	} while (retry);
4839
4840	return status;
4841}
4842
4843/**
4844 * i40e_read_phy_register_clause45
4845 * @hw: pointer to the HW structure
4846 * @page: registers page number
4847 * @reg: register address in the page
4848 * @phy_addr: PHY address on MDIO interface
4849 * @value: PHY register value
4850 *
4851 * Reads specified PHY register value
4852 **/
4853i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4854				u8 page, u16 reg, u8 phy_addr, u16 *value)
4855{
4856	i40e_status status = I40E_ERR_TIMEOUT;
4857	u32 command = 0;
4858	u16 retry = 1000;
4859	u8 port_num = hw->func_caps.mdio_port_num;
4860
4861	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4862		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4863		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4864		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4865		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4866		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4867		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4868	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4869	do {
4870		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4871		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4872			status = 0;
4873			break;
4874		}
4875		usleep_range(10, 20);
4876		retry--;
4877	} while (retry);
4878
4879	if (status) {
4880		i40e_debug(hw, I40E_DEBUG_PHY,
4881			   "PHY: Can't write command to external PHY.\n");
4882		goto phy_read_end;
4883	}
4884
4885	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4886		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4887		  (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4888		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4889		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4890		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4891	status = I40E_ERR_TIMEOUT;
4892	retry = 1000;
4893	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4894	do {
4895		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4896		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4897			status = 0;
4898			break;
4899		}
4900		usleep_range(10, 20);
4901		retry--;
4902	} while (retry);
4903
4904	if (!status) {
4905		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4906		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4907			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4908	} else {
4909		i40e_debug(hw, I40E_DEBUG_PHY,
4910			   "PHY: Can't read register value from external PHY.\n");
4911	}
4912
4913phy_read_end:
4914	return status;
4915}
4916
4917/**
4918 * i40e_write_phy_register_clause45
4919 * @hw: pointer to the HW structure
4920 * @page: registers page number
4921 * @reg: register address in the page
4922 * @phy_addr: PHY address on MDIO interface
4923 * @value: PHY register value
4924 *
4925 * Writes value to specified PHY register
4926 **/
4927i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4928				u8 page, u16 reg, u8 phy_addr, u16 value)
4929{
4930	i40e_status status = I40E_ERR_TIMEOUT;
4931	u32 command = 0;
4932	u16 retry = 1000;
4933	u8 port_num = hw->func_caps.mdio_port_num;
4934
4935	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4936		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4937		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4938		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4939		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4940		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4941		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4942	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4943	do {
4944		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4945		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4946			status = 0;
4947			break;
4948		}
4949		usleep_range(10, 20);
4950		retry--;
4951	} while (retry);
4952	if (status) {
4953		i40e_debug(hw, I40E_DEBUG_PHY,
4954			   "PHY: Can't write command to external PHY.\n");
4955		goto phy_write_end;
4956	}
4957
4958	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4959	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4960
4961	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4962		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4963		  (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4964		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4965		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4966		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4967	status = I40E_ERR_TIMEOUT;
4968	retry = 1000;
4969	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4970	do {
4971		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4972		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4973			status = 0;
4974			break;
4975		}
4976		usleep_range(10, 20);
4977		retry--;
4978	} while (retry);
4979
4980phy_write_end:
4981	return status;
4982}
4983
4984/**
4985 * i40e_write_phy_register
4986 * @hw: pointer to the HW structure
4987 * @page: registers page number
4988 * @reg: register address in the page
4989 * @phy_addr: PHY address on MDIO interface
4990 * @value: PHY register value
4991 *
4992 * Writes value to specified PHY register
4993 **/
4994i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4995				    u8 page, u16 reg, u8 phy_addr, u16 value)
4996{
4997	i40e_status status;
4998
4999	switch (hw->device_id) {
5000	case I40E_DEV_ID_1G_BASE_T_X722:
5001		status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
5002							  value);
5003		break;
5004	case I40E_DEV_ID_1G_BASE_T_BC:
5005	case I40E_DEV_ID_5G_BASE_T_BC:
5006	case I40E_DEV_ID_10G_BASE_T:
5007	case I40E_DEV_ID_10G_BASE_T4:
5008	case I40E_DEV_ID_10G_BASE_T_BC:
5009	case I40E_DEV_ID_10G_BASE_T_X722:
5010	case I40E_DEV_ID_25G_B:
5011	case I40E_DEV_ID_25G_SFP28:
5012		status = i40e_write_phy_register_clause45(hw, page, reg,
5013							  phy_addr, value);
5014		break;
5015	default:
5016		status = I40E_ERR_UNKNOWN_PHY;
5017		break;
5018	}
5019
5020	return status;
5021}
5022
5023/**
5024 * i40e_read_phy_register
5025 * @hw: pointer to the HW structure
5026 * @page: registers page number
5027 * @reg: register address in the page
5028 * @phy_addr: PHY address on MDIO interface
5029 * @value: PHY register value
5030 *
5031 * Reads specified PHY register value
5032 **/
5033i40e_status i40e_read_phy_register(struct i40e_hw *hw,
5034				   u8 page, u16 reg, u8 phy_addr, u16 *value)
5035{
5036	i40e_status status;
5037
5038	switch (hw->device_id) {
5039	case I40E_DEV_ID_1G_BASE_T_X722:
5040		status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
5041							 value);
5042		break;
5043	case I40E_DEV_ID_1G_BASE_T_BC:
5044	case I40E_DEV_ID_5G_BASE_T_BC:
5045	case I40E_DEV_ID_10G_BASE_T:
5046	case I40E_DEV_ID_10G_BASE_T4:
5047	case I40E_DEV_ID_10G_BASE_T_BC:
5048	case I40E_DEV_ID_10G_BASE_T_X722:
5049	case I40E_DEV_ID_25G_B:
5050	case I40E_DEV_ID_25G_SFP28:
5051		status = i40e_read_phy_register_clause45(hw, page, reg,
5052							 phy_addr, value);
5053		break;
5054	default:
5055		status = I40E_ERR_UNKNOWN_PHY;
5056		break;
5057	}
5058
5059	return status;
5060}
5061
5062/**
5063 * i40e_get_phy_address
5064 * @hw: pointer to the HW structure
5065 * @dev_num: PHY port num whose address we want
5066 *
5067 * Gets PHY address for current port
5068 **/
5069u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
5070{
5071	u8 port_num = hw->func_caps.mdio_port_num;
5072	u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
5073
5074	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
5075}
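
/*
 * Illustrative sketch, not part of the upstream driver: combine the PHY
 * address lookup with a register read for the current port, the same
 * pattern i40e_blink_phy_link_led() uses below.
 */
static inline i40e_status i40e_example_read_ext_phy(struct i40e_hw *hw,
						    u8 page, u16 reg,
						    u16 *val)
{
	u32 portnum = rd32(hw, I40E_PFGEN_PORTNUM);
	u8 port_num = (u8)(portnum & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	u8 phy_addr = i40e_get_phy_address(hw, port_num);

	return i40e_read_phy_register(hw, page, reg, phy_addr, val);
}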
5076
5077/**
5078 * i40e_blink_phy_link_led
5079 * @hw: pointer to the HW structure
5080 * @time: how long the LED will blink, in seconds
5081 * @interval: gap between LED on and off, in msecs
5082 *
5083 * Blinks PHY link LED
5084 **/
5085i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
5086				    u32 time, u32 interval)
5087{
5088	i40e_status status = 0;
5089	u32 i;
5090	u16 led_ctl;
5091	u16 gpio_led_port;
5092	u16 led_reg;
5093	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
5094	u8 phy_addr = 0;
5095	u8 port_num;
5096
5097	i = rd32(hw, I40E_PFGEN_PORTNUM);
5098	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5099	phy_addr = i40e_get_phy_address(hw, port_num);
5100
5101	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5102	     led_addr++) {
5103		status = i40e_read_phy_register_clause45(hw,
5104							 I40E_PHY_COM_REG_PAGE,
5105							 led_addr, phy_addr,
5106							 &led_reg);
5107		if (status)
5108			goto phy_blinking_end;
5109		led_ctl = led_reg;
5110		if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5111			led_reg = 0;
5112			status = i40e_write_phy_register_clause45(hw,
5113							 I40E_PHY_COM_REG_PAGE,
5114							 led_addr, phy_addr,
5115							 led_reg);
5116			if (status)
5117				goto phy_blinking_end;
5118			break;
5119		}
5120	}
5121
5122	if (time > 0 && interval > 0) {
5123		for (i = 0; i < time * 1000; i += interval) {
5124			status = i40e_read_phy_register_clause45(hw,
5125						I40E_PHY_COM_REG_PAGE,
5126						led_addr, phy_addr, &led_reg);
5127			if (status)
5128				goto restore_config;
5129			if (led_reg & I40E_PHY_LED_MANUAL_ON)
5130				led_reg = 0;
5131			else
5132				led_reg = I40E_PHY_LED_MANUAL_ON;
5133			status = i40e_write_phy_register_clause45(hw,
5134						I40E_PHY_COM_REG_PAGE,
5135						led_addr, phy_addr, led_reg);
5136			if (status)
5137				goto restore_config;
5138			msleep(interval);
5139		}
5140	}
5141
5142restore_config:
5143	status = i40e_write_phy_register_clause45(hw,
5144						  I40E_PHY_COM_REG_PAGE,
5145						  led_addr, phy_addr, led_ctl);
5146
5147phy_blinking_end:
5148	return status;
5149}
5150
5151/**
5152 * i40e_led_get_reg - read LED register
5153 * @hw: pointer to the HW structure
5154 * @led_addr: LED register address
5155 * @reg_val: read register value
5156 **/
5157static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5158					      u32 *reg_val)
5159{
5160	enum i40e_status_code status;
5161	u8 phy_addr = 0;
5162	u8 port_num;
5163	u32 i;
5164
5165	*reg_val = 0;
5166	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5167		status =
5168		       i40e_aq_get_phy_register(hw,
5169						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5170						I40E_PHY_COM_REG_PAGE, true,
5171						I40E_PHY_LED_PROV_REG_1,
5172						reg_val, NULL);
5173	} else {
5174		i = rd32(hw, I40E_PFGEN_PORTNUM);
5175		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5176		phy_addr = i40e_get_phy_address(hw, port_num);
5177		status = i40e_read_phy_register_clause45(hw,
5178							 I40E_PHY_COM_REG_PAGE,
5179							 led_addr, phy_addr,
5180							 (u16 *)reg_val);
5181	}
5182	return status;
5183}
5184
5185/**
5186 * i40e_led_set_reg - write LED register
5187 * @hw: pointer to the HW structure
5188 * @led_addr: LED register address
5189 * @reg_val: register value to write
5190 **/
5191static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5192					      u32 reg_val)
5193{
5194	enum i40e_status_code status;
5195	u8 phy_addr = 0;
5196	u8 port_num;
5197	u32 i;
5198
5199	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5200		status =
5201		       i40e_aq_set_phy_register(hw,
5202						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5203						I40E_PHY_COM_REG_PAGE, true,
5204						I40E_PHY_LED_PROV_REG_1,
5205						reg_val, NULL);
5206	} else {
5207		i = rd32(hw, I40E_PFGEN_PORTNUM);
5208		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5209		phy_addr = i40e_get_phy_address(hw, port_num);
5210		status = i40e_write_phy_register_clause45(hw,
5211							  I40E_PHY_COM_REG_PAGE,
5212							  led_addr, phy_addr,
5213							  (u16)reg_val);
5214	}
5215
5216	return status;
5217}
5218
5219/**
5220 * i40e_led_get_phy - return current on/off mode
5221 * @hw: pointer to the hw struct
5222 * @led_addr: address of led register to use
5223 * @val: original value of register to use
5224 *
5225 **/
5226i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5227			     u16 *val)
5228{
5229	i40e_status status = 0;
5230	u16 gpio_led_port;
5231	u8 phy_addr = 0;
5232	u16 reg_val;
5233	u16 temp_addr;
5234	u8 port_num;
5235	u32 i;
5236	u32 reg_val_aq;
5237
5238	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5239		status =
5240		      i40e_aq_get_phy_register(hw,
5241					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5242					       I40E_PHY_COM_REG_PAGE, true,
5243					       I40E_PHY_LED_PROV_REG_1,
5244					       &reg_val_aq, NULL);
5245		if (status == I40E_SUCCESS)
5246			*val = (u16)reg_val_aq;
5247		return status;
5248	}
5249	temp_addr = I40E_PHY_LED_PROV_REG_1;
5250	i = rd32(hw, I40E_PFGEN_PORTNUM);
5251	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5252	phy_addr = i40e_get_phy_address(hw, port_num);
5253
5254	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5255	     temp_addr++) {
5256		status = i40e_read_phy_register_clause45(hw,
5257							 I40E_PHY_COM_REG_PAGE,
5258							 temp_addr, phy_addr,
5259							 &reg_val);
5260		if (status)
5261			return status;
5262		*val = reg_val;
5263		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5264			*led_addr = temp_addr;
5265			break;
5266		}
5267	}
5268	return status;
5269}
5270
5271/**
5272 * i40e_led_set_phy
5273 * @hw: pointer to the HW structure
5274 * @on: true to turn the LED on, false to turn it off
5275 * @led_addr: address of the LED register to use
5276 * @mode: original register value; set I40E_PHY_LED_MODE_ORIG to restore it
5277 *
5278 * Set the LED on or off when controlled by the PHY
5279 *
5280 **/
5281i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5282			     u16 led_addr, u32 mode)
5283{
5284	i40e_status status = 0;
5285	u32 led_ctl = 0;
5286	u32 led_reg = 0;
5287
5288	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5289	if (status)
5290		return status;
5291	led_ctl = led_reg;
5292	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5293		led_reg = 0;
5294		status = i40e_led_set_reg(hw, led_addr, led_reg);
5295		if (status)
5296			return status;
5297	}
5298	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5299	if (status)
5300		goto restore_config;
5301	if (on)
5302		led_reg = I40E_PHY_LED_MANUAL_ON;
5303	else
5304		led_reg = 0;
5305
5306	status = i40e_led_set_reg(hw, led_addr, led_reg);
5307	if (status)
5308		goto restore_config;
5309	if (mode & I40E_PHY_LED_MODE_ORIG) {
5310		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5311		status = i40e_led_set_reg(hw, led_addr, led_ctl);
5312	}
5313	return status;
5314
5315restore_config:
5316	status = i40e_led_set_reg(hw, led_addr, led_ctl);
5317	return status;
5318}
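
/*
 * Illustrative sketch, not part of the upstream driver: an identify-LED
 * caller first saves the register via i40e_led_get_phy(), forces the LED
 * on, and later restores the saved value by passing it back with
 * I40E_PHY_LED_MODE_ORIG set.
 */
static inline i40e_status i40e_example_identify_led(struct i40e_hw *hw,
						    bool on, u16 led_addr,
						    u16 saved_val)
{
	if (on)
		return i40e_led_set_phy(hw, true, led_addr, 0);

	return i40e_led_set_phy(hw, false, led_addr,
				saved_val | I40E_PHY_LED_MODE_ORIG);
}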
5319
5320/**
5321 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5322 * @hw: pointer to the hw struct
5323 * @reg_addr: register address
5324 * @reg_val: ptr to register value
5325 * @cmd_details: pointer to command details structure or NULL
5326 *
5327 * Use the firmware to read the Rx control register,
5328 * especially useful if the Rx unit is under heavy pressure
5329 **/
5330i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5331				u32 reg_addr, u32 *reg_val,
5332				struct i40e_asq_cmd_details *cmd_details)
5333{
5334	struct i40e_aq_desc desc;
5335	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5336		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5337	i40e_status status;
5338
5339	if (!reg_val)
5340		return I40E_ERR_PARAM;
5341
5342	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5343
5344	cmd_resp->address = cpu_to_le32(reg_addr);
5345
5346	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5347
5348	if (status == 0)
5349		*reg_val = le32_to_cpu(cmd_resp->value);
5350
5351	return status;
5352}
5353
5354/**
5355 * i40e_read_rx_ctl - read from an Rx control register
5356 * @hw: pointer to the hw struct
5357 * @reg_addr: register address
5358 **/
5359u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5360{
5361	i40e_status status = 0;
5362	bool use_register;
5363	int retry = 5;
5364	u32 val = 0;
5365
5366	use_register = (((hw->aq.api_maj_ver == 1) &&
5367			(hw->aq.api_min_ver < 5)) ||
5368			(hw->mac.type == I40E_MAC_X722));
5369	if (!use_register) {
5370do_retry:
5371		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5372		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5373			usleep_range(1000, 2000);
5374			retry--;
5375			goto do_retry;
5376		}
5377	}
5378
5379	/* if the AQ access failed, try the old-fashioned way */
5380	if (status || use_register)
5381		val = rd32(hw, reg_addr);
5382
5383	return val;
5384}
5385
5386/**
5387 * i40e_aq_rx_ctl_write_register
5388 * @hw: pointer to the hw struct
5389 * @reg_addr: register address
5390 * @reg_val: register value
5391 * @cmd_details: pointer to command details structure or NULL
5392 *
5393 * Use the firmware to write to an Rx control register,
5394 * especially useful if the Rx unit is under heavy pressure
5395 **/
5396i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5397				u32 reg_addr, u32 reg_val,
5398				struct i40e_asq_cmd_details *cmd_details)
5399{
5400	struct i40e_aq_desc desc;
5401	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5402		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5403	i40e_status status;
5404
5405	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5406
5407	cmd->address = cpu_to_le32(reg_addr);
5408	cmd->value = cpu_to_le32(reg_val);
5409
5410	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5411
5412	return status;
5413}
5414
5415/**
5416 * i40e_write_rx_ctl - write to an Rx control register
5417 * @hw: pointer to the hw struct
5418 * @reg_addr: register address
5419 * @reg_val: register value
5420 **/
5421void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5422{
5423	i40e_status status = 0;
5424	bool use_register;
5425	int retry = 5;
5426
5427	use_register = (((hw->aq.api_maj_ver == 1) &&
5428			(hw->aq.api_min_ver < 5)) ||
5429			(hw->mac.type == I40E_MAC_X722));
5430	if (!use_register) {
5431do_retry:
5432		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5433						       reg_val, NULL);
5434		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5435			usleep_range(1000, 2000);
5436			retry--;
5437			goto do_retry;
5438		}
5439	}
5440
5441	/* if the AQ access failed, try the old-fashioned way */
5442	if (status || use_register)
5443		wr32(hw, reg_addr, reg_val);
5444}
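
/*
 * Illustrative sketch, not part of the upstream driver: Rx control registers
 * are updated with a read-modify-write through these wrappers, as
 * i40e_set_filter_control() does above.  The register and masks here are
 * caller-supplied placeholders.
 */
static inline void i40e_example_rx_ctl_rmw(struct i40e_hw *hw, u32 reg,
					   u32 clear_mask, u32 set_bits)
{
	u32 val = i40e_read_rx_ctl(hw, reg);

	val &= ~clear_mask;
	val |= set_bits;
	i40e_write_rx_ctl(hw, reg, val);
}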
5445
5446/**
5447 * i40e_mdio_if_number_selection - MDIO I/F number selection
5448 * @hw: pointer to the hw struct
5449 * @set_mdio: use MDIO I/F number specified by mdio_num
5450 * @mdio_num: MDIO I/F number
5451 * @cmd: pointer to PHY Register command structure
5452 **/
5453static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5454					  u8 mdio_num,
5455					  struct i40e_aqc_phy_register_access *cmd)
5456{
5457	if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5458		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5459			cmd->cmd_flags |=
5460				I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5461				((mdio_num <<
5462				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5463				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5464		else
5465			i40e_debug(hw, I40E_DEBUG_PHY,
5466				   "MDIO I/F number selection not supported by current FW version.\n");
5467	}
5468}
5469
5470/**
5471 * i40e_aq_set_phy_register_ext
5472 * @hw: pointer to the hw struct
5473 * @phy_select: select which phy should be accessed
5474 * @dev_addr: PHY device address
5475 * @page_change: flag to indicate if phy page should be updated
5476 * @set_mdio: use MDIO I/F number specified by mdio_num
5477 * @mdio_num: MDIO I/F number
5478 * @reg_addr: PHY register address
5479 * @reg_val: new register value
5480 * @cmd_details: pointer to command details structure or NULL
5481 *
5482 * Write the external PHY register.
5483 * NOTE: In common cases the MDIO I/F number should not be changed; that's why
5484 * you may use the simple wrapper i40e_aq_set_phy_register.
5485 **/
5486enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5487			     u8 phy_select, u8 dev_addr, bool page_change,
5488			     bool set_mdio, u8 mdio_num,
5489			     u32 reg_addr, u32 reg_val,
5490			     struct i40e_asq_cmd_details *cmd_details)
5491{
5492	struct i40e_aq_desc desc;
5493	struct i40e_aqc_phy_register_access *cmd =
5494		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5495	i40e_status status;
5496
5497	i40e_fill_default_direct_cmd_desc(&desc,
5498					  i40e_aqc_opc_set_phy_register);
5499
5500	cmd->phy_interface = phy_select;
5501	cmd->dev_address = dev_addr;
5502	cmd->reg_address = cpu_to_le32(reg_addr);
5503	cmd->reg_value = cpu_to_le32(reg_val);
5504
5505	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5506
5507	if (!page_change)
5508		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5509
5510	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5511
5512	return status;
5513}
5514
5515/**
5516 * i40e_aq_get_phy_register_ext
5517 * @hw: pointer to the hw struct
5518 * @phy_select: select which phy should be accessed
5519 * @dev_addr: PHY device address
5520 * @page_change: flag to indicate if phy page should be updated
5521 * @set_mdio: use MDIO I/F number specified by mdio_num
5522 * @mdio_num: MDIO I/F number
5523 * @reg_addr: PHY register address
5524 * @reg_val: read register value
5525 * @cmd_details: pointer to command details structure or NULL
5526 *
5527 * Read the external PHY register.
5528 * NOTE: In common cases the MDIO I/F number should not be changed; that's why
5529 * you may use the simple wrapper i40e_aq_get_phy_register.
5530 **/
5531enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5532			     u8 phy_select, u8 dev_addr, bool page_change,
5533			     bool set_mdio, u8 mdio_num,
5534			     u32 reg_addr, u32 *reg_val,
5535			     struct i40e_asq_cmd_details *cmd_details)
5536{
5537	struct i40e_aq_desc desc;
5538	struct i40e_aqc_phy_register_access *cmd =
5539		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5540	i40e_status status;
5541
5542	i40e_fill_default_direct_cmd_desc(&desc,
5543					  i40e_aqc_opc_get_phy_register);
5544
5545	cmd->phy_interface = phy_select;
5546	cmd->dev_address = dev_addr;
5547	cmd->reg_address = cpu_to_le32(reg_addr);
5548
5549	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5550
5551	if (!page_change)
5552		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5553
5554	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5555	if (!status)
5556		*reg_val = le32_to_cpu(cmd->reg_value);
5557
5558	return status;
5559}
5560
5561/**
5562 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5563 * @hw: pointer to the hw struct
5564 * @buff: command buffer (size in bytes = buff_size)
5565 * @buff_size: buffer size in bytes
5566 * @track_id: package tracking id
5567 * @error_offset: returns error offset
5568 * @error_info: returns error information
5569 * @cmd_details: pointer to command details structure or NULL
5570 **/
5571enum
5572i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5573				   u16 buff_size, u32 track_id,
5574				   u32 *error_offset, u32 *error_info,
5575				   struct i40e_asq_cmd_details *cmd_details)
5576{
5577	struct i40e_aq_desc desc;
5578	struct i40e_aqc_write_personalization_profile *cmd =
5579		(struct i40e_aqc_write_personalization_profile *)
5580		&desc.params.raw;
5581	struct i40e_aqc_write_ddp_resp *resp;
5582	i40e_status status;
5583
5584	i40e_fill_default_direct_cmd_desc(&desc,
5585					  i40e_aqc_opc_write_personalization_profile);
5586
5587	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5588	if (buff_size > I40E_AQ_LARGE_BUF)
5589		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5590
5591	desc.datalen = cpu_to_le16(buff_size);
5592
5593	cmd->profile_track_id = cpu_to_le32(track_id);
5594
5595	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5596	if (!status) {
5597		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5598		if (error_offset)
5599			*error_offset = le32_to_cpu(resp->error_offset);
5600		if (error_info)
5601			*error_info = le32_to_cpu(resp->error_info);
5602	}
5603
5604	return status;
5605}
5606
5607/**
5608 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5609 * @hw: pointer to the hw struct
5610 * @buff: command buffer (size in bytes = buff_size)
5611 * @buff_size: buffer size in bytes
5612 * @flags: AdminQ command flags
5613 * @cmd_details: pointer to command details structure or NULL
5614 **/
5615enum
5616i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5617				      u16 buff_size, u8 flags,
5618				      struct i40e_asq_cmd_details *cmd_details)
5619{
5620	struct i40e_aq_desc desc;
5621	struct i40e_aqc_get_applied_profiles *cmd =
5622		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5623	i40e_status status;
5624
5625	i40e_fill_default_direct_cmd_desc(&desc,
5626					  i40e_aqc_opc_get_personalization_profile_list);
5627
5628	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5629	if (buff_size > I40E_AQ_LARGE_BUF)
5630		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5631	desc.datalen = cpu_to_le16(buff_size);
5632
5633	cmd->flags = flags;
5634
5635	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5636
5637	return status;
5638}
5639
5640/**
5641 * i40e_find_segment_in_package
5642 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5643 * @pkg_hdr: pointer to the package header to be searched
5644 *
5645 * This function searches a package file for a particular segment type. On
5646 * success it returns a pointer to the segment header, otherwise it will
5647 * return NULL.
5648 **/
5649struct i40e_generic_seg_header *
5650i40e_find_segment_in_package(u32 segment_type,
5651			     struct i40e_package_header *pkg_hdr)
5652{
5653	struct i40e_generic_seg_header *segment;
5654	u32 i;
5655
5656	/* Search all package segments for the requested segment type */
5657	for (i = 0; i < pkg_hdr->segment_count; i++) {
5658		segment =
5659			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5660			 pkg_hdr->segment_offset[i]);
5661
5662		if (segment->type == segment_type)
5663			return segment;
5664	}
5665
5666	return NULL;
5667}
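
/*
 * Illustrative sketch, not part of the upstream driver: locate the i40e
 * profile segment inside a DDP package that has already been read into
 * memory (for example via request_firmware()).
 */
static inline struct i40e_profile_segment *
i40e_example_get_profile_segment(struct i40e_package_header *pkg_hdr)
{
	return (struct i40e_profile_segment *)
		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
}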
5668
5669/* Get section table in profile */
5670#define I40E_SECTION_TABLE(profile, sec_tbl)				\
5671	do {								\
5672		struct i40e_profile_segment *p = (profile);		\
5673		u32 count;						\
5674		u32 *nvm;						\
5675		count = p->device_table_count;				\
5676		nvm = (u32 *)&p->device_table[count];			\
5677		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5678	} while (0)
5679
5680/* Get section header in profile */
5681#define I40E_SECTION_HEADER(profile, offset)				\
5682	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5683
5684/**
5685 * i40e_find_section_in_profile
5686 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5687 * @profile: pointer to the i40e segment header to be searched
5688 *
5689 * This function searches i40e segment for a particular section type. On
5690 * success it returns a pointer to the section header, otherwise it will
5691 * return NULL.
5692 **/
5693struct i40e_profile_section_header *
5694i40e_find_section_in_profile(u32 section_type,
5695			     struct i40e_profile_segment *profile)
5696{
5697	struct i40e_profile_section_header *sec;
5698	struct i40e_section_table *sec_tbl;
5699	u32 sec_off;
5700	u32 i;
5701
5702	if (profile->header.type != SEGMENT_TYPE_I40E)
5703		return NULL;
5704
5705	I40E_SECTION_TABLE(profile, sec_tbl);
5706
5707	for (i = 0; i < sec_tbl->section_count; i++) {
5708		sec_off = sec_tbl->section_offset[i];
5709		sec = I40E_SECTION_HEADER(profile, sec_off);
5710		if (sec->section.type == section_type)
5711			return sec;
5712	}
5713
5714	return NULL;
5715}
5716
5717/**
5718 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5719 * @hw: pointer to the hw struct
5720 * @aq: command buffer containing all data to execute AQ
5721 **/
5722static enum
5723i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5724					  struct i40e_profile_aq_section *aq)
5725{
5726	i40e_status status;
5727	struct i40e_aq_desc desc;
5728	u8 *msg = NULL;
5729	u16 msglen;
5730
5731	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5732	desc.flags |= cpu_to_le16(aq->flags);
5733	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5734
5735	msglen = aq->datalen;
5736	if (msglen) {
5737		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5738						I40E_AQ_FLAG_RD));
5739		if (msglen > I40E_AQ_LARGE_BUF)
5740			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5741		desc.datalen = cpu_to_le16(msglen);
5742		msg = &aq->data[0];
5743	}
5744
5745	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5746
5747	if (status) {
5748		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5749			   "unable to exec DDP AQ opcode %u, error %d\n",
5750			   aq->opcode, status);
5751		return status;
5752	}
5753
5754	/* copy returned desc to aq_buf */
5755	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5756
5757	return 0;
5758}
5759
5760/**
5761 * i40e_validate_profile
5762 * @hw: pointer to the hardware structure
5763 * @profile: pointer to the profile segment of the package to be validated
5764 * @track_id: package tracking id
5765 * @rollback: true if the profile is being validated for rollback
5766 *
5767 * Validates device support and the profile's section types.
5768 */
5769static enum i40e_status_code
5770i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5771		      u32 track_id, bool rollback)
5772{
5773	struct i40e_profile_section_header *sec = NULL;
5774	i40e_status status = 0;
5775	struct i40e_section_table *sec_tbl;
5776	u32 vendor_dev_id;
5777	u32 dev_cnt;
5778	u32 sec_off;
5779	u32 i;
5780
5781	if (track_id == I40E_DDP_TRACKID_INVALID) {
5782		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5783		return I40E_NOT_SUPPORTED;
5784	}
5785
5786	dev_cnt = profile->device_table_count;
5787	for (i = 0; i < dev_cnt; i++) {
5788		vendor_dev_id = profile->device_table[i].vendor_dev_id;
5789		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5790		    hw->device_id == (vendor_dev_id & 0xFFFF))
5791			break;
5792	}
5793	if (dev_cnt && i == dev_cnt) {
5794		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5795			   "Device doesn't support DDP\n");
5796		return I40E_ERR_DEVICE_NOT_SUPPORTED;
5797	}
5798
5799	I40E_SECTION_TABLE(profile, sec_tbl);
5800
5801	/* Validate section types */
5802	for (i = 0; i < sec_tbl->section_count; i++) {
5803		sec_off = sec_tbl->section_offset[i];
5804		sec = I40E_SECTION_HEADER(profile, sec_off);
5805		if (rollback) {
5806			if (sec->section.type == SECTION_TYPE_MMIO ||
5807			    sec->section.type == SECTION_TYPE_AQ ||
5808			    sec->section.type == SECTION_TYPE_RB_AQ) {
5809				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5810					   "Not a roll-back package\n");
5811				return I40E_NOT_SUPPORTED;
5812			}
5813		} else {
5814			if (sec->section.type == SECTION_TYPE_RB_AQ ||
5815			    sec->section.type == SECTION_TYPE_RB_MMIO) {
5816				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5817					   "Not an original package\n");
5818				return I40E_NOT_SUPPORTED;
5819			}
5820		}
5821	}
5822
5823	return status;
5824}
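
/* Summary of the checks above: an original package must not carry any
 * roll-back (RB_*) sections, while a roll-back package must not carry MMIO,
 * AQ or RB_AQ sections; a profile containing a disallowed section type is
 * rejected with I40E_NOT_SUPPORTED.
 */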
5825
5826/**
5827 * i40e_write_profile
5828 * @hw: pointer to the hardware structure
5829 * @profile: pointer to the profile segment of the package to be downloaded
5830 * @track_id: package tracking id
5831 *
5832 * Handles the download of a complete package.
5833 */
5834enum i40e_status_code
5835i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5836		   u32 track_id)
5837{
5838	i40e_status status = 0;
5839	struct i40e_section_table *sec_tbl;
5840	struct i40e_profile_section_header *sec = NULL;
5841	struct i40e_profile_aq_section *ddp_aq;
5842	u32 section_size = 0;
5843	u32 offset = 0, info = 0;
5844	u32 sec_off;
5845	u32 i;
5846
5847	status = i40e_validate_profile(hw, profile, track_id, false);
5848	if (status)
5849		return status;
5850
5851	I40E_SECTION_TABLE(profile, sec_tbl);
5852
5853	for (i = 0; i < sec_tbl->section_count; i++) {
5854		sec_off = sec_tbl->section_offset[i];
5855		sec = I40E_SECTION_HEADER(profile, sec_off);
5856		/* Process generic admin command */
5857		if (sec->section.type == SECTION_TYPE_AQ) {
5858			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5859			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5860			if (status) {
5861				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5862					   "Failed to execute aq: section %d, opcode %u\n",
5863					   i, ddp_aq->opcode);
5864				break;
5865			}
5866			sec->section.type = SECTION_TYPE_RB_AQ;
5867		}
5868
5869		/* Skip any non-MMIO sections */
5870		if (sec->section.type != SECTION_TYPE_MMIO)
5871			continue;
5872
5873		section_size = sec->section.size +
5874			sizeof(struct i40e_profile_section_header);
5875
5876		/* Write MMIO section */
5877		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5878					   track_id, &offset, &info, NULL);
5879		if (status) {
5880			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5881				   "Failed to write profile: section %d, offset %d, info %d\n",
5882				   i, offset, info);
5883			break;
5884		}
5885	}
5886	return status;
5887}
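
/* Usage sketch (illustrative only, not part of the driver): loading a DDP
 * profile typically chains the helpers in this file.  The buffer sizing and
 * error handling below are assumptions made for the example.
 *
 *	u8 sec_buf[sizeof(struct i40e_profile_section_header) +
 *		   sizeof(struct i40e_profile_info)];
 *	struct i40e_profile_segment *profile;
 *	enum i40e_status_code status;
 *
 *	profile = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, sec_buf, track_id);
 */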
5888
5889/**
5890 * i40e_rollback_profile
5891 * @hw: pointer to the hardware structure
5892 * @profile: pointer to the profile segment of the package to be removed
5893 * @track_id: package tracking id
5894 *
5895 * Rolls back previously loaded package.
5896 */
5897enum i40e_status_code
5898i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5899		      u32 track_id)
5900{
5901	struct i40e_profile_section_header *sec = NULL;
5902	i40e_status status = 0;
5903	struct i40e_section_table *sec_tbl;
5904	u32 offset = 0, info = 0;
5905	u32 section_size = 0;
5906	u32 sec_off;
5907	int i;
5908
5909	status = i40e_validate_profile(hw, profile, track_id, true);
5910	if (status)
5911		return status;
5912
5913	I40E_SECTION_TABLE(profile, sec_tbl);
5914
5915	/* For rollback write sections in reverse */
5916	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5917		sec_off = sec_tbl->section_offset[i];
5918		sec = I40E_SECTION_HEADER(profile, sec_off);
5919
5920		/* Skip any non-rollback sections */
5921		if (sec->section.type != SECTION_TYPE_RB_MMIO)
5922			continue;
5923
5924		section_size = sec->section.size +
5925			sizeof(struct i40e_profile_section_header);
5926
5927		/* Write roll-back MMIO section */
5928		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5929					   track_id, &offset, &info, NULL);
5930		if (status) {
5931			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5932				   "Failed to write profile: section %d, offset %d, info %d\n",
5933				   i, offset, info);
5934			break;
5935		}
5936	}
5937	return status;
5938}
5939
5940/**
5941 * i40e_add_pinfo_to_list
5942 * @hw: pointer to the hardware structure
5943 * @profile: pointer to the profile segment of the package
5944 * @profile_info_sec: buffer for information section
5945 * @track_id: package tracking id
5946 *
5947 * Register a profile to the list of loaded profiles.
5948 */
5949enum i40e_status_code
5950i40e_add_pinfo_to_list(struct i40e_hw *hw,
5951		       struct i40e_profile_segment *profile,
5952		       u8 *profile_info_sec, u32 track_id)
5953{
5954	i40e_status status = 0;
5955	struct i40e_profile_section_header *sec = NULL;
5956	struct i40e_profile_info *pinfo;
5957	u32 offset = 0, info = 0;
5958
5959	sec = (struct i40e_profile_section_header *)profile_info_sec;
5960	sec->tbl_size = 1;
5961	sec->data_end = sizeof(struct i40e_profile_section_header) +
5962			sizeof(struct i40e_profile_info);
5963	sec->section.type = SECTION_TYPE_INFO;
5964	sec->section.offset = sizeof(struct i40e_profile_section_header);
5965	sec->section.size = sizeof(struct i40e_profile_info);
5966	pinfo = (struct i40e_profile_info *)(profile_info_sec +
5967					     sec->section.offset);
5968	pinfo->track_id = track_id;
5969	pinfo->version = profile->version;
5970	pinfo->op = I40E_DDP_ADD_TRACKID;
5971	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5972
5973	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5974				   track_id, &offset, &info, NULL);
5975
5976	return status;
5977}
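
/* Note: profile_info_sec must provide at least
 * sizeof(struct i40e_profile_section_header) + sizeof(struct i40e_profile_info)
 * bytes, since that is the data_end length sent to firmware above.
 */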
5978
5979/**
5980 * i40e_aq_add_cloud_filters
5981 * @hw: pointer to the hardware structure
5982 * @seid: VSI seid to add cloud filters to
5983 * @filters: Buffer which contains the filters to be added
5984 * @filter_count: number of filters contained in the buffer
5985 *
5986 * Set the cloud filters for a given VSI.  The contents of the
5987 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5988 * of the function.
5989 *
5990 **/
5991enum i40e_status_code
5992i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5993			  struct i40e_aqc_cloud_filters_element_data *filters,
5994			  u8 filter_count)
5995{
5996	struct i40e_aq_desc desc;
5997	struct i40e_aqc_add_remove_cloud_filters *cmd =
5998	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5999	enum i40e_status_code status;
6000	u16 buff_len;
6001
6002	i40e_fill_default_direct_cmd_desc(&desc,
6003					  i40e_aqc_opc_add_cloud_filters);
6004
6005	buff_len = filter_count * sizeof(*filters);
6006	desc.datalen = cpu_to_le16(buff_len);
6007	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6008	cmd->num_filters = filter_count;
6009	cmd->seid = cpu_to_le16(seid);
6010
6011	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6012
6013	return status;
6014}
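
/* Usage sketch (illustrative only, not part of the driver): adding one cloud
 * filter to a VSI.  Filling in the element's match criteria is up to the
 * caller and is elided here; vsi_seid is an assumed variable.
 *
 *	struct i40e_aqc_cloud_filters_element_data filter = {};
 *
 *	... set the element's match criteria and flags ...
 *	status = i40e_aq_add_cloud_filters(hw, vsi_seid, &filter, 1);
 */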
6015
6016/**
6017 * i40e_aq_add_cloud_filters_bb
6018 * @hw: pointer to the hardware structure
6019 * @seid: VSI seid to add cloud filters to
6020 * @filters: Buffer which contains the filters in big buffer to be added
6021 * @filter_count: number of filters contained in the buffer
6022 *
6023 * Set the big buffer cloud filters for a given VSI.  The contents of the
6024 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6025 * function.
6026 *
6027 **/
6028enum i40e_status_code
6029i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6030			     struct i40e_aqc_cloud_filters_element_bb *filters,
6031			     u8 filter_count)
6032{
6033	struct i40e_aq_desc desc;
6034	struct i40e_aqc_add_remove_cloud_filters *cmd =
6035	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6036	i40e_status status;
6037	u16 buff_len;
6038	int i;
6039
6040	i40e_fill_default_direct_cmd_desc(&desc,
6041					  i40e_aqc_opc_add_cloud_filters);
6042
6043	buff_len = filter_count * sizeof(*filters);
6044	desc.datalen = cpu_to_le16(buff_len);
6045	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6046	cmd->num_filters = filter_count;
6047	cmd->seid = cpu_to_le16(seid);
6048	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6049
6050	for (i = 0; i < filter_count; i++) {
6051		u16 tnl_type;
6052		u32 ti;
6053
6054		tnl_type = (le16_to_cpu(filters[i].element.flags) &
6055			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6056			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6057
6058		/* Due to hardware eccentricities, the hardware expects the
6059		 * Geneve VNI one byte higher in the Tenant ID field than
6060		 * other tunnel types, so shift it left by one byte here.
6061		 */
6062		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6063			ti = le32_to_cpu(filters[i].element.tenant_id);
6064			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6065		}
6066	}
6067
6068	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6069
6070	return status;
6071}
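
/* Note on the Geneve handling above: the tenant ID is adjusted in place in
 * the caller's buffer before the command is sent, shifting the VNI up by one
 * byte (e.g. 0x00ABCDEF becomes 0xABCDEF00).  i40e_aq_rem_cloud_filters_bb()
 * below applies the same adjustment when removing filters.
 */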
6072
6073/**
6074 * i40e_aq_rem_cloud_filters
6075 * @hw: pointer to the hardware structure
6076 * @seid: VSI seid to remove cloud filters from
6077 * @filters: Buffer which contains the filters to be removed
6078 * @filter_count: number of filters contained in the buffer
6079 *
6080 * Remove the cloud filters for a given VSI.  The contents of the
6081 * i40e_aqc_cloud_filters_element_data are filled in by the caller
6082 * of the function.
6083 *
6084 **/
6085enum i40e_status_code
6086i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
6087			  struct i40e_aqc_cloud_filters_element_data *filters,
6088			  u8 filter_count)
6089{
6090	struct i40e_aq_desc desc;
6091	struct i40e_aqc_add_remove_cloud_filters *cmd =
6092	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6093	enum i40e_status_code status;
6094	u16 buff_len;
6095
6096	i40e_fill_default_direct_cmd_desc(&desc,
6097					  i40e_aqc_opc_remove_cloud_filters);
6098
6099	buff_len = filter_count * sizeof(*filters);
6100	desc.datalen = cpu_to_le16(buff_len);
6101	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6102	cmd->num_filters = filter_count;
6103	cmd->seid = cpu_to_le16(seid);
6104
6105	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6106
6107	return status;
6108}
6109
6110/**
6111 * i40e_aq_rem_cloud_filters_bb
6112 * @hw: pointer to the hardware structure
6113 * @seid: VSI seid to remove cloud filters from
6114 * @filters: Buffer which contains the filters in big buffer to be removed
6115 * @filter_count: number of filters contained in the buffer
6116 *
6117 * Remove the big buffer cloud filters for a given VSI.  The contents of the
6118 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6119 * function.
6120 *
6121 **/
6122enum i40e_status_code
6123i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6124			     struct i40e_aqc_cloud_filters_element_bb *filters,
6125			     u8 filter_count)
6126{
6127	struct i40e_aq_desc desc;
6128	struct i40e_aqc_add_remove_cloud_filters *cmd =
6129	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6130	i40e_status status;
6131	u16 buff_len;
6132	int i;
6133
6134	i40e_fill_default_direct_cmd_desc(&desc,
6135					  i40e_aqc_opc_remove_cloud_filters);
6136
6137	buff_len = filter_count * sizeof(*filters);
6138	desc.datalen = cpu_to_le16(buff_len);
6139	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6140	cmd->num_filters = filter_count;
6141	cmd->seid = cpu_to_le16(seid);
6142	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6143
6144	for (i = 0; i < filter_count; i++) {
6145		u16 tnl_type;
6146		u32 ti;
6147
6148		tnl_type = (le16_to_cpu(filters[i].element.flags) &
6149			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6150			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6151
6152		/* Due to hardware eccentricities, the hardware expects the
6153		 * Geneve VNI one byte higher in the Tenant ID field than
6154		 * other tunnel types, so shift it left by one byte here.
6155		 */
6156		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6157			ti = le32_to_cpu(filters[i].element.tenant_id);
6158			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6159		}
6160	}
6161
6162	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6163
6164	return status;
6165}