// SPDX-License-Identifier: GPL-2.0
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs after this function
	 * is called. However, we do not want to wait forever, so we'll give
	 * a reasonable time and print a message if we failed to ensure a
	 * reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (can be 1 to 16)
		 * as its own, they may actually belong to different VSIs
		 * (up to 4). We need to find out which queues belong to
		 * which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id, which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
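	/* Build one bitmap covering every queue on this vector. Each VSI
	 * queue owns I40E_VIRTCHNL_SUPPORTED_QTYPES adjacent bits: the even
	 * bit for its Rx queue and the odd bit above it for its Tx queue.
	 * Walking the set bits in order then yields exactly the queue chain
	 * written into the hardware link list below.
	 */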
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				     vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

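	/* struct virtchnl_iwarp_qvlist_info already declares a one-element
	 * qv_info array, so only (num_vectors - 1) extra entries need to be
	 * added to the allocation.
	 */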
	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
						(qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;

	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
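	/* the HMC context keeps the ring base address in 128-byte units,
	 * hence the division below
	 */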
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* always enable 32-byte descriptors */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping pf queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

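		/* Each VSILAN_QTABLE register carries two queue indices, one
		 * per 16-bit half, so seven registers cover this VSI's queue
		 * pairs. 0x7FF in a half means "end of list", which is why
		 * unused slots get the 0x07FF07FF sentinel below.
		 */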
		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, & completes the VF
 * mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSIs */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again; only clear its values in the
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

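	/* Poll the Transactions Pending bit of the VF's PCIe Device Status
	 * register through the CIAA/CIAD indirect config-space window: the
	 * config offset and VF number are written to CIAA, and the value is
	 * then read back through CIAD.
	 */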
	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give any VF config functions that may still
	 * be running at this point some time to finish.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

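	/* reply buffer: the base virtchnl_vf_resource struct plus one
	 * virtchnl_vsi_resource entry per reported VSI (num_vsis is 1 here,
	 * the VF's LAN VSI)
	 */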
1728	len = (sizeof(struct virtchnl_vf_resource) +
1729	       sizeof(struct virtchnl_vsi_resource) * num_vsis);
1730
1731	vfres = kzalloc(len, GFP_KERNEL);
1732	if (!vfres) {
1733		aq_ret = I40E_ERR_NO_MEMORY;
1734		len = 0;
1735		goto err;
1736	}
1737	if (VF_IS_V11(&vf->vf_ver))
1738		vf->driver_caps = *(u32 *)msg;
1739	else
1740		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1741				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1742				  VIRTCHNL_VF_OFFLOAD_VLAN;
1743
1744	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1745	vsi = pf->vsi[vf->lan_vsi_idx];
1746	if (!vsi->info.pvid)
1747		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1748
1749	if (i40e_vf_client_capable(pf, vf->vf_id) &&
1750	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
1751		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
1752		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1753	} else {
1754		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1755	}
1756
1757	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1758		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1759	} else {
1760		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
1761		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
1762			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1763		else
1764			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1765	}
1766
1767	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
1768		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1769			vfres->vf_cap_flags |=
1770				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1771	}
1772
1773	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1774		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1775
1776	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
1777	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
1778		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1779
1780	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
1781		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
1782			dev_err(&pf->pdev->dev,
1783				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
1784				 vf->vf_id);
1785			aq_ret = I40E_ERR_PARAM;
1786			goto err;
1787		}
1788		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1789	}
1790
1791	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
1792		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1793			vfres->vf_cap_flags |=
1794					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1795	}
1796
1797	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1798		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1799
1800	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
1801		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
1802
1803	vfres->num_vsis = num_vsis;
1804	vfres->num_queue_pairs = vf->num_queue_pairs;
1805	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1806	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
1807	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
1808
1809	if (vf->lan_vsi_idx) {
1810		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
1811		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1812		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
1813		/* VFs only use TC 0 */
1814		vfres->vsi_res[0].qset_handle
1815					  = le16_to_cpu(vsi->info.qs_handle[0]);
1816		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1817				vf->default_lan_addr.addr);
1818	}
1819	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1820
1821err:
1822	/* send the response back to the VF */
1823	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
1824				     aq_ret, (u8 *)vfres, len);
1825
1826	kfree(vfres);
1827	return ret;
1828}
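
/* Editor's illustration: a minimal sketch of how the VF side might consume
 * the vf_cap_flags negotiated above. This is not PF driver code; the helper
 * name is hypothetical and only demonstrates the flag-intersection pattern,
 * assuming "vfres" holds the VIRTCHNL_OP_GET_VF_RESOURCES reply.
 */
static inline bool example_vf_has_rss_pf(struct virtchnl_vf_resource *vfres)
{
	/* The PF only grants capabilities the VF asked for in driver_caps,
	 * so a set bit means both ends support the offload.
	 */
	return !!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF);
}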
1829
1830/**
1831 * i40e_vc_reset_vf_msg
1832 * @vf: pointer to the VF info
1835 *
1836 * called from the VF to reset itself;
1837 * unlike other virtchnl messages, the PF driver
1838 * doesn't send a response back to the VF
1839 **/
1840static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1841{
1842	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
1843		i40e_reset_vf(vf, false);
1844}
1845
1846/**
1847 * i40e_getnum_vf_vsi_vlan_filters
1848 * @vsi: pointer to the vsi
1849 *
1850 * called to get the number of VLANs offloaded on this VF
1851 **/
1852static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1853{
1854	struct i40e_mac_filter *f;
1855	int num_vlans = 0, bkt;
1856
1857	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1858		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1859			num_vlans++;
1860	}
1861
1862	return num_vlans;
1863}
1864
1865/**
1866 * i40e_vc_config_promiscuous_mode_msg
1867 * @vf: pointer to the VF info
1868 * @msg: pointer to the msg buffer
1869 * @msglen: msg length
1870 *
1871 * called from the VF to configure the promiscuous mode of
1872 * VF VSIs
1873 **/
1874static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1875					       u8 *msg, u16 msglen)
1876{
1877	struct virtchnl_promisc_info *info =
1878	    (struct virtchnl_promisc_info *)msg;
1879	struct i40e_pf *pf = vf->pf;
1880	struct i40e_hw *hw = &pf->hw;
1881	struct i40e_mac_filter *f;
1882	i40e_status aq_ret = 0;
1883	bool allmulti = false;
1884	struct i40e_vsi *vsi;
1885	bool alluni = false;
1886	int aq_err = 0;
1887	int bkt;
1888
1889	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
1890	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
1891	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1892	    !vsi) {
1893		aq_ret = I40E_ERR_PARAM;
1894		goto error_param;
1895	}
1896	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
1897		dev_err(&pf->pdev->dev,
1898			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
1899			vf->vf_id);
1900		/* Lie to the VF on purpose. */
1901		aq_ret = 0;
1902		goto error_param;
1903	}
1904	/* Multicast promiscuous handling */
1905	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
1906		allmulti = true;
1907
1908	if (vf->port_vlan_id) {
1909		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
1910							    allmulti,
1911							    vf->port_vlan_id,
1912							    NULL);
1913	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1914		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1915			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1916				continue;
1917			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
1918								    vsi->seid,
1919								    allmulti,
1920								    f->vlan,
1921								    NULL);
1922			aq_err = pf->hw.aq.asq_last_status;
1923			if (aq_ret) {
1924				dev_err(&pf->pdev->dev,
1925					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
1926					f->vlan,
1927					i40e_stat_str(&pf->hw, aq_ret),
1928					i40e_aq_str(&pf->hw, aq_err));
1929				break;
1930			}
1931		}
1932	} else {
1933		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1934							       allmulti, NULL);
1935		aq_err = pf->hw.aq.asq_last_status;
1936		if (aq_ret) {
1937			dev_err(&pf->pdev->dev,
1938				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1939				vf->vf_id,
1940				i40e_stat_str(&pf->hw, aq_ret),
1941				i40e_aq_str(&pf->hw, aq_err));
1942			goto error_param;
1943		}
1944	}
1945
1946	if (!aq_ret) {
1947		dev_info(&pf->pdev->dev,
1948			 "VF %d successfully set multicast promiscuous mode\n",
1949			 vf->vf_id);
1950		if (allmulti)
1951			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1952		else
1953			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1954	}
1955
1956	if (info->flags & FLAG_VF_UNICAST_PROMISC)
1957		alluni = true;
1958	if (vf->port_vlan_id) {
1959		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
1960							    alluni,
1961							    vf->port_vlan_id,
1962							    NULL);
1963	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1964		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1965			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1966				continue;
1967			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
1968								    vsi->seid,
1969								    alluni,
1970								    f->vlan,
1971								    NULL);
1972			aq_err = pf->hw.aq.asq_last_status;
1973			if (aq_ret)
1974				dev_err(&pf->pdev->dev,
1975					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
1976					f->vlan,
1977					i40e_stat_str(&pf->hw, aq_ret),
1978					i40e_aq_str(&pf->hw, aq_err));
1979		}
1980	} else {
1981		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1982							     alluni, NULL,
1983							     true);
1984		aq_err = pf->hw.aq.asq_last_status;
1985		if (aq_ret) {
1986			dev_err(&pf->pdev->dev,
1987				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
1988				vf->vf_id, info->flags,
1989				i40e_stat_str(&pf->hw, aq_ret),
1990				i40e_aq_str(&pf->hw, aq_err));
1991			goto error_param;
1992		}
1993	}
1994
1995	if (!aq_ret) {
1996		dev_info(&pf->pdev->dev,
1997			 "VF %d successfully set unicast promiscuous mode\n",
1998			 vf->vf_id);
1999		if (alluni)
2000			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
2001		else
2002			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
2003	}
2004
2005error_param:
2006	/* send the response to the VF */
2007	return i40e_vc_send_resp_to_vf(vf,
2008				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2009				       aq_ret);
2010}
2011
2012/**
2013 * i40e_vc_config_queues_msg
2014 * @vf: pointer to the VF info
2015 * @msg: pointer to the msg buffer
2016 * @msglen: msg length
2017 *
2018 * called from the VF to configure the rx/tx
2019 * queues
2020 **/
2021static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2022{
2023	struct virtchnl_vsi_queue_config_info *qci =
2024	    (struct virtchnl_vsi_queue_config_info *)msg;
2025	struct virtchnl_queue_pair_info *qpi;
2026	struct i40e_pf *pf = vf->pf;
2027	u16 vsi_id, vsi_queue_id = 0;
2028	i40e_status aq_ret = 0;
2029	int i, j = 0, idx = 0;
2030
2031	vsi_id = qci->vsi_id;
2032
2033	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2034		aq_ret = I40E_ERR_PARAM;
2035		goto error_param;
2036	}
2037
2038	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2039		aq_ret = I40E_ERR_PARAM;
2040		goto error_param;
2041	}
2042
2043	for (i = 0; i < qci->num_queue_pairs; i++) {
2044		qpi = &qci->qpair[i];
2045
2046		if (!vf->adq_enabled) {
2047			vsi_queue_id = qpi->txq.queue_id;
2048
2049			if (qpi->txq.vsi_id != qci->vsi_id ||
2050			    qpi->rxq.vsi_id != qci->vsi_id ||
2051			    qpi->rxq.queue_id != vsi_queue_id) {
2052				aq_ret = I40E_ERR_PARAM;
2053				goto error_param;
2054			}
2055		}
2056
2057		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
2058			aq_ret = I40E_ERR_PARAM;
2059			goto error_param;
2060		}
2061
2062		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2063					     &qpi->rxq) ||
2064		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2065					     &qpi->txq)) {
2066			aq_ret = I40E_ERR_PARAM;
2067			goto error_param;
2068		}
2069
2070		/* For ADq there can be up to 4 VSIs with max 4 queues each.
2071		 * The VF does not know about these additional VSIs; all it
2072		 * cares about is its own queues. The PF configures these queues
2073		 * on the appropriate VSIs based on the TC mapping.
2074		 */
2075		if (vf->adq_enabled) {
2076			if (j == (vf->ch[idx].num_qps - 1)) {
2077				idx++;
2078				j = 0; /* resetting the queue count */
2079				vsi_queue_id = 0;
2080			} else {
2081				j++;
2082				vsi_queue_id++;
2083			}
2084			vsi_id = vf->ch[idx].vsi_id;
2085		}
2086	}
2087	/* set vsi num_queue_pairs in use to num configured by VF */
2088	if (!vf->adq_enabled) {
2089		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2090			qci->num_queue_pairs;
2091	} else {
2092		for (i = 0; i < vf->num_tc; i++)
2093			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2094			       vf->ch[i].num_qps;
2095	}
2096
2097error_param:
2098	/* send the response to the VF */
2099	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2100				       aq_ret);
2101}
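
/* Editor's note: a worked example of the ADq walk above, assuming 4 channels
 * with 4 queue pairs each (num_qps == 4 for every channel):
 *
 *   qci->qpair index i:  0  1  2  3 | 4  5  6  7 | ...
 *   channel idx:         0  0  0  0 | 1  1  1  1 | ...
 *   vsi_queue_id:        0  1  2  3 | 0  1  2  3 | ...
 *
 * i.e. j counts queues within the current channel, and both j and
 * vsi_queue_id wrap back to 0 whenever a channel boundary is crossed.
 */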
2102
2103/**
2104 * i40e_validate_queue_map
2105 * @vf: pointer to the VF info
 * @vsi_id: VSI id
2106 * @queuemap: Tx or Rx queue map
2107 *
2108 * check if Tx or Rx queue map is valid
2109 **/
2110static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2111				   unsigned long queuemap)
2112{
2113	u16 vsi_queue_id, queue_id;
2114
2115	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2116		if (vf->adq_enabled) {
2117			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2118			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2119		} else {
2120			queue_id = vsi_queue_id;
2121		}
2122
2123		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2124			return -EINVAL;
2125	}
2126
2127	return 0;
2128}
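
/* Editor's note: a worked example of the ADq index arithmetic above, using
 * this driver's constants I40E_MAX_VF_VSI and I40E_DEFAULT_QUEUES_PER_VF
 * (both 4 here): vsi_queue_id 6 selects vf->ch[6 / 4] == ch[1] and
 * queue_id 6 % 4 == 2, i.e. the VF's flat queue index 6 is queue 2 of the
 * second channel VSI.
 */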
2129
2130/**
2131 * i40e_vc_config_irq_map_msg
2132 * @vf: pointer to the VF info
2133 * @msg: pointer to the msg buffer
2134 * @msglen: msg length
2135 *
2136 * called from the VF to configure the irq to
2137 * queue map
2138 **/
2139static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2140{
2141	struct virtchnl_irq_map_info *irqmap_info =
2142	    (struct virtchnl_irq_map_info *)msg;
2143	struct virtchnl_vector_map *map;
2144	u16 vsi_id, vector_id;
2145	i40e_status aq_ret = 0;
2146	int i;
2147
2148	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2149		aq_ret = I40E_ERR_PARAM;
2150		goto error_param;
2151	}
2152
2153	for (i = 0; i < irqmap_info->num_vectors; i++) {
2154		map = &irqmap_info->vecmap[i];
2155		vector_id = map->vector_id;
2156		vsi_id = map->vsi_id;
2157		/* validate msg params */
2158		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
2159		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2160			aq_ret = I40E_ERR_PARAM;
2161			goto error_param;
2162		}
2163
2164		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2165			aq_ret = I40E_ERR_PARAM;
2166			goto error_param;
2167		}
2168
2169		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2170			aq_ret = I40E_ERR_PARAM;
2171			goto error_param;
2172		}
2173
2174		i40e_config_irq_link_list(vf, vsi_id, map);
2175	}
2176error_param:
2177	/* send the response to the VF */
2178	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2179				       aq_ret);
2180}
2181
2182/**
2183 * i40e_vc_enable_queues_msg
2184 * @vf: pointer to the VF info
2185 * @msg: pointer to the msg buffer
2186 * @msglen: msg length
2187 *
2188 * called from the VF to enable all or specific queue(s)
2189 **/
2190static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2191{
2192	struct virtchnl_queue_select *vqs =
2193	    (struct virtchnl_queue_select *)msg;
2194	struct i40e_pf *pf = vf->pf;
2195	u16 vsi_id = vqs->vsi_id;
2196	i40e_status aq_ret = 0;
2197	int i;
2198
2199	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2200		aq_ret = I40E_ERR_PARAM;
2201		goto error_param;
2202	}
2203
2204	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2205		aq_ret = I40E_ERR_PARAM;
2206		goto error_param;
2207	}
2208
2209	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
2210		aq_ret = I40E_ERR_PARAM;
2211		goto error_param;
2212	}
2213
2214	if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
2215		aq_ret = I40E_ERR_TIMEOUT;
2216
2217	/* need to start the rings for additional ADq VSIs as well */
2218	if (vf->adq_enabled) {
2219		/* zero belongs to LAN VSI */
2220		for (i = 1; i < vf->num_tc; i++) {
2221			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2222				aq_ret = I40E_ERR_TIMEOUT;
2223		}
2224	}
2225
2226error_param:
2227	/* send the response to the VF */
2228	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2229				       aq_ret);
2230}
2231
2232/**
2233 * i40e_vc_disable_queues_msg
2234 * @vf: pointer to the VF info
2235 * @msg: pointer to the msg buffer
2236 * @msglen: msg length
2237 *
2238 * called from the VF to disable all or specific
2239 * queue(s)
2240 **/
2241static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2242{
2243	struct virtchnl_queue_select *vqs =
2244	    (struct virtchnl_queue_select *)msg;
2245	struct i40e_pf *pf = vf->pf;
2246	i40e_status aq_ret = 0;
2247
2248	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2249		aq_ret = I40E_ERR_PARAM;
2250		goto error_param;
2251	}
2252
2253	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2254		aq_ret = I40E_ERR_PARAM;
2255		goto error_param;
2256	}
2257
2258	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
2259		aq_ret = I40E_ERR_PARAM;
2260		goto error_param;
2261	}
2262
2263	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
2264
2265error_param:
2266	/* send the response to the VF */
2267	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2268				       aq_ret);
2269}
2270
2271/**
2272 * i40e_vc_request_queues_msg
2273 * @vf: pointer to the VF info
2274 * @msg: pointer to the msg buffer
2275 * @msglen: msg length
2276 *
2277 * VFs get a default number of queues but can use this message to request a
2278 * different number.  If the request is successful, the PF will reset the VF
2279 * and return 0.  If unsuccessful, the PF will send a message informing the
2280 * VF of the number of available queues and return the result of that send.
2281 **/
2282static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
2283{
2284	struct virtchnl_vf_res_request *vfres =
2285		(struct virtchnl_vf_res_request *)msg;
2286	int req_pairs = vfres->num_queue_pairs;
2287	int cur_pairs = vf->num_queue_pairs;
2288	struct i40e_pf *pf = vf->pf;
2289
2290	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2291		return -EINVAL;
2292
2293	if (req_pairs <= 0) {
2294		dev_err(&pf->pdev->dev,
2295			"VF %d tried to request %d queues.  Ignoring.\n",
2296			vf->vf_id, req_pairs);
2297	} else if (req_pairs > I40E_MAX_VF_QUEUES) {
2298		dev_err(&pf->pdev->dev,
2299			"VF %d tried to request more than %d queues.\n",
2300			vf->vf_id,
2301			I40E_MAX_VF_QUEUES);
2302		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2303	} else if (req_pairs - cur_pairs > pf->queues_left) {
2304		dev_warn(&pf->pdev->dev,
2305			 "VF %d requested %d more queues, but only %d left.\n",
2306			 vf->vf_id,
2307			 req_pairs - cur_pairs,
2308			 pf->queues_left);
2309		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2310	} else {
2311		/* successful request */
2312		vf->num_req_queues = req_pairs;
2313		i40e_vc_notify_vf_reset(vf);
2314		i40e_reset_vf(vf, false);
2315		return 0;
2316	}
2317
2318	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2319				      (u8 *)vfres, sizeof(*vfres));
2320}
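
/* Editor's illustration: the VF side of the exchange handled above. This is
 * a hedged sketch, not iavf driver code: example_send_to_pf() is a
 * hypothetical stand-in for the VF driver's mailbox send routine.
 */
int example_send_to_pf(void *vf_ctx, u32 v_opcode, u8 *msg, u16 msglen);

static inline int example_vf_request_queues(void *vf_ctx, u16 num_queue_pairs)
{
	struct virtchnl_vf_res_request vfres = {
		.num_queue_pairs = num_queue_pairs,
	};

	/* On success the PF resets this VF; on failure the PF replies with
	 * the number of queue pairs it can actually provide (see above).
	 */
	return example_send_to_pf(vf_ctx, VIRTCHNL_OP_REQUEST_QUEUES,
				  (u8 *)&vfres, sizeof(vfres));
}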
2321
2322/**
2323 * i40e_vc_get_stats_msg
2324 * @vf: pointer to the VF info
2325 * @msg: pointer to the msg buffer
2326 * @msglen: msg length
2327 *
2328 * called from the VF to get vsi stats
2329 **/
2330static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2331{
2332	struct virtchnl_queue_select *vqs =
2333	    (struct virtchnl_queue_select *)msg;
2334	struct i40e_pf *pf = vf->pf;
2335	struct i40e_eth_stats stats;
2336	i40e_status aq_ret = 0;
2337	struct i40e_vsi *vsi;
2338
2339	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2340
2341	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2342		aq_ret = I40E_ERR_PARAM;
2343		goto error_param;
2344	}
2345
2346	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2347		aq_ret = I40E_ERR_PARAM;
2348		goto error_param;
2349	}
2350
2351	vsi = pf->vsi[vf->lan_vsi_idx];
2352	if (!vsi) {
2353		aq_ret = I40E_ERR_PARAM;
2354		goto error_param;
2355	}
2356	i40e_update_eth_stats(vsi);
2357	stats = vsi->eth_stats;
2358
2359error_param:
2360	/* send the response back to the VF */
2361	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2362				      (u8 *)&stats, sizeof(stats));
2363}
2364
2365/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can program */
2366#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
2367#define I40E_VC_MAX_VLAN_PER_VF 8
2368
2369/**
2370 * i40e_check_vf_permission
2371 * @vf: pointer to the VF info
2372 * @al: MAC address list from virtchnl
2373 *
2374 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2375 * if any address in the list is not valid. Checks the following conditions:
2376 *
2377 * 1) broadcast and zero addresses are never valid
2378 * 2) unicast addresses are not allowed if the VMM has administratively set
2379 *    the VF MAC address, unless the VF is marked as privileged.
2380 * 3) There is enough space to add all the addresses.
2381 *
2382 * Note that to guarantee consistency, this function is expected to be called
2383 * while holding the mac_filter_hash_lock, as otherwise the current number of
2384 * addresses might not be accurate.
2385 **/
2386static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2387					   struct virtchnl_ether_addr_list *al)
2388{
2389	struct i40e_pf *pf = vf->pf;
2390	int i;
2391
2392	/* If this VF is not privileged, then we can't add more than a limited
2393	 * number of addresses. Check to make sure that the additions do not
2394	 * push us over the limit.
2395	 */
2396	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2397	    (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
2398		dev_err(&pf->pdev->dev,
2399			"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2400		return -EPERM;
2401	}
2402
2403	for (i = 0; i < al->num_elements; i++) {
2404		u8 *addr = al->list[i].addr;
2405
2406		if (is_broadcast_ether_addr(addr) ||
2407		    is_zero_ether_addr(addr)) {
2408			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2409				addr);
2410			return I40E_ERR_INVALID_MAC_ADDR;
2411		}
2412
2413		/* If the host VMM administrator has set the VF MAC address
2414		 * administratively via the ndo_set_vf_mac command then deny
2415		 * permission to the VF to add or delete unicast MAC addresses.
2416		 * Unless the VF is privileged and then it can do whatever.
2417		 * The VF may request to set the MAC address filter already
2418		 * assigned to it so do not return an error in that case.
2419		 */
2420		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2421		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2422		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2423			dev_err(&pf->pdev->dev,
2424				"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
2425			return -EPERM;
2426		}
2427	}
2428
2429	return 0;
2430}
2431
2432/**
2433 * i40e_vc_add_mac_addr_msg
2434 * @vf: pointer to the VF info
2435 * @msg: pointer to the msg buffer
2436 * @msglen: msg length
2437 *
2438 * add guest mac address filter
2439 **/
2440static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2441{
2442	struct virtchnl_ether_addr_list *al =
2443	    (struct virtchnl_ether_addr_list *)msg;
2444	struct i40e_pf *pf = vf->pf;
2445	struct i40e_vsi *vsi = NULL;
2446	u16 vsi_id = al->vsi_id;
2447	i40e_status ret = 0;
2448	int i;
2449
2450	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2451	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2452		ret = I40E_ERR_PARAM;
2453		goto error_param;
2454	}
2455
2456	vsi = pf->vsi[vf->lan_vsi_idx];
2457
2458	/* Lock once, because all functions inside the for loop access the
2459	 * VSI's MAC filter list, which must be protected by the same lock.
2460	 */
2461	spin_lock_bh(&vsi->mac_filter_hash_lock);
2462
2463	ret = i40e_check_vf_permission(vf, al);
2464	if (ret) {
2465		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2466		goto error_param;
2467	}
2468
2469	/* add new addresses to the list */
2470	for (i = 0; i < al->num_elements; i++) {
2471		struct i40e_mac_filter *f;
2472
2473		f = i40e_find_mac(vsi, al->list[i].addr);
2474		if (!f) {
2475			f = i40e_add_mac_filter(vsi, al->list[i].addr);
2476
2477			if (!f) {
2478				dev_err(&pf->pdev->dev,
2479					"Unable to add MAC filter %pM for VF %d\n",
2480					al->list[i].addr, vf->vf_id);
2481				ret = I40E_ERR_PARAM;
2482				spin_unlock_bh(&vsi->mac_filter_hash_lock);
2483				goto error_param;
2484			} else {
2485				vf->num_mac++;
2486			}
2487		}
2488	}
2489	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2490
2491	/* program the updated filter list */
2492	ret = i40e_sync_vsi_filters(vsi);
2493	if (ret)
2494		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2495			vf->vf_id, ret);
2496
2497error_param:
2498	/* send the response to the VF */
2499	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2500				       ret);
2501}
2502
2503/**
2504 * i40e_vc_del_mac_addr_msg
2505 * @vf: pointer to the VF info
2506 * @msg: pointer to the msg buffer
2507 * @msglen: msg length
2508 *
2509 * remove guest mac address filter
2510 **/
2511static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2512{
2513	struct virtchnl_ether_addr_list *al =
2514	    (struct virtchnl_ether_addr_list *)msg;
2515	struct i40e_pf *pf = vf->pf;
2516	struct i40e_vsi *vsi = NULL;
2517	u16 vsi_id = al->vsi_id;
2518	i40e_status ret = 0;
2519	int i;
2520
2521	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2522	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2523		ret = I40E_ERR_PARAM;
2524		goto error_param;
2525	}
2526
2527	for (i = 0; i < al->num_elements; i++) {
2528		if (is_broadcast_ether_addr(al->list[i].addr) ||
2529		    is_zero_ether_addr(al->list[i].addr)) {
2530			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2531				al->list[i].addr, vf->vf_id);
2532			ret = I40E_ERR_INVALID_MAC_ADDR;
2533			goto error_param;
2534		}
2535	}
2536	vsi = pf->vsi[vf->lan_vsi_idx];
2537
2538	spin_lock_bh(&vsi->mac_filter_hash_lock);
2539	/* delete addresses from the list */
2540	for (i = 0; i < al->num_elements; i++)
2541		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2542			ret = I40E_ERR_INVALID_MAC_ADDR;
2543			spin_unlock_bh(&vsi->mac_filter_hash_lock);
2544			goto error_param;
2545		} else {
2546			vf->num_mac--;
2547		}
2548
2549	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2550
2551	/* program the updated filter list */
2552	ret = i40e_sync_vsi_filters(vsi);
2553	if (ret)
2554		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2555			vf->vf_id, ret);
2556
2557error_param:
2558	/* send the response to the VF */
2559	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2560				       ret);
2561}
2562
2563/**
2564 * i40e_vc_add_vlan_msg
2565 * @vf: pointer to the VF info
2566 * @msg: pointer to the msg buffer
2567 * @msglen: msg length
2568 *
2569 * program guest vlan id
2570 **/
2571static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2572{
2573	struct virtchnl_vlan_filter_list *vfl =
2574	    (struct virtchnl_vlan_filter_list *)msg;
2575	struct i40e_pf *pf = vf->pf;
2576	struct i40e_vsi *vsi = NULL;
2577	u16 vsi_id = vfl->vsi_id;
2578	i40e_status aq_ret = 0;
2579	int i;
2580
2581	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2582	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2583		dev_err(&pf->pdev->dev,
2584			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2585		goto error_param;
2586	}
2587	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2588	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2589		aq_ret = I40E_ERR_PARAM;
2590		goto error_param;
2591	}
2592
2593	for (i = 0; i < vfl->num_elements; i++) {
2594		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2595			aq_ret = I40E_ERR_PARAM;
2596			dev_err(&pf->pdev->dev,
2597				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2598			goto error_param;
2599		}
2600	}
2601	vsi = pf->vsi[vf->lan_vsi_idx];
2602	if (vsi->info.pvid) {
2603		aq_ret = I40E_ERR_PARAM;
2604		goto error_param;
2605	}
2606
2607	i40e_vlan_stripping_enable(vsi);
2608	for (i = 0; i < vfl->num_elements; i++) {
2609		/* add new VLAN filter */
2610		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2611		if (!ret)
2612			vf->num_vlan++;
2613
2614		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2615			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2616							   true,
2617							   vfl->vlan_id[i],
2618							   NULL);
2619		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2620			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2621							   true,
2622							   vfl->vlan_id[i],
2623							   NULL);
2624
2625		if (ret)
2626			dev_err(&pf->pdev->dev,
2627				"Unable to add VLAN filter %d for VF %d, error %d\n",
2628				vfl->vlan_id[i], vf->vf_id, ret);
2629	}
2630
2631error_param:
2632	/* send the response to the VF */
2633	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2634}
2635
2636/**
2637 * i40e_vc_remove_vlan_msg
2638 * @vf: pointer to the VF info
2639 * @msg: pointer to the msg buffer
2640 * @msglen: msg length
2641 *
2642 * remove programmed guest vlan id
2643 **/
2644static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2645{
2646	struct virtchnl_vlan_filter_list *vfl =
2647	    (struct virtchnl_vlan_filter_list *)msg;
2648	struct i40e_pf *pf = vf->pf;
2649	struct i40e_vsi *vsi = NULL;
2650	u16 vsi_id = vfl->vsi_id;
2651	i40e_status aq_ret = 0;
2652	int i;
2653
2654	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2655	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2656		aq_ret = I40E_ERR_PARAM;
2657		goto error_param;
2658	}
2659
2660	for (i = 0; i < vfl->num_elements; i++) {
2661		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2662			aq_ret = I40E_ERR_PARAM;
2663			goto error_param;
2664		}
2665	}
2666
2667	vsi = pf->vsi[vf->lan_vsi_idx];
2668	if (vsi->info.pvid) {
2669		aq_ret = I40E_ERR_PARAM;
2670		goto error_param;
2671	}
2672
2673	for (i = 0; i < vfl->num_elements; i++) {
2674		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2675		vf->num_vlan--;
2676
2677		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2678			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2679							   false,
2680							   vfl->vlan_id[i],
2681							   NULL);
2682		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2683			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2684							   false,
2685							   vfl->vlan_id[i],
2686							   NULL);
2687	}
2688
2689error_param:
2690	/* send the response to the VF */
2691	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2692}
2693
2694/**
2695 * i40e_vc_iwarp_msg
2696 * @vf: pointer to the VF info
2697 * @msg: pointer to the msg buffer
2698 * @msglen: msg length
2699 *
2700 * called from the VF for the iwarp msgs
2701 **/
2702static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2703{
2704	struct i40e_pf *pf = vf->pf;
2705	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2706	i40e_status aq_ret = 0;
2707
2708	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2709	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2710		aq_ret = I40E_ERR_PARAM;
2711		goto error_param;
2712	}
2713
2714	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2715				     msg, msglen);
2716
2717error_param:
2718	/* send the response to the VF */
2719	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2720				       aq_ret);
2721}
2722
2723/**
2724 * i40e_vc_iwarp_qvmap_msg
2725 * @vf: pointer to the VF info
2726 * @msg: pointer to the msg buffer
2727 * @msglen: msg length
2728 * @config: config qvmap or release it
2729 *
2730 * called from the VF for the iwarp msgs
2731 **/
2732static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2733				   bool config)
2734{
2735	struct virtchnl_iwarp_qvlist_info *qvlist_info =
2736				(struct virtchnl_iwarp_qvlist_info *)msg;
2737	i40e_status aq_ret = 0;
2738
2739	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2740	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2741		aq_ret = I40E_ERR_PARAM;
2742		goto error_param;
2743	}
2744
2745	if (config) {
2746		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2747			aq_ret = I40E_ERR_PARAM;
2748	} else {
2749		i40e_release_iwarp_qvlist(vf);
2750	}
2751
2752error_param:
2753	/* send the response to the VF */
2754	return i40e_vc_send_resp_to_vf(vf,
2755			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2756			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2757			       aq_ret);
2758}
2759
2760/**
2761 * i40e_vc_config_rss_key
2762 * @vf: pointer to the VF info
2763 * @msg: pointer to the msg buffer
2764 * @msglen: msg length
2765 *
2766 * Configure the VF's RSS key
2767 **/
2768static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
2769{
2770	struct virtchnl_rss_key *vrk =
2771		(struct virtchnl_rss_key *)msg;
2772	struct i40e_pf *pf = vf->pf;
2773	struct i40e_vsi *vsi = NULL;
2774	u16 vsi_id = vrk->vsi_id;
2775	i40e_status aq_ret = 0;
2776
2777	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2778	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2779	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2780		aq_ret = I40E_ERR_PARAM;
2781		goto err;
2782	}
2783
2784	vsi = pf->vsi[vf->lan_vsi_idx];
2785	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2786err:
2787	/* send the response to the VF */
2788	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2789				       aq_ret);
2790}
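
/* Editor's illustration: how a sender sizes the variable-length
 * virtchnl_rss_key message validated above. A minimal sketch, assuming the
 * one-byte key[1] tail this virtchnl version declares (hence the "- 1").
 */
static inline struct virtchnl_rss_key *example_alloc_rss_key_msg(u16 vsi_id,
								 const u8 *key,
								 u16 key_len)
{
	struct virtchnl_rss_key *vrk;
	int len = sizeof(struct virtchnl_rss_key) + key_len - 1;

	vrk = kzalloc(len, GFP_KERNEL);
	if (!vrk)
		return NULL;
	vrk->vsi_id = vsi_id;
	vrk->key_len = key_len;	/* must be I40E_HKEY_ARRAY_SIZE here */
	memcpy(vrk->key, key, key_len);
	return vrk;
}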
2791
2792/**
2793 * i40e_vc_config_rss_lut
2794 * @vf: pointer to the VF info
2795 * @msg: pointer to the msg buffer
2796 * @msglen: msg length
2797 *
2798 * Configure the VF's RSS LUT
2799 **/
2800static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
2801{
2802	struct virtchnl_rss_lut *vrl =
2803		(struct virtchnl_rss_lut *)msg;
2804	struct i40e_pf *pf = vf->pf;
2805	struct i40e_vsi *vsi = NULL;
2806	u16 vsi_id = vrl->vsi_id;
2807	i40e_status aq_ret = 0;
2808
2809	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2810	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2811	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2812		aq_ret = I40E_ERR_PARAM;
2813		goto err;
2814	}
2815
2816	vsi = pf->vsi[vf->lan_vsi_idx];
2817	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2818	/* send the response to the VF */
2819err:
2820	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
2821				       aq_ret);
2822}
2823
2824/**
2825 * i40e_vc_get_rss_hena
2826 * @vf: pointer to the VF info
2827 * @msg: pointer to the msg buffer
2828 * @msglen: msg length
2829 *
2830 * Return the RSS HENA bits allowed by the hardware
2831 **/
2832static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2833{
2834	struct virtchnl_rss_hena *vrh = NULL;
2835	struct i40e_pf *pf = vf->pf;
2836	i40e_status aq_ret = 0;
2837	int len = 0;
2838
2839	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2840		aq_ret = I40E_ERR_PARAM;
2841		goto err;
2842	}
2843	len = sizeof(struct virtchnl_rss_hena);
2844
2845	vrh = kzalloc(len, GFP_KERNEL);
2846	if (!vrh) {
2847		aq_ret = I40E_ERR_NO_MEMORY;
2848		len = 0;
2849		goto err;
2850	}
2851	vrh->hena = i40e_pf_get_default_rss_hena(pf);
2852err:
2853	/* send the response back to the VF */
2854	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
2855					aq_ret, (u8 *)vrh, len);
2856	kfree(vrh);
2857	return aq_ret;
2858}
2859
2860/**
2861 * i40e_vc_set_rss_hena
2862 * @vf: pointer to the VF info
2863 * @msg: pointer to the msg buffer
2864 * @msglen: msg length
2865 *
2866 * Set the RSS HENA bits for the VF
2867 **/
2868static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2869{
2870	struct virtchnl_rss_hena *vrh =
2871		(struct virtchnl_rss_hena *)msg;
2872	struct i40e_pf *pf = vf->pf;
2873	struct i40e_hw *hw = &pf->hw;
2874	i40e_status aq_ret = 0;
2875
2876	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2877		aq_ret = I40E_ERR_PARAM;
2878		goto err;
2879	}
2880	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
2881	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
2882			  (u32)(vrh->hena >> 32));
2883
2884	/* send the response to the VF */
2885err:
2886	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
2887}
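
/* Editor's note: the VFQF_HENA1 registers are 32 bits wide, so the 64-bit
 * hena bitmap above is written in two halves: register index 0 takes bits
 * 31:0 and register index 1 takes bits 63:32 (the ">> 32").
 */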
2888
2889/**
2890 * i40e_vc_enable_vlan_stripping
2891 * @vf: pointer to the VF info
2892 * @msg: pointer to the msg buffer
2893 * @msglen: msg length
2894 *
2895 * Enable vlan header stripping for the VF
2896 **/
2897static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2898					 u16 msglen)
2899{
2900	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2901	i40e_status aq_ret = 0;
2902
2903	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2904		aq_ret = I40E_ERR_PARAM;
2905		goto err;
2906	}
2907
2908	i40e_vlan_stripping_enable(vsi);
2909
2910	/* send the response to the VF */
2911err:
2912	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2913				       aq_ret);
2914}
2915
2916/**
2917 * i40e_vc_disable_vlan_stripping
2918 * @vf: pointer to the VF info
2919 * @msg: pointer to the msg buffer
2920 * @msglen: msg length
2921 *
2922 * Disable vlan header stripping for the VF
2923 **/
2924static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2925					  u16 msglen)
2926{
2927	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2928	i40e_status aq_ret = 0;
2929
2930	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2931		aq_ret = I40E_ERR_PARAM;
2932		goto err;
2933	}
2934
2935	i40e_vlan_stripping_disable(vsi);
2936
2937	/* send the response to the VF */
2938err:
2939	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2940				       aq_ret);
2941}
2942
2943/**
2944 * i40e_validate_cloud_filter
2945 * @vf: pointer to the VF info
2946 * @tc_filter: pointer to the virtchnl TC filter
2947 *
2948 * This function validates cloud filter programmed as TC filter for ADq
2949 **/
2950static int i40e_validate_cloud_filter(struct i40e_vf *vf,
2951				      struct virtchnl_filter *tc_filter)
2952{
2953	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
2954	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
2955	struct i40e_pf *pf = vf->pf;
2956	struct i40e_vsi *vsi = NULL;
2957	struct i40e_mac_filter *f;
2958	struct hlist_node *h;
2959	bool found = false;
2960	int bkt;
2961
2962	if (!tc_filter->action) {
2963		dev_info(&pf->pdev->dev,
2964			 "VF %d: Currently ADq doesn't support Drop Action\n",
2965			 vf->vf_id);
2966		goto err;
2967	}
2968
2969	/* action_meta is the TC number to which the filter is applied */
2970	if (!tc_filter->action_meta ||
2971	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
2972		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
2973			 vf->vf_id, tc_filter->action_meta);
2974		goto err;
2975	}
2976
2977	/* Check filter if it's programmed for advanced mode or basic mode.
2978	 * There are two ADq modes (for VF only),
2979	 * 1. Basic mode: intended to allow as many filter options as possible
2980	 *		  to be added to a VF in Non-trusted mode. Main goal is
2981	 *		  to add filters to its own MAC and VLAN id.
2982	 * 2. Advanced mode: is for allowing filters to be applied other than
2983	 *		  its own MAC or VLAN. This mode requires the VF to be
2984	 *		  Trusted.
2985	 */
2986	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
2987		vsi = pf->vsi[vf->lan_vsi_idx];
2988		f = i40e_find_mac(vsi, data.dst_mac);
2989
2990		if (!f) {
2991			dev_info(&pf->pdev->dev,
2992				 "Destination MAC %pM doesn't belong to VF %d\n",
2993				 data.dst_mac, vf->vf_id);
2994			goto err;
2995		}
2996
2997		if (mask.vlan_id) {
2998			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
2999					   hlist) {
3000				if (f->vlan == ntohs(data.vlan_id)) {
3001					found = true;
3002					break;
3003				}
3004			}
3005			if (!found) {
3006				dev_info(&pf->pdev->dev,
3007					 "VF %d doesn't have any VLAN id %u\n",
3008					 vf->vf_id, ntohs(data.vlan_id));
3009				goto err;
3010			}
3011		}
3012	} else {
3013		/* Check if VF is trusted */
3014		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3015			dev_err(&pf->pdev->dev,
3016				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3017				vf->vf_id);
3018			return I40E_ERR_CONFIG;
3019		}
3020	}
3021
3022	if (mask.dst_mac[0] & data.dst_mac[0]) {
3023		if (is_broadcast_ether_addr(data.dst_mac) ||
3024		    is_zero_ether_addr(data.dst_mac)) {
3025			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3026				 vf->vf_id, data.dst_mac);
3027			goto err;
3028		}
3029	}
3030
3031	if (mask.src_mac[0] & data.src_mac[0]) {
3032		if (is_broadcast_ether_addr(data.src_mac) ||
3033		    is_zero_ether_addr(data.src_mac)) {
3034			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3035				 vf->vf_id, data.src_mac);
3036			goto err;
3037		}
3038	}
3039
3040	if (mask.dst_port & data.dst_port) {
3041		if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) {
3042			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3043				 vf->vf_id);
3044			goto err;
3045		}
3046	}
3047
3048	if (mask.src_port & data.src_port) {
3049		if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) {
3050			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3051				 vf->vf_id);
3052			goto err;
3053		}
3054	}
3055
3056	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3057	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3058		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3059			 vf->vf_id);
3060		goto err;
3061	}
3062
3063	if (mask.vlan_id & data.vlan_id) {
3064		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3065			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3066				 vf->vf_id);
3067			goto err;
3068		}
3069	}
3070
3071	return I40E_SUCCESS;
3072err:
3073	return I40E_ERR_CONFIG;
3074}
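
/* Editor's illustration: a basic-mode ADq filter that would pass the
 * validation above. A hedged sketch of the sender's side only: it assumes
 * vf_mac is the VF's own MAC address (so the i40e_find_mac() lookup
 * succeeds) and steers matching TCP/IPv4 traffic to TC 1.
 */
static inline void example_fill_basic_adq_filter(struct virtchnl_filter *vcf,
						 u8 *vf_mac)
{
	memset(vcf, 0, sizeof(*vcf));
	vcf->flow_type = VIRTCHNL_TCP_V4_FLOW;
	vcf->action = VIRTCHNL_ACTION_TC_REDIRECT;	/* drop is rejected */
	vcf->action_meta = 1;				/* target TC, 1-4 */
	/* non-zero mask bytes select the fields the PF must match on */
	memset(vcf->mask.tcp_spec.dst_mac, 0xff, ETH_ALEN);
	ether_addr_copy(vcf->data.tcp_spec.dst_mac, vf_mac);
}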
3075
3076/**
3077 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3078 * @vf: pointer to the VF info
3079 * @seid: seid of the VSI it is searching for
3080 **/
3081static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3082{
3083	struct i40e_pf *pf = vf->pf;
3084	struct i40e_vsi *vsi = NULL;
3085	int i;
3086
3087	for (i = 0; i < vf->num_tc; i++) {
3088		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3089		if (vsi && vsi->seid == seid)
3090			return vsi;
3091	}
3092	return NULL;
3093}
3094
3095/**
3096 * i40e_del_all_cloud_filters
3097 * @vf: pointer to the VF info
3098 *
3099 * This function deletes all ADq cloud filters programmed for the given VF
3100 **/
3101static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3102{
3103	struct i40e_cloud_filter *cfilter = NULL;
3104	struct i40e_pf *pf = vf->pf;
3105	struct i40e_vsi *vsi = NULL;
3106	struct hlist_node *node;
3107	int ret;
3108
3109	hlist_for_each_entry_safe(cfilter, node,
3110				  &vf->cloud_filter_list, cloud_node) {
3111		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3112
3113		if (!vsi) {
3114			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3115				vf->vf_id, cfilter->seid);
3116			continue;
3117		}
3118
3119		if (cfilter->dst_port)
3120			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3121								false);
3122		else
3123			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3124		if (ret)
3125			dev_err(&pf->pdev->dev,
3126				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3127				vf->vf_id, i40e_stat_str(&pf->hw, ret),
3128				i40e_aq_str(&pf->hw,
3129					    pf->hw.aq.asq_last_status));
3130
3131		hlist_del(&cfilter->cloud_node);
3132		kfree(cfilter);
3133		vf->num_cloud_filters--;
3134	}
3135}
3136
3137/**
3138 * i40e_vc_del_cloud_filter
3139 * @vf: pointer to the VF info
3140 * @msg: pointer to the msg buffer
3141 *
3142 * This function deletes a cloud filter programmed as TC filter for ADq
3143 **/
3144static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3145{
3146	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3147	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3148	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3149	struct i40e_cloud_filter cfilter, *cf = NULL;
3150	struct i40e_pf *pf = vf->pf;
3151	struct i40e_vsi *vsi = NULL;
3152	struct hlist_node *node;
3153	i40e_status aq_ret = 0;
3154	int i, ret;
3155
3156	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3157		aq_ret = I40E_ERR_PARAM;
3158		goto err;
3159	}
3160
3161	if (!vf->adq_enabled) {
3162		dev_info(&pf->pdev->dev,
3163			 "VF %d: ADq not enabled, can't apply cloud filter\n",
3164			 vf->vf_id);
3165		aq_ret = I40E_ERR_PARAM;
3166		goto err;
3167	}
3168
3169	if (i40e_validate_cloud_filter(vf, vcf)) {
3170		dev_info(&pf->pdev->dev,
3171			 "VF %d: Invalid input, can't apply cloud filter\n",
3172			 vf->vf_id);
3173		aq_ret = I40E_ERR_PARAM;
3174		goto err;
3175	}
3176
3177	memset(&cfilter, 0, sizeof(cfilter));
3178	/* parse destination mac address */
3179	for (i = 0; i < ETH_ALEN; i++)
3180		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3181
3182	/* parse source mac address */
3183	for (i = 0; i < ETH_ALEN; i++)
3184		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3185
3186	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3187	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3188	cfilter.src_port = mask.src_port & tcf.src_port;
3189
3190	switch (vcf->flow_type) {
3191	case VIRTCHNL_TCP_V4_FLOW:
3192		cfilter.n_proto = ETH_P_IP;
3193		if (mask.dst_ip[0] & tcf.dst_ip[0])
3194			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3195			       ARRAY_SIZE(tcf.dst_ip));
3196		else if (mask.src_ip[0] & tcf.src_ip[0])
3197			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3198			       ARRAY_SIZE(tcf.dst_ip));
3199		break;
3200	case VIRTCHNL_TCP_V6_FLOW:
3201		cfilter.n_proto = ETH_P_IPV6;
3202		if (mask.dst_ip[3] & tcf.dst_ip[3])
3203			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3204			       sizeof(cfilter.ip.v6.dst_ip6));
3205		if (mask.src_ip[3] & tcf.src_ip[3])
3206			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3207			       sizeof(cfilter.ip.v6.src_ip6));
3208		break;
3209	default:
3210		/* TC filter can be configured based on different combinations
3211		 * and in this case IP is not a part of filter config
3212		 */
3213		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3214			 vf->vf_id);
3215	}
3216
3217	/* get the VSI to which the TC belongs */
3218	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3219	cfilter.seid = vsi->seid;
3220	cfilter.flags = vcf->field_flags;
3221
3222	/* Deleting TC filter */
3223	if (tcf.dst_port)
3224		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3225	else
3226		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3227	if (ret) {
3228		dev_err(&pf->pdev->dev,
3229			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3230			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3231			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3232		goto err;
3233	}
3234
3235	hlist_for_each_entry_safe(cf, node,
3236				  &vf->cloud_filter_list, cloud_node) {
3237		if (cf->seid != cfilter.seid)
3238			continue;
3239		if (mask.dst_port)
3240			if (cfilter.dst_port != cf->dst_port)
3241				continue;
3242		if (mask.dst_mac[0])
3243			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3244				continue;
3245		/* for ipv4 data to be valid, only first byte of mask is set */
3246		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3247			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3248				   ARRAY_SIZE(tcf.dst_ip)))
3249				continue;
3250		/* for ipv6, mask is set for all sixteen bytes (4 words) */
3251		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3252			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3253				   sizeof(cfilter.ip.v6.src_ip6)))
3254				continue;
3255		if (mask.vlan_id)
3256			if (cfilter.vlan_id != cf->vlan_id)
3257				continue;
3258
3259		hlist_del(&cf->cloud_node);
3260		kfree(cf);
3261		vf->num_cloud_filters--;
3262	}
3263
3264err:
3265	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3266				       aq_ret);
3267}
3268
3269/**
3270 * i40e_vc_add_cloud_filter
3271 * @vf: pointer to the VF info
3272 * @msg: pointer to the msg buffer
3273 *
3274 * This function adds a cloud filter programmed as TC filter for ADq
3275 **/
3276static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3277{
3278	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3279	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3280	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3281	struct i40e_cloud_filter *cfilter = NULL;
3282	struct i40e_pf *pf = vf->pf;
3283	struct i40e_vsi *vsi = NULL;
3284	i40e_status aq_ret = 0;
3285	int i, ret;
3286
3287	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3288		aq_ret = I40E_ERR_PARAM;
3289		goto err;
3290	}
3291
3292	if (!vf->adq_enabled) {
3293		dev_info(&pf->pdev->dev,
3294			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3295			 vf->vf_id);
3296		aq_ret = I40E_ERR_PARAM;
3297		goto err;
3298	}
3299
3300	if (i40e_validate_cloud_filter(vf, vcf)) {
3301		dev_info(&pf->pdev->dev,
3302			 "VF %d: Invalid input/s, can't apply cloud filter\n",
3303			 vf->vf_id);
3304		aq_ret = I40E_ERR_PARAM;
3305		goto err;
3306	}
3307
3308	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3309	if (!cfilter)
3310		return -ENOMEM;
3311
3312	/* parse destination mac address */
3313	for (i = 0; i < ETH_ALEN; i++)
3314		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3315
3316	/* parse source mac address */
3317	for (i = 0; i < ETH_ALEN; i++)
3318		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3319
3320	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3321	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3322	cfilter->src_port = mask.src_port & tcf.src_port;
3323
3324	switch (vcf->flow_type) {
3325	case VIRTCHNL_TCP_V4_FLOW:
3326		cfilter->n_proto = ETH_P_IP;
3327		if (mask.dst_ip[0] & tcf.dst_ip[0])
3328			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3329			       ARRAY_SIZE(tcf.dst_ip));
3330		else if (mask.src_ip[0] & tcf.src_ip[0])
3331			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3332			       ARRAY_SIZE(tcf.dst_ip));
3333		break;
3334	case VIRTCHNL_TCP_V6_FLOW:
3335		cfilter->n_proto = ETH_P_IPV6;
3336		if (mask.dst_ip[3] & tcf.dst_ip[3])
3337			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3338			       sizeof(cfilter->ip.v6.dst_ip6));
3339		if (mask.src_ip[3] & tcf.src_ip[3])
3340			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3341			       sizeof(cfilter->ip.v6.src_ip6));
3342		break;
3343	default:
3344		/* TC filter can be configured based on different combinations
3345		 * and in this case IP is not a part of filter config
3346		 */
3347		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3348			 vf->vf_id);
3349	}
3350
3351	/* get the VSI to which the TC belongs */
3352	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3353	cfilter->seid = vsi->seid;
3354	cfilter->flags = vcf->field_flags;
3355
3356	/* Adding cloud filter programmed as TC filter */
3357	if (tcf.dst_port)
3358		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3359	else
3360		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3361	if (ret) {
3362		dev_err(&pf->pdev->dev,
3363			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3364			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3365			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3366		goto err;
3367	}
3368
3369	INIT_HLIST_NODE(&cfilter->cloud_node);
3370	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3371	vf->num_cloud_filters++;
3372err:
3373	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3374				       aq_ret);
3375}
3376
3377/**
3378 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3379 * @vf: pointer to the VF info
3380 * @msg: pointer to the msg buffer
3381 **/
3382static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3383{
3384	struct virtchnl_tc_info *tci =
3385		(struct virtchnl_tc_info *)msg;
3386	struct i40e_pf *pf = vf->pf;
3387	struct i40e_link_status *ls = &pf->hw.phy.link_info;
3388	int i, adq_request_qps = 0, speed = 0;
3389	i40e_status aq_ret = 0;
3390
3391	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3392		aq_ret = I40E_ERR_PARAM;
3393		goto err;
3394	}
3395
3396	/* ADq cannot be applied if spoof check is ON */
3397	if (vf->spoofchk) {
3398		dev_err(&pf->pdev->dev,
3399			"Spoof check is ON, turn it OFF to enable ADq\n");
3400		aq_ret = I40E_ERR_PARAM;
3401		goto err;
3402	}
3403
3404	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3405		dev_err(&pf->pdev->dev,
3406			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3407			vf->vf_id);
3408		aq_ret = I40E_ERR_PARAM;
3409		goto err;
3410	}
3411
3412	/* max number of traffic classes for VF currently capped at 4 */
3413	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3414		dev_err(&pf->pdev->dev,
3415			"VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
3416			vf->vf_id, tci->num_tc);
3417		aq_ret = I40E_ERR_PARAM;
3418		goto err;
3419	}
3420
3421	/* validate queues for each TC */
3422	for (i = 0; i < tci->num_tc; i++)
3423		if (!tci->list[i].count ||
3424		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3425			dev_err(&pf->pdev->dev,
3426				"VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
3427				vf->vf_id, i, tci->list[i].count);
3428			aq_ret = I40E_ERR_PARAM;
3429			goto err;
3430		}
3431
3432	/* need Max VF queues but already have default number of queues */
3433	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3434
3435	if (pf->queues_left < adq_request_qps) {
3436		dev_err(&pf->pdev->dev,
3437			"No queues left to allocate to VF %d\n",
3438			vf->vf_id);
3439		aq_ret = I40E_ERR_PARAM;
3440		goto err;
3441	} else {
3442		/* we need to allocate max VF queues to enable ADq so as to
3443		 * make sure ADq enabled VF always gets back queues when it
3444		 * goes through a reset.
3445		 */
3446		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3447	}
3448
3449	/* get link speed in Mbps to validate rate limit */
3450	switch (ls->link_speed) {
3451	case VIRTCHNL_LINK_SPEED_100MB:
3452		speed = SPEED_100;
3453		break;
3454	case VIRTCHNL_LINK_SPEED_1GB:
3455		speed = SPEED_1000;
3456		break;
3457	case VIRTCHNL_LINK_SPEED_10GB:
3458		speed = SPEED_10000;
3459		break;
3460	case VIRTCHNL_LINK_SPEED_20GB:
3461		speed = SPEED_20000;
3462		break;
3463	case VIRTCHNL_LINK_SPEED_25GB:
3464		speed = SPEED_25000;
3465		break;
3466	case VIRTCHNL_LINK_SPEED_40GB:
3467		speed = SPEED_40000;
3468		break;
3469	default:
3470		dev_err(&pf->pdev->dev,
3471			"Cannot detect link speed\n");
3472		aq_ret = I40E_ERR_PARAM;
3473		goto err;
3474	}
3475
3476	/* parse data from the queue channel info */
3477	vf->num_tc = tci->num_tc;
3478	for (i = 0; i < vf->num_tc; i++) {
3479		if (tci->list[i].max_tx_rate) {
3480			if (tci->list[i].max_tx_rate > speed) {
3481				dev_err(&pf->pdev->dev,
3482					"Invalid max tx rate %llu specified for VF %d.\n",
3483					tci->list[i].max_tx_rate,
3484					vf->vf_id);
3485				aq_ret = I40E_ERR_PARAM;
3486				goto err;
3487			} else {
3488				vf->ch[i].max_tx_rate =
3489					tci->list[i].max_tx_rate;
3490			}
3491		}
3492		vf->ch[i].num_qps = tci->list[i].count;
3493	}
3494
3495	/* set this flag only after making sure all inputs are sane */
3496	vf->adq_enabled = true;
3497	/* num_req_queues is set when the user changes the number of queues via
3498	 * ethtool and this causes an issue for the default VSI (which depends
3499	 * on this variable) when ADq is enabled, hence reset it.
3500	 */
3501	vf->num_req_queues = 0;
3502
3503	/* reset the VF in order to allocate resources */
3504	i40e_vc_notify_vf_reset(vf);
3505	i40e_reset_vf(vf, false);
3506
3507	return I40E_SUCCESS;
3508
3509	/* send the response to the VF */
3510err:
3511	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3512				       aq_ret);
3513}
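
/* Editor's illustration: the shape of a VIRTCHNL_OP_ENABLE_CHANNELS request
 * that satisfies the checks above. A hedged sketch of the sender's side,
 * assuming two TCs of 4 and 2 queues with a 100 Mbps cap on the second.
 */
static inline struct virtchnl_tc_info *example_build_tc_info(void)
{
	u16 num_tc = 2;
	struct virtchnl_tc_info *tci;
	int len = sizeof(*tci) +
		  (num_tc - 1) * sizeof(struct virtchnl_channel_info);

	tci = kzalloc(len, GFP_KERNEL);
	if (!tci)
		return NULL;
	tci->num_tc = num_tc;
	tci->list[0].count = 4;		/* 1-4 queues per TC */
	tci->list[1].count = 2;
	tci->list[1].max_tx_rate = 100;	/* Mbps, must not exceed link speed */
	return tci;
}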
3514
3515/**
3516 * i40e_vc_del_qch_msg
3517 * @vf: pointer to the VF info
3518 * @msg: pointer to the msg buffer
3519 **/
3520static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3521{
3522	struct i40e_pf *pf = vf->pf;
3523	i40e_status aq_ret = 0;
3524
3525	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3526		aq_ret = I40E_ERR_PARAM;
3527		goto err;
3528	}
3529
3530	if (vf->adq_enabled) {
3531		i40e_del_all_cloud_filters(vf);
3532		i40e_del_qch(vf);
3533		vf->adq_enabled = false;
3534		vf->num_tc = 0;
3535		dev_info(&pf->pdev->dev,
3536			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3537			 vf->vf_id);
3538	} else {
3539		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3540			 vf->vf_id);
3541		aq_ret = I40E_ERR_PARAM;
3542	}
3543
3544	/* reset the VF in order to allocate resources */
3545	i40e_vc_notify_vf_reset(vf);
3546	i40e_reset_vf(vf, false);
3547
3548	return I40E_SUCCESS;
3549
3550err:
3551	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3552				       aq_ret);
3553}
3554
3555/**
3556 * i40e_vc_process_vf_msg
3557 * @pf: pointer to the PF structure
3558 * @vf_id: source VF id
3559 * @v_opcode: operation code
3560 * @v_retval: unused return value code
3561 * @msg: pointer to the msg buffer
 * @msglen: msg length
3562 *
3563 * called from the common aeq/arq handler to
3564 * process request from VF
3565 **/
3566int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3567			   u32 v_retval, u8 *msg, u16 msglen)
3568{
3569	struct i40e_hw *hw = &pf->hw;
3570	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3571	struct i40e_vf *vf;
3572	int ret;
3573
3574	pf->vf_aq_requests++;
3575	if (local_vf_id >= pf->num_alloc_vfs)
3576		return -EINVAL;
3577	vf = &(pf->vf[local_vf_id]);
3578
3579	/* Check if VF is disabled. */
3580	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3581		return I40E_ERR_PARAM;
3582
3583	/* perform basic checks on the msg */
3584	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3585
3586	/* perform additional checks specific to this driver */
3587	if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
3588		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
3589
3590		if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
3591			ret = -EINVAL;
3592	} else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
3593		struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
3594
3595		if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
3596			ret = -EINVAL;
3597	}
3598
3599	if (ret) {
3600		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3601		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3602			local_vf_id, v_opcode, msglen);
3603		switch (ret) {
3604		case VIRTCHNL_ERR_PARAM:
3605			return -EPERM;
3606		default:
3607			return -EINVAL;
3608		}
3609	}
3610
3611	switch (v_opcode) {
3612	case VIRTCHNL_OP_VERSION:
3613		ret = i40e_vc_get_version_msg(vf, msg);
3614		break;
3615	case VIRTCHNL_OP_GET_VF_RESOURCES:
3616		ret = i40e_vc_get_vf_resources_msg(vf, msg);
3617		i40e_vc_notify_vf_link_state(vf);
3618		break;
3619	case VIRTCHNL_OP_RESET_VF:
3620		i40e_vc_reset_vf_msg(vf);
3621		ret = 0;
3622		break;
3623	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3624		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
3625		break;
3626	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3627		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
3628		break;
3629	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3630		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
3631		break;
3632	case VIRTCHNL_OP_ENABLE_QUEUES:
3633		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
3634		i40e_vc_notify_vf_link_state(vf);
3635		break;
3636	case VIRTCHNL_OP_DISABLE_QUEUES:
3637		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
3638		break;
3639	case VIRTCHNL_OP_ADD_ETH_ADDR:
3640		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
3641		break;
3642	case VIRTCHNL_OP_DEL_ETH_ADDR:
3643		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
3644		break;
3645	case VIRTCHNL_OP_ADD_VLAN:
3646		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
3647		break;
3648	case VIRTCHNL_OP_DEL_VLAN:
3649		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
3650		break;
3651	case VIRTCHNL_OP_GET_STATS:
3652		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
3653		break;
3654	case VIRTCHNL_OP_IWARP:
3655		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3656		break;
3657	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3658		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
3659		break;
3660	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3661		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
3662		break;
3663	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3664		ret = i40e_vc_config_rss_key(vf, msg, msglen);
3665		break;
3666	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3667		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
3668		break;
3669	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3670		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
3671		break;
3672	case VIRTCHNL_OP_SET_RSS_HENA:
3673		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
3674		break;
3675	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3676		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
3677		break;
3678	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3679		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
3680		break;
3681	case VIRTCHNL_OP_REQUEST_QUEUES:
3682		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
3683		break;
3684	case VIRTCHNL_OP_ENABLE_CHANNELS:
3685		ret = i40e_vc_add_qch_msg(vf, msg);
3686		break;
3687	case VIRTCHNL_OP_DISABLE_CHANNELS:
3688		ret = i40e_vc_del_qch_msg(vf, msg);
3689		break;
3690	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3691		ret = i40e_vc_add_cloud_filter(vf, msg);
3692		break;
3693	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3694		ret = i40e_vc_del_cloud_filter(vf, msg);
3695		break;
3696	case VIRTCHNL_OP_UNKNOWN:
3697	default:
3698		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3699			v_opcode, local_vf_id);
3700		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3701					      I40E_ERR_NOT_IMPLEMENTED);
3702		break;
3703	}
3704
3705	return ret;
3706}
3707
3708/**
3709 * i40e_vc_process_vflr_event
3710 * @pf: pointer to the PF structure
3711 *
3712 * called from the VFLR irq handler to
3713 * free up VF resources and state variables
3714 **/
3715int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3716{
3717	struct i40e_hw *hw = &pf->hw;
3718	u32 reg, reg_idx, bit_idx;
3719	struct i40e_vf *vf;
3720	int vf_id;
3721
3722	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3723		return 0;
3724
3725	/* Re-enable the VFLR interrupt cause here, before looking for which
3726	 * VF got reset. Otherwise, if another VF gets a reset while the
3727	 * first one is being processed, that interrupt will be lost, and
3728	 * that VF will be stuck in reset forever.
3729	 */
3730	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3731	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3732	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3733	i40e_flush(hw);
3734
3735	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3736	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
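		/* GLGEN_VFLRSTAT is banked: each 32-bit register tracks
		 * 32 VFs, so derive the register index and bit position
		 * from the VF's absolute id.
		 */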
3737		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3738		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3739		/* read GLGEN_VFLRSTAT to see which VFs have triggered an FLR */
3740		vf = &pf->vf[vf_id];
3741		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3742		if (reg & BIT(bit_idx))
3743			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
3744			i40e_reset_vf(vf, true);
3745	}
3746
3747	return 0;
3748}
3749
3750/**
3751 * i40e_ndo_set_vf_mac
3752 * @netdev: network interface device structure
3753 * @vf_id: VF identifier
3754 * @mac: mac address
3755 *
3756 * program VF mac address
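 *
 * Reached via the ndo_set_vf_mac callback; for example, assuming a
 * hypothetical PF netdev "eth0":
 *   ip link set dev eth0 vf 0 mac 02:00:00:00:00:01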
3757 **/
3758int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3759{
3760	struct i40e_netdev_priv *np = netdev_priv(netdev);
3761	struct i40e_vsi *vsi = np->vsi;
3762	struct i40e_pf *pf = vsi->back;
3763	struct i40e_mac_filter *f;
3764	struct i40e_vf *vf;
3765	int ret = 0;
3766	struct hlist_node *h;
3767	int bkt;
3768	u8 i;
3769
3770	/* validate the request */
3771	if (vf_id >= pf->num_alloc_vfs) {
3772		dev_err(&pf->pdev->dev,
3773			"Invalid VF Identifier %d\n", vf_id);
3774		ret = -EINVAL;
3775		goto error_param;
3776	}
3777
3778	vf = &(pf->vf[vf_id]);
3779	vsi = pf->vsi[vf->lan_vsi_idx];
3780
3781	/* When the VF is resetting, wait until it is done.
3782	 * The reset can take up to 200 milliseconds,
3783	 * but wait for up to 300 milliseconds to be safe.
3784	 */
3785	for (i = 0; i < 15; i++) {
3786		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
3787			break;
3788		msleep(20);
3789	}
3790	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3791		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3792			vf_id);
3793		ret = -EAGAIN;
3794		goto error_param;
3795	}
3796
3797	if (is_multicast_ether_addr(mac)) {
3798		dev_err(&pf->pdev->dev,
3799			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
3800		ret = -EINVAL;
3801		goto error_param;
3802	}
3803
3804	/* Take the lock once here because the add/del_filter functions
3805	 * invoked below require mac_filter_hash_lock to be held.
3806	 */
3807	spin_lock_bh(&vsi->mac_filter_hash_lock);
3808
3809	/* delete the temporary mac address */
3810	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
3811		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
3812
3813	/* Delete all the filters for this VSI - the VF reset below will
3814	 * tear the VSI down anyway.
3815	 */
3816	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
3817		__i40e_del_filter(vsi, f);
3818
3819	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3820
3821	/* program mac filter */
3822	if (i40e_sync_vsi_filters(vsi)) {
3823		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
3824		ret = -EIO;
3825		goto error_param;
3826	}
3827	ether_addr_copy(vf->default_lan_addr.addr, mac);
3828
3829	if (is_zero_ether_addr(mac)) {
3830		vf->pf_set_mac = false;
3831		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
3832	} else {
3833		vf->pf_set_mac = true;
3834		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
3835			 mac, vf_id);
3836	}
3837
3838	/* Force the VF driver to stop so it must reload with the new MAC address */
3839	i40e_vc_disable_vf(vf);
3840	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
3841
3842error_param:
3843	return ret;
3844}
3845
3846/**
3847 * i40e_vsi_has_vlans - True if VSI has configured VLANs
3848 * @vsi: pointer to the vsi
3849 *
3850 * Check if a VSI has any VLAN filters configured. Returns false if a port
3851 * VLAN is set or if no VLANs are configured. Do not call while holding the
3852 * mac_filter_hash_lock.
3853 */
3854static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
3855{
3856	bool have_vlans;
3857
3858	/* If we have a port VLAN, then the VSI cannot have any VLANs
3859	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
3860	 */
3861	if (vsi->info.pvid)
3862		return false;
3863
3864	/* Since we don't have a PVID, we know that if the device is in VLAN
3865	 * mode it must be because of a VLAN filter configured on this VSI.
3866	 */
3867	spin_lock_bh(&vsi->mac_filter_hash_lock);
3868	have_vlans = i40e_is_vsi_in_vlan(vsi);
3869	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3870
3871	return have_vlans;
3872}
3873
3874/**
3875 * i40e_ndo_set_vf_port_vlan
3876 * @netdev: network interface device structure
3877 * @vf_id: VF identifier
3878 * @vlan_id: VLAN identifier
3879 * @qos: priority setting
3880 * @vlan_proto: vlan protocol
3881 *
3882 * program VF vlan id and/or qos
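 *
 * Reached via the ndo_set_vf_port_vlan callback; for example, assuming a
 * hypothetical PF netdev "eth0":
 *   ip link set dev eth0 vf 0 vlan 100 qos 3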
3883 **/
3884int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
3885			      u16 vlan_id, u8 qos, __be16 vlan_proto)
3886{
3887	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
3888	struct i40e_netdev_priv *np = netdev_priv(netdev);
3889	struct i40e_pf *pf = np->vsi->back;
3890	struct i40e_vsi *vsi;
3891	struct i40e_vf *vf;
3892	int ret = 0;
3893
3894	/* validate the request */
3895	if (vf_id >= pf->num_alloc_vfs) {
3896		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3897		ret = -EINVAL;
3898		goto error_pvid;
3899	}
3900
3901	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
3902		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
3903		ret = -EINVAL;
3904		goto error_pvid;
3905	}
3906
3907	if (vlan_proto != htons(ETH_P_8021Q)) {
3908		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
3909		ret = -EPROTONOSUPPORT;
3910		goto error_pvid;
3911	}
3912
3913	vf = &(pf->vf[vf_id]);
3914	vsi = pf->vsi[vf->lan_vsi_idx];
3915	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3916		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3917			vf_id);
3918		ret = -EAGAIN;
3919		goto error_pvid;
3920	}
3921
3922	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
3923		/* duplicate request, so just return success */
3924		goto error_pvid;
3925
3926	if (i40e_vsi_has_vlans(vsi)) {
3927		dev_err(&pf->pdev->dev,
3928			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
3929			vf_id);
3930		/* Administrator error - knock the VF offline until the
3931		 * network is reconfigured correctly and the VF driver
3932		 * is reloaded.
3933		 */
3934		i40e_vc_disable_vf(vf);
3935		/* During reset the VF got a new VSI, so refresh the pointer. */
3936		vsi = pf->vsi[vf->lan_vsi_idx];
3937	}
3938
3939	/* Locked once because multiple functions below iterate list */
3940	spin_lock_bh(&vsi->mac_filter_hash_lock);
3941
3942	/* Check for condition where there was already a port VLAN ID
3943	 * filter set and now it is being deleted by setting it to zero.
3944	 * Additionally check for the condition where there was a port
3945	 * VLAN but now there is a new and different port VLAN being set.
3946	 * Before deleting all the old VLAN filters we must add new ones
3947	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
3948	 * MAC addresses deleted.
3949	 */
3950	if ((!(vlan_id || qos) ||
3951	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
3952	    vsi->info.pvid) {
3953		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
3954		if (ret) {
3955			dev_info(&vsi->back->pdev->dev,
3956				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3957				 vsi->back->hw.aq.asq_last_status);
3958			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3959			goto error_pvid;
3960		}
3961	}
3962
3963	if (vsi->info.pvid) {
3964		/* remove all filters on the old VLAN */
3965		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
3966					   VLAN_VID_MASK));
3967	}
3968
3969	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3970	if (vlan_id || qos)
3971		ret = i40e_vsi_add_pvid(vsi, vlanprio);
3972	else
3973		i40e_vsi_remove_pvid(vsi);
3974	spin_lock_bh(&vsi->mac_filter_hash_lock);
3975
3976	if (vlan_id) {
3977		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
3978			 vlan_id, qos, vf_id);
3979
3980		/* add new VLAN filter for each MAC */
3981		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
3982		if (ret) {
3983			dev_info(&vsi->back->pdev->dev,
3984				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3985				 vsi->back->hw.aq.asq_last_status);
3986			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3987			goto error_pvid;
3988		}
3989
3990		/* remove the previously added non-VLAN MAC filters */
3991		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
3992	}
3993
3994	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3995
3996	/* Schedule the worker thread to take care of applying changes */
3997	i40e_service_event_schedule(vsi->back);
3998
3999	if (ret) {
4000		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4001		goto error_pvid;
4002	}
4003
4004	/* The Port VLAN needs to be saved across resets the same as the
4005	 * default LAN MAC address.
4006	 */
4007	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4008	ret = 0;
4009
4010error_pvid:
4011	return ret;
4012}
4013
4014/**
4015 * i40e_ndo_set_vf_bw
4016 * @netdev: network interface device structure
4017 * @vf_id: VF identifier
4018 * @min_tx_rate: minimum Tx rate in Mbps; only zero is supported
 * @max_tx_rate: maximum Tx rate in Mbps
4019 *
4020 * configure VF Tx rate
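 *
 * Reached via the ndo_set_vf_rate callback; for example, assuming a
 * hypothetical PF netdev "eth0" (rates in Mbps):
 *   ip link set dev eth0 vf 0 max_tx_rate 1000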
4021 **/
4022int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4023		       int max_tx_rate)
4024{
4025	struct i40e_netdev_priv *np = netdev_priv(netdev);
4026	struct i40e_pf *pf = np->vsi->back;
4027	struct i40e_vsi *vsi;
4028	struct i40e_vf *vf;
4029	int ret = 0;
4030
4031	/* validate the request */
4032	if (vf_id >= pf->num_alloc_vfs) {
4033		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
4034		ret = -EINVAL;
4035		goto error;
4036	}
4037
4038	if (min_tx_rate) {
4039		dev_err(&pf->pdev->dev, "Invalid min Tx rate (%d) for VF %d; only 0 is supported.\n",
4040			min_tx_rate, vf_id);
4041		return -EINVAL;
4042	}
4043
4044	vf = &(pf->vf[vf_id]);
4045	vsi = pf->vsi[vf->lan_vsi_idx];
4046	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4047		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4048			vf_id);
4049		ret = -EAGAIN;
4050		goto error;
4051	}
4052
4053	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4054	if (ret)
4055		goto error;
4056
4057	vf->tx_rate = max_tx_rate;
4058error:
4059	return ret;
4060}
4061
4062/**
4063 * i40e_ndo_get_vf_config
4064 * @netdev: network interface device structure
4065 * @vf_id: VF identifier
4066 * @ivi: VF configuration structure
4067 *
4068 * return VF configuration
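 *
 * Backs the ndo_get_vf_config callback; the values returned here are
 * what "ip link show" prints on the per-VF lines for the PF netdev.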
4069 **/
4070int i40e_ndo_get_vf_config(struct net_device *netdev,
4071			   int vf_id, struct ifla_vf_info *ivi)
4072{
4073	struct i40e_netdev_priv *np = netdev_priv(netdev);
4074	struct i40e_vsi *vsi = np->vsi;
4075	struct i40e_pf *pf = vsi->back;
4076	struct i40e_vf *vf;
4077	int ret = 0;
4078
4079	/* validate the request */
4080	if (vf_id >= pf->num_alloc_vfs) {
4081		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4082		ret = -EINVAL;
4083		goto error_param;
4084	}
4085
4086	vf = &(pf->vf[vf_id]);
4087	/* first vsi is always the LAN vsi */
4088	vsi = pf->vsi[vf->lan_vsi_idx];
4089	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4090		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4091			vf_id);
4092		ret = -EAGAIN;
4093		goto error_param;
4094	}
4095
4096	ivi->vf = vf_id;
4097
4098	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4099
4100	ivi->max_tx_rate = vf->tx_rate;
4101	ivi->min_tx_rate = 0;
4102	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4103	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4104		   I40E_VLAN_PRIORITY_SHIFT;
4105	if (!vf->link_forced)
4106		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4107	else if (vf->link_up)
4108		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4109	else
4110		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4111	ivi->spoofchk = vf->spoofchk;
4112	ivi->trusted = vf->trusted;
4113	ret = 0;
4114
4115error_param:
4116	return ret;
4117}
4118
4119/**
4120 * i40e_ndo_set_vf_link_state
4121 * @netdev: network interface device structure
4122 * @vf_id: VF identifier
4123 * @link: required link state
4124 *
4125 * Set the link state of a specified VF, regardless of physical link state
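 *
 * Reached via the ndo_set_vf_link_state callback; for example, assuming a
 * hypothetical PF netdev "eth0":
 *   ip link set dev eth0 vf 0 state disable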
4126 **/
4127int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4128{
4129	struct i40e_netdev_priv *np = netdev_priv(netdev);
4130	struct i40e_pf *pf = np->vsi->back;
4131	struct virtchnl_pf_event pfe;
4132	struct i40e_hw *hw = &pf->hw;
4133	struct i40e_vf *vf;
4134	int abs_vf_id;
4135	int ret = 0;
4136
4137	/* validate the request */
4138	if (vf_id >= pf->num_alloc_vfs) {
4139		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4140		ret = -EINVAL;
4141		goto error_out;
4142	}
4143
4144	vf = &pf->vf[vf_id];
4145	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4146
4147	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4148	pfe.severity = PF_EVENT_SEVERITY_INFO;
4149
4150	switch (link) {
4151	case IFLA_VF_LINK_STATE_AUTO:
4152		vf->link_forced = false;
4153		pfe.event_data.link_event.link_status =
4154			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4155		pfe.event_data.link_event.link_speed =
4156			i40e_virtchnl_link_speed(pf->hw.phy.link_info.link_speed);
4158		break;
4159	case IFLA_VF_LINK_STATE_ENABLE:
4160		vf->link_forced = true;
4161		vf->link_up = true;
4162		pfe.event_data.link_event.link_status = true;
4163		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
4164		break;
4165	case IFLA_VF_LINK_STATE_DISABLE:
4166		vf->link_forced = true;
4167		vf->link_up = false;
4168		pfe.event_data.link_event.link_status = false;
4169		pfe.event_data.link_event.link_speed = 0;
4170		break;
4171	default:
4172		ret = -EINVAL;
4173		goto error_out;
4174	}
4175	/* Notify the VF of its new link state */
4176	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4177			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4178
4179error_out:
4180	return ret;
4181}
4182
4183/**
4184 * i40e_ndo_set_vf_spoofchk
4185 * @netdev: network interface device structure
4186 * @vf_id: VF identifier
4187 * @enable: flag to enable or disable feature
4188 *
4189 * Enable or disable VF spoof checking
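 *
 * Reached via the ndo_set_vf_spoofchk callback; for example, assuming a
 * hypothetical PF netdev "eth0":
 *   ip link set dev eth0 vf 0 spoofchk off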
4190 **/
4191int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4192{
4193	struct i40e_netdev_priv *np = netdev_priv(netdev);
4194	struct i40e_vsi *vsi = np->vsi;
4195	struct i40e_pf *pf = vsi->back;
4196	struct i40e_vsi_context ctxt;
4197	struct i40e_hw *hw = &pf->hw;
4198	struct i40e_vf *vf;
4199	int ret = 0;
4200
4201	/* validate the request */
4202	if (vf_id >= pf->num_alloc_vfs) {
4203		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4204		ret = -EINVAL;
4205		goto out;
4206	}
4207
4208	vf = &(pf->vf[vf_id]);
4209	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4210		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4211			vf_id);
4212		ret = -EAGAIN;
4213		goto out;
4214	}
4215
4216	if (enable == vf->spoofchk)
4217		goto out;
4218
4219	vf->spoofchk = enable;
4220	memset(&ctxt, 0, sizeof(ctxt));
4221	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4222	ctxt.pf_num = pf->hw.pf_id;
4223	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4224	if (enable)
4225		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4226					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
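	/* When disabling, sec_flags is left at zero (from the memset
	 * above), which turns MAC and VLAN anti-spoof checking off.
	 */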
4227	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4228	if (ret) {
4229		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4230			ret);
4231		ret = -EIO;
4232	}
4233out:
4234	return ret;
4235}
4236
4237/**
4238 * i40e_ndo_set_vf_trust
4239 * @netdev: network interface device structure of the pf
4240 * @vf_id: VF identifier
4241 * @setting: trust setting
4242 *
4243 * Enable or disable VF trust setting
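 *
 * Reached via the ndo_set_vf_trust callback; for example, assuming a
 * hypothetical PF netdev "eth0":
 *   ip link set dev eth0 vf 0 trust on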
4244 **/
4245int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4246{
4247	struct i40e_netdev_priv *np = netdev_priv(netdev);
4248	struct i40e_pf *pf = np->vsi->back;
4249	struct i40e_vf *vf;
4250	int ret = 0;
4251
4252	/* validate the request */
4253	if (vf_id >= pf->num_alloc_vfs) {
4254		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4255		return -EINVAL;
4256	}
4257
4258	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4259		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4260		return -EINVAL;
4261	}
4262
4263	vf = &pf->vf[vf_id];
4264
4265	if (setting == vf->trusted)
4266		goto out;
4267
4268	vf->trusted = setting;
4269	i40e_vc_disable_vf(vf);
4270	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4271		 vf_id, setting ? "" : "un");
4272
4273	if (vf->adq_enabled) {
4274		if (!vf->trusted) {
4275			dev_info(&pf->pdev->dev,
4276				 "VF %u no longer Trusted, deleting all cloud filters\n",
4277				 vf_id);
4278			i40e_del_all_cloud_filters(vf);
4279		}
4280	}
4281
4282out:
4283	return ret;
4284}