   1/*******************************************************************************
   2 *
   3 * Intel Ethernet Controller XL710 Family Linux Driver
   4 * Copyright(c) 2013 - 2016 Intel Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 *
  18 * The full GNU General Public License is included in this distribution in
  19 * the file called "COPYING".
  20 *
  21 * Contact Information:
  22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24 *
  25 ******************************************************************************/
  26
  27#include "i40e.h"
  28
  29/*********************notification routines***********************/
  30
  31/**
  32 * i40e_vc_vf_broadcast
  33 * @pf: pointer to the PF structure
  34 * @opcode: operation code
  35 * @retval: return value
  36 * @msg: pointer to the msg buffer
  37 * @msglen: msg length
  38 *
  39 * send a message to all VFs on a given PF
  40 **/
  41static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
  42				 enum i40e_virtchnl_ops v_opcode,
  43				 i40e_status v_retval, u8 *msg,
  44				 u16 msglen)
  45{
  46	struct i40e_hw *hw = &pf->hw;
  47	struct i40e_vf *vf = pf->vf;
  48	int i;
  49
  50	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
  51		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
  52		/* Not all vfs are enabled so skip the ones that are not */
  53		if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
  54		    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
  55			continue;
  56
  57		/* Ignore return value on purpose - a given VF may fail, but
  58		 * we need to keep going and send to all of them
  59		 */
  60		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
  61				       msg, msglen, NULL);
  62	}
  63}
  64
  65/**
  66 * i40e_vc_notify_vf_link_state
  67 * @vf: pointer to the VF structure
  68 *
  69 * send a link status message to a single VF
  70 **/
  71static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
  72{
  73	struct i40e_virtchnl_pf_event pfe;
  74	struct i40e_pf *pf = vf->pf;
  75	struct i40e_hw *hw = &pf->hw;
  76	struct i40e_link_status *ls = &pf->hw.phy.link_info;
  77	int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
  78
  79	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
  80	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
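	/* For a link state forced via ndo_set_vf_link_state there is,
	 * presumably, no real per-VF PHY to query, so a nominal 40Gb
	 * speed is reported while forced up; otherwise the PF's actual
	 * link state and speed are mirrored to the VF.
	 */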
  81	if (vf->link_forced) {
  82		pfe.event_data.link_event.link_status = vf->link_up;
  83		pfe.event_data.link_event.link_speed =
  84			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
  85	} else {
  86		pfe.event_data.link_event.link_status =
  87			ls->link_info & I40E_AQ_LINK_UP;
  88		pfe.event_data.link_event.link_speed = ls->link_speed;
  89	}
  90	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
  91			       0, (u8 *)&pfe, sizeof(pfe), NULL);
  92}
  93
  94/**
  95 * i40e_vc_notify_link_state
  96 * @pf: pointer to the PF structure
  97 *
  98 * send a link status message to all VFs on a given PF
  99 **/
 100void i40e_vc_notify_link_state(struct i40e_pf *pf)
 101{
 102	int i;
 103
 104	for (i = 0; i < pf->num_alloc_vfs; i++)
 105		i40e_vc_notify_vf_link_state(&pf->vf[i]);
 106}
 107
 108/**
 109 * i40e_vc_notify_reset
 110 * @pf: pointer to the PF structure
 111 *
 112 * indicate a pending reset to all VFs on a given PF
 113 **/
 114void i40e_vc_notify_reset(struct i40e_pf *pf)
 115{
 116	struct i40e_virtchnl_pf_event pfe;
 117
 118	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
 119	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
 120	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
 121			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
 122}
 123
 124/**
 125 * i40e_vc_notify_vf_reset
 126 * @vf: pointer to the VF structure
 127 *
 128 * indicate a pending reset to the given VF
 129 **/
 130void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 131{
 132	struct i40e_virtchnl_pf_event pfe;
 133	int abs_vf_id;
 134
 135	/* validate the request */
 136	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
 137		return;
 138
 139	/* verify if the VF is in either init or active before proceeding */
 140	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
 141	    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
 142		return;
 143
 144	abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
 145
 146	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
 147	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
 148	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
 149			       0, (u8 *)&pfe,
 150			       sizeof(struct i40e_virtchnl_pf_event), NULL);
 151}
 152/***********************misc routines*****************************/
 153
 154/**
 155 * i40e_vc_disable_vf
 156 * @pf: pointer to the PF info
 157 * @vf: pointer to the VF info
 158 *
 159 * Disable the VF through a SW reset
 160 **/
 161static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
 162{
 163	i40e_vc_notify_vf_reset(vf);
 164	i40e_reset_vf(vf, false);
 165}
 166
 167/**
 168 * i40e_vc_isvalid_vsi_id
 169 * @vf: pointer to the VF info
 170 * @vsi_id: VF relative VSI id
 171 *
 172 * check for the valid VSI id
 173 **/
 174static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 175{
 176	struct i40e_pf *pf = vf->pf;
 177	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 178
 179	return (vsi && (vsi->vf_id == vf->vf_id));
 180}
 181
 182/**
 183 * i40e_vc_isvalid_queue_id
 184 * @vf: pointer to the VF info
 185 * @vsi_id: vsi id
 186 * @qid: vsi relative queue id
 187 *
 188 * check for the valid queue id
 189 **/
 190static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
 191					    u8 qid)
 192{
 193	struct i40e_pf *pf = vf->pf;
 194	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 195
 196	return (vsi && (qid < vsi->alloc_queue_pairs));
 197}
 198
 199/**
 200 * i40e_vc_isvalid_vector_id
 201 * @vf: pointer to the VF info
 202 * @vector_id: VF relative vector id
 203 *
 204 * check for the valid vector id
 205 **/
 206static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 207{
 208	struct i40e_pf *pf = vf->pf;
 209
 210	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
 211}
 212
 213/***********************vf resource mgmt routines*****************/
 214
 215/**
 216 * i40e_vc_get_pf_queue_id
 217 * @vf: pointer to the VF info
 218 * @vsi_id: id of VSI as provided by the FW
 219 * @vsi_queue_id: vsi relative queue id
 220 *
 221 * return PF relative queue id
 222 **/
 223static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
 224				   u8 vsi_queue_id)
 225{
 226	struct i40e_pf *pf = vf->pf;
 227	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 228	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 229
 230	if (!vsi)
 231		return pf_queue_id;
 232
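	/* A VSI maps its queues either noncontiguously, in which case
	 * queue_mapping[] is a per-queue lookup table, or contiguously,
	 * in which case queue_mapping[0] holds the first PF queue and
	 * the rest follow linearly from it.
	 */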
 233	if (le16_to_cpu(vsi->info.mapping_flags) &
 234	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
 235		pf_queue_id =
 236			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
 237	else
 238		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
 239			      vsi_queue_id;
 240
 241	return pf_queue_id;
 242}
 243
 244/**
 245 * i40e_config_irq_link_list
 246 * @vf: pointer to the VF info
 247 * @vsi_id: id of VSI as given by the FW
 248 * @vecmap: irq map info
 249 *
 250 * configure irq link list from the map
 251 **/
 252static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 253				      struct i40e_virtchnl_vector_map *vecmap)
 254{
 255	unsigned long linklistmap = 0, tempmap;
 256	struct i40e_pf *pf = vf->pf;
 257	struct i40e_hw *hw = &pf->hw;
 258	u16 vsi_queue_id, pf_queue_id;
 259	enum i40e_queue_type qtype;
 260	u16 next_q, vector_id;
 261	u32 reg, reg_idx;
 262	u16 itr_idx = 0;
 263
 264	vector_id = vecmap->vector_id;
 265	/* setup the head */
 266	if (0 == vector_id)
 267		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 268	else
 269		reg_idx = I40E_VPINT_LNKLSTN(
 270		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
 271		     (vector_id - 1));
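	/* Vector 0 of each VF has its own dedicated head register
	 * (LNKLST0); the remaining (num_msix_vectors_vf - 1) vectors are
	 * packed per-VF into the LNKLSTN array.  E.g. with a hypothetical
	 * 5 MSI-X vectors per VF, VF 2 / vector 3 lands at
	 * LNKLSTN((5 - 1) * 2 + (3 - 1)) = LNKLSTN(10).
	 */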
 272
 273	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
 274		/* Special case - No queues mapped on this vector */
 275		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
 276		goto irq_list_done;
 277	}
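	/* Build one bitmap describing the queue linked list: with
	 * I40E_VIRTCHNL_SUPPORTED_QTYPES == 2, bit (2 * q) stands for RX
	 * queue q and bit (2 * q + 1) for TX queue q, so walking the set
	 * bits in order interleaves the RX/TX entries of the list.
	 */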
 278	tempmap = vecmap->rxq_map;
 279	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 280		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
 281				    vsi_queue_id));
 282	}
 283
 284	tempmap = vecmap->txq_map;
 285	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 286		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
 287				     vsi_queue_id + 1));
 288	}
 289
 290	next_q = find_first_bit(&linklistmap,
 291				(I40E_MAX_VSI_QP *
 292				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
 293	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 294	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 295	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 296	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 297
 298	wr32(hw, reg_idx, reg);
 299
 300	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 301		switch (qtype) {
 302		case I40E_QUEUE_TYPE_RX:
 303			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
 304			itr_idx = vecmap->rxitr_idx;
 305			break;
 306		case I40E_QUEUE_TYPE_TX:
 307			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
 308			itr_idx = vecmap->txitr_idx;
 309			break;
 310		default:
 311			break;
 312		}
 313
 314		next_q = find_next_bit(&linklistmap,
 315				       (I40E_MAX_VSI_QP *
 316					I40E_VIRTCHNL_SUPPORTED_QTYPES),
 317				       next_q + 1);
 318		if (next_q <
 319		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 320			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 321			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 322			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
 323							      vsi_queue_id);
 324		} else {
 325			pf_queue_id = I40E_QUEUE_END_OF_LIST;
 326			qtype = 0;
 327		}
 328
 329		/* format for the RQCTL & TQCTL regs is same */
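		/* NEXTQ_INDX/NEXTQ_TYPE chain this entry to the next one,
		 * CAUSE_ENA ties the queue's interrupt cause to the
		 * vector, and ITR_INDX picks which of the vector's ITR
		 * timers the queue uses.
		 */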
 330		reg = (vector_id) |
 331		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 332		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 333		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 334		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 335		wr32(hw, reg_idx, reg);
 336	}
 337
 338	/* if the vf is running in polling mode and using interrupt zero,
 339	 * need to disable auto-mask on enabling zero interrupt for VFs.
 340	 */
 341	if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
 342	    (vector_id == 0)) {
 343		reg = rd32(hw, I40E_GLINT_CTL);
 344		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
 345			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
 346			wr32(hw, I40E_GLINT_CTL, reg);
 347		}
 348	}
 349
 350irq_list_done:
 351	i40e_flush(hw);
 352}
 353
 354/**
 355 * i40e_release_iwarp_qvlist
 356 * @vf: pointer to the VF.
 357 *
 358 **/
 359static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
 360{
 361	struct i40e_pf *pf = vf->pf;
 362	struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
 363	u32 msix_vf;
 364	u32 i;
 365
 366	if (!vf->qvlist_info)
 367		return;
 368
 369	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 370	for (i = 0; i < qvlist_info->num_vectors; i++) {
 371		struct i40e_virtchnl_iwarp_qv_info *qv_info;
 372		u32 next_q_index, next_q_type;
 373		struct i40e_hw *hw = &pf->hw;
 374		u32 v_idx, reg_idx, reg;
 375
 376		qv_info = &qvlist_info->qv_info[i];
 377		if (!qv_info)
 378			continue;
 379		v_idx = qv_info->v_idx;
 380		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 381			/* Figure out the queue after CEQ and make that the
 382			 * first queue.
 383			 */
 384			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
 385			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
 386			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
 387					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
 388			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
 389					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
 390
 391			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 392			reg = (next_q_index &
 393			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
 394			       (next_q_type <<
 395			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 396
 397			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
 398		}
 399	}
 400	kfree(vf->qvlist_info);
 401	vf->qvlist_info = NULL;
 402}
 403
 404/**
 405 * i40e_config_iwarp_qvlist
 406 * @vf: pointer to the VF info
 407 * @qvlist_info: queue and vector list
 408 *
 409 * Return 0 on success or < 0 on error
 410 **/
 411static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
 412				    struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
 413{
 414	struct i40e_pf *pf = vf->pf;
 415	struct i40e_hw *hw = &pf->hw;
 416	struct i40e_virtchnl_iwarp_qv_info *qv_info;
 417	u32 v_idx, i, reg_idx, reg;
 418	u32 next_q_idx, next_q_type;
 419	u32 msix_vf, size;
 420
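	/* qvlist_info ends in a one-element qv_info[] array, so the
	 * allocation needs (num_vectors - 1) extra entries on top of the
	 * base structure.
	 */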
 421	size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
 422	       (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
 423						(qvlist_info->num_vectors - 1));
 424	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;
 425	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
 426
 427	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 428	for (i = 0; i < qvlist_info->num_vectors; i++) {
 429		qv_info = &qvlist_info->qv_info[i];
 430		if (!qv_info)
 431			continue;
 432		v_idx = qv_info->v_idx;
 433
 434		/* Validate vector id belongs to this vf */
 435		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
 436			goto err;
 437
 438		vf->qvlist_info->qv_info[i] = *qv_info;
 439
 440		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 441		/* We might be sharing the interrupt, so get the first queue
 442		 * index and type, push it down the list by adding the new
 443		 * queue on top. Also link it with the new queue in CEQCTL.
 444		 */
 445		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
 446		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
 447				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
 448		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
 449				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 450
 451		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 452			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
 453			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
 454			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
 455			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
 456			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
 457			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
 458			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
 459
 460			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 461			reg = (qv_info->ceq_idx &
 462			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
 463			       (I40E_QUEUE_TYPE_PE_CEQ <<
 464			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 465			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
 466		}
 467
 468		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
 469			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
 470			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
 471			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
 472
 473			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
 474		}
 475	}
 476
 477	return 0;
 478err:
 479	kfree(vf->qvlist_info);
 480	vf->qvlist_info = NULL;
 481	return -EINVAL;
 482}
 483
 484/**
 485 * i40e_config_vsi_tx_queue
 486 * @vf: pointer to the VF info
 487 * @vsi_id: id of VSI as provided by the FW
 488 * @vsi_queue_id: vsi relative queue index
 489 * @info: config. info
 490 *
 491 * configure tx queue
 492 **/
 493static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 494				    u16 vsi_queue_id,
 495				    struct i40e_virtchnl_txq_info *info)
 496{
 497	struct i40e_pf *pf = vf->pf;
 498	struct i40e_hw *hw = &pf->hw;
 499	struct i40e_hmc_obj_txq tx_ctx;
 500	struct i40e_vsi *vsi;
 501	u16 pf_queue_id;
 502	u32 qtx_ctl;
 503	int ret = 0;
 504
 505	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 506	vsi = i40e_find_vsi_from_id(pf, vsi_id);
 507
 508	/* clear the context structure first */
 509	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
 510
 511	/* only set the required fields */
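	/* the HMC context holds the ring base address in 128-byte units */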
 512	tx_ctx.base = info->dma_ring_addr / 128;
 513	tx_ctx.qlen = info->ring_len;
 514	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
 515	tx_ctx.rdylist_act = 0;
 516	tx_ctx.head_wb_ena = info->headwb_enabled;
 517	tx_ctx.head_wb_addr = info->dma_headwb_addr;
 518
 519	/* clear the context in the HMC */
 520	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
 521	if (ret) {
 522		dev_err(&pf->pdev->dev,
 523			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
 524			pf_queue_id, ret);
 525		ret = -ENOENT;
 526		goto error_context;
 527	}
 528
 529	/* set the context in the HMC */
 530	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
 531	if (ret) {
 532		dev_err(&pf->pdev->dev,
 533			"Failed to set VF LAN Tx queue context %d error: %d\n",
 534			pf_queue_id, ret);
 535		ret = -ENOENT;
 536		goto error_context;
 537	}
 538
 539	/* associate this queue with the PCI VF function */
 540	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
 541	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
 542		    & I40E_QTX_CTL_PF_INDX_MASK);
 543	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
 544		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
 545		    & I40E_QTX_CTL_VFVM_INDX_MASK);
 546	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
 547	i40e_flush(hw);
 548
 549error_context:
 550	return ret;
 551}
 552
 553/**
 554 * i40e_config_vsi_rx_queue
 555 * @vf: pointer to the VF info
 556 * @vsi_id: id of VSI as provided by the FW
 557 * @vsi_queue_id: vsi relative queue index
 558 * @info: config. info
 559 *
 560 * configure rx queue
 561 **/
 562static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 563				    u16 vsi_queue_id,
 564				    struct i40e_virtchnl_rxq_info *info)
 565{
 566	struct i40e_pf *pf = vf->pf;
 567	struct i40e_hw *hw = &pf->hw;
 568	struct i40e_hmc_obj_rxq rx_ctx;
 569	u16 pf_queue_id;
 570	int ret = 0;
 571
 572	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 573
 574	/* clear the context structure first */
 575	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
 576
 577	/* only set the required fields */
 578	rx_ctx.base = info->dma_ring_addr / 128;
 579	rx_ctx.qlen = info->ring_len;
 580
 581	if (info->splithdr_enabled) {
 582		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
 583				  I40E_RX_SPLIT_IP      |
 584				  I40E_RX_SPLIT_TCP_UDP |
 585				  I40E_RX_SPLIT_SCTP;
 586		/* header length validation */
 587		if (info->hdr_size > ((2 * 1024) - 64)) {
 588			ret = -EINVAL;
 589			goto error_param;
 590		}
 591		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 592
 593		/* set splitalways mode 10b */
 594		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
 595	}
 596
 597	/* databuffer length validation */
 598	if (info->databuffer_size > ((16 * 1024) - 128)) {
 599		ret = -EINVAL;
 600		goto error_param;
 601	}
 602	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
 603
 604	/* max pkt. length validation */
 605	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
 606		ret = -EINVAL;
 607		goto error_param;
 608	}
 609	rx_ctx.rxmax = info->max_pkt_size;
 610
 611	/* enable 32bytes desc always */
 612	rx_ctx.dsize = 1;
 613
 614	/* default values */
 615	rx_ctx.lrxqthresh = 2;
 616	rx_ctx.crcstrip = 1;
 617	rx_ctx.prefena = 1;
 618	rx_ctx.l2tsel = 1;
 619
 620	/* clear the context in the HMC */
 621	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
 622	if (ret) {
 623		dev_err(&pf->pdev->dev,
 624			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
 625			pf_queue_id, ret);
 626		ret = -ENOENT;
 627		goto error_param;
 628	}
 629
 630	/* set the context in the HMC */
 631	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
 632	if (ret) {
 633		dev_err(&pf->pdev->dev,
 634			"Failed to set VF LAN Rx queue context %d error: %d\n",
 635			pf_queue_id, ret);
 636		ret = -ENOENT;
 637		goto error_param;
 638	}
 639
 640error_param:
 641	return ret;
 642}
 643
 644/**
 645 * i40e_alloc_vsi_res
 646 * @vf: pointer to the VF info
 647 * @type: type of VSI to allocate
 648 *
 649 * alloc VF vsi context & resources
 650 **/
 651static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 652{
 653	struct i40e_mac_filter *f = NULL;
 654	struct i40e_pf *pf = vf->pf;
 655	struct i40e_vsi *vsi;
 656	int ret = 0;
 657
 658	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
 659
 660	if (!vsi) {
 661		dev_err(&pf->pdev->dev,
 662			"add vsi failed for VF %d, aq_err %d\n",
 663			vf->vf_id, pf->hw.aq.asq_last_status);
 664		ret = -ENOENT;
 665		goto error_alloc_vsi_res;
 666	}
 667	if (type == I40E_VSI_SRIOV) {
 668		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 669
 670		vf->lan_vsi_idx = vsi->idx;
 671		vf->lan_vsi_id = vsi->id;
 672		/* If the port VLAN has been configured and then the
 673		 * VF driver was removed then the VSI port VLAN
 674		 * configuration was destroyed.  Check if there is
 675		 * a port VLAN and restore the VSI configuration if
 676		 * needed.
 677		 */
 678		if (vf->port_vlan_id)
 679			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 680
 681		spin_lock_bh(&vsi->mac_filter_list_lock);
 682		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
 683			f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
 684				       vf->port_vlan_id ? vf->port_vlan_id : -1,
 685				       true, false);
 686			if (!f)
 687				dev_info(&pf->pdev->dev,
 688					 "Could not add MAC filter %pM for VF %d\n",
 689					vf->default_lan_addr.addr, vf->vf_id);
 690		}
 691		f = i40e_add_filter(vsi, brdcast,
 692				    vf->port_vlan_id ? vf->port_vlan_id : -1,
 693				    true, false);
 694		if (!f)
 695			dev_info(&pf->pdev->dev,
 696				 "Could not allocate VF broadcast filter\n");
 697		spin_unlock_bh(&vsi->mac_filter_list_lock);
 698	}
 699
 700	/* program mac filter */
 701	ret = i40e_sync_vsi_filters(vsi);
 702	if (ret)
 703		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 704
 705	/* Set VF bandwidth if specified */
 706	if (vf->tx_rate) {
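		/* the firmware appears to express the limit in 50 Mbps
		 * credit increments, hence the divide by 50
		 */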
 707		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
 708						  vf->tx_rate / 50, 0, NULL);
 709		if (ret)
 710			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
 711				vf->vf_id, ret);
 712	}
 713
 714error_alloc_vsi_res:
 715	return ret;
 716}
 717
 718/**
 719 * i40e_enable_vf_mappings
 720 * @vf: pointer to the VF info
 721 *
 722 * enable VF mappings
 723 **/
 724static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 725{
 726	struct i40e_pf *pf = vf->pf;
 727	struct i40e_hw *hw = &pf->hw;
 728	u32 reg, total_queue_pairs = 0;
 729	int j;
 730
 731	/* Tell the hardware we're using noncontiguous mapping. HW requires
 732	 * that VF queues be mapped using this method, even when they are
 733	 * contiguous in real life
 734	 */
 735	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
 736			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
 737
 738	/* enable VF vplan_qtable mappings */
 739	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
 740	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 741
 742	/* map PF queues to VF queues */
 743	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
 744		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
 745
 746		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
 747		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
 748		total_queue_pairs++;
 749	}
 750
 751	/* map PF queues to VSI */
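	/* each 32-bit VSILAN_QTABLE register packs two queue ids, queue
	 * 2j in the low halfword and queue 2j + 1 in the high one; 0x7FF
	 * in a halfword marks an unused slot
	 */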
 752	for (j = 0; j < 7; j++) {
 753		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
 754			reg = 0x07FF07FF;	/* unused */
 755		} else {
 756			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
 757							  j * 2);
 758			reg = qid;
 759			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
 760						      (j * 2) + 1);
 761			reg |= qid << 16;
 762		}
 763		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
 764				  reg);
 765	}
 766
 767	i40e_flush(hw);
 768}
 769
 770/**
 771 * i40e_disable_vf_mappings
 772 * @vf: pointer to the VF info
 773 *
 774 * disable VF mappings
 775 **/
 776static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 777{
 778	struct i40e_pf *pf = vf->pf;
 779	struct i40e_hw *hw = &pf->hw;
 780	int i;
 781
 782	/* disable qp mappings */
 783	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
 784	for (i = 0; i < I40E_MAX_VSI_QP; i++)
 785		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
 786		     I40E_QUEUE_END_OF_LIST);
 787	i40e_flush(hw);
 788}
 789
 790/**
 791 * i40e_free_vf_res
 792 * @vf: pointer to the VF info
 793 *
 794 * free VF resources
 795 **/
 796static void i40e_free_vf_res(struct i40e_vf *vf)
 797{
 798	struct i40e_pf *pf = vf->pf;
 799	struct i40e_hw *hw = &pf->hw;
 800	u32 reg_idx, reg;
 801	int i, msix_vf;
 802
 803	/* free vsi & disconnect it from the parent uplink */
 804	if (vf->lan_vsi_idx) {
 805		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
 806		vf->lan_vsi_idx = 0;
 807		vf->lan_vsi_id = 0;
 808	}
 809	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 810
 811	/* disable interrupts so the VF starts in a known state */
 812	for (i = 0; i < msix_vf; i++) {
 813		/* format is same for both registers */
 814		if (0 == i)
 815			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
 816		else
 817			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
 818						      (vf->vf_id))
 819						     + (i - 1));
 820		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
 821		i40e_flush(hw);
 822	}
 823
 824	/* clear the irq settings */
 825	for (i = 0; i < msix_vf; i++) {
 826		/* format is same for both registers */
 827		if (0 == i)
 828			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 829		else
 830			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
 831						      (vf->vf_id))
 832						     + (i - 1));
 833		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
 834		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
 835		wr32(hw, reg_idx, reg);
 836		i40e_flush(hw);
 837	}
 838	/* reset some of the state variables keeping
 839	 * track of the resources
 840	 */
 841	vf->num_queue_pairs = 0;
 842	vf->vf_states = 0;
 843	clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 844}
 845
 846/**
 847 * i40e_alloc_vf_res
 848 * @vf: pointer to the VF info
 849 *
 850 * allocate VF resources
 851 **/
 852static int i40e_alloc_vf_res(struct i40e_vf *vf)
 853{
 854	struct i40e_pf *pf = vf->pf;
 855	int total_queue_pairs = 0;
 856	int ret;
 857
 858	/* allocate hw vsi context & associated resources */
 859	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
 860	if (ret)
 861		goto error_alloc;
 862	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 863	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 864
 865	/* store the total qps number for the runtime
 866	 * VF req validation
 867	 */
 868	vf->num_queue_pairs = total_queue_pairs;
 869
 870	/* VF is now completely initialized */
 871	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 872
 873error_alloc:
 874	if (ret)
 875		i40e_free_vf_res(vf);
 876
 877	return ret;
 878}
 879
 880#define VF_DEVICE_STATUS 0xAA
 881#define VF_TRANS_PENDING_MASK 0x20
 882/**
 883 * i40e_quiesce_vf_pci
 884 * @vf: pointer to the VF structure
 885 *
 886 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 887 * if the transactions never clear.
 888 **/
 889static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 890{
 891	struct i40e_pf *pf = vf->pf;
 892	struct i40e_hw *hw = &pf->hw;
 893	int vf_abs_id, i;
 894	u32 reg;
 895
 896	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 897
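	/* CIAA/CIAD form an indirect window into the VF's PCI config
	 * space: write the target register offset (presumably the PCIe
	 * Device Status register, where 0x20 is the Transactions Pending
	 * bit) plus the VF number to CIAA, then poll CIAD until the
	 * pending bit clears.
	 */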
 898	wr32(hw, I40E_PF_PCI_CIAA,
 899	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
 900	for (i = 0; i < 100; i++) {
 901		reg = rd32(hw, I40E_PF_PCI_CIAD);
 902		if ((reg & VF_TRANS_PENDING_MASK) == 0)
 903			return 0;
 904		udelay(1);
 905	}
 906	return -EIO;
 907}
 908
 909/**
 910 * i40e_reset_vf
 911 * @vf: pointer to the VF structure
 912 * @flr: VFLR was issued or not
 913 *
 914 * reset the VF
 915 **/
 916void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 917{
 918	struct i40e_pf *pf = vf->pf;
 919	struct i40e_hw *hw = &pf->hw;
 920	bool rsd = false;
 921	int i;
 922	u32 reg;
 923
 924	if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
 925		return;
 926
 927	/* warn the VF */
 928	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 929
 930	/* In the case of a VFLR, the HW has already reset the VF and we
 931	 * just need to clean up, so don't hit the VFRTRIG register.
 932	 */
 933	if (!flr) {
 934		/* reset VF using VPGEN_VFRTRIG reg */
 935		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 936		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 937		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 938		i40e_flush(hw);
 939	}
 940
 941	if (i40e_quiesce_vf_pci(vf))
 942		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
 943			vf->vf_id);
 944
 945	/* poll VPGEN_VFRSTAT reg to make sure
 946	 * that reset is complete
 947	 */
 948	for (i = 0; i < 10; i++) {
 949		/* VF reset requires driver to first reset the VF and then
 950		 * poll the status register to make sure that the reset
 951		 * completed successfully. Due to internal HW FIFO flushes,
 952		 * we must wait 10ms before the register will be valid.
 953		 */
 954		usleep_range(10000, 20000);
 955		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
 956		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
 957			rsd = true;
 958			break;
 959		}
 960	}
 961
 962	if (flr)
 963		usleep_range(10000, 20000);
 964
 965	if (!rsd)
 966		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
 967			vf->vf_id);
 968	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
 969	/* clear the reset bit in the VPGEN_VFRTRIG reg */
 970	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 971	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 972	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 973
 974	/* On initial reset, we won't have any queues */
 975	if (vf->lan_vsi_idx == 0)
 976		goto complete_reset;
 977
 978	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
 979complete_reset:
 980	/* reallocate VF resources to reset the VSI state */
 981	i40e_free_vf_res(vf);
 982	if (!i40e_alloc_vf_res(vf)) {
 983		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 984		i40e_enable_vf_mappings(vf);
 985		set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 986		clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
 987		i40e_notify_client_of_vf_reset(pf, abs_vf_id);
 988	}
 989	/* tell the VF the reset is done */
 990	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
 991	i40e_flush(hw);
 992	clear_bit(__I40E_VF_DISABLE, &pf->state);
 993}
 994
 995/**
 996 * i40e_free_vfs
 997 * @pf: pointer to the PF structure
 998 *
 999 * free VF resources
1000 **/
1001void i40e_free_vfs(struct i40e_pf *pf)
1002{
1003	struct i40e_hw *hw = &pf->hw;
1004	u32 reg_idx, bit_idx;
1005	int i, tmp, vf_id;
1006
1007	if (!pf->vf)
1008		return;
1009	while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
1010		usleep_range(1000, 2000);
1011
1012	i40e_notify_client_of_vf_enable(pf, 0);
1013	for (i = 0; i < pf->num_alloc_vfs; i++)
1014		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
1015			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
1016					       false);
1017
1018	/* Disable IOV before freeing resources. This lets any VF drivers
1019	 * running in the host get themselves cleaned up before we yank
1020	 * the carpet out from underneath their feet.
1021	 */
1022	if (!pci_vfs_assigned(pf->pdev))
1023		pci_disable_sriov(pf->pdev);
1024	else
1025		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1026
1027	msleep(20); /* let any messages in transit get finished up */
1028
1029	/* free up VF resources */
1030	tmp = pf->num_alloc_vfs;
1031	pf->num_alloc_vfs = 0;
1032	for (i = 0; i < tmp; i++) {
1033		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
1034			i40e_free_vf_res(&pf->vf[i]);
1035		/* disable qp mappings */
1036		i40e_disable_vf_mappings(&pf->vf[i]);
1037	}
1038
1039	kfree(pf->vf);
1040	pf->vf = NULL;
1041
1042	/* This check is for when the driver is unloaded while VFs are
1043	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1044	 * before this function ever gets called.
1045	 */
1046	if (!pci_vfs_assigned(pf->pdev)) {
1047		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1048		 * work correctly when SR-IOV gets re-enabled.
1049		 */
1050		for (vf_id = 0; vf_id < tmp; vf_id++) {
1051			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1052			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1053			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1054		}
1055	}
1056	clear_bit(__I40E_VF_DISABLE, &pf->state);
1057}
1058
1059#ifdef CONFIG_PCI_IOV
1060/**
1061 * i40e_alloc_vfs
1062 * @pf: pointer to the PF structure
1063 * @num_alloc_vfs: number of VFs to allocate
1064 *
1065 * allocate VF resources
1066 **/
1067int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1068{
1069	struct i40e_vf *vfs;
1070	int i, ret = 0;
1071
1072	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1073	i40e_irq_dynamic_disable_icr0(pf);
1074
1075	/* Check to see if we're just allocating resources for extant VFs */
1076	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1077		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1078		if (ret) {
1079			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1080			pf->num_alloc_vfs = 0;
1081			goto err_iov;
1082		}
1083	}
1084	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1085	/* allocate memory */
1086	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1087	if (!vfs) {
1088		ret = -ENOMEM;
1089		goto err_alloc;
1090	}
1091	pf->vf = vfs;
1092
1093	/* apply default profile */
1094	for (i = 0; i < num_alloc_vfs; i++) {
1095		vfs[i].pf = pf;
1096		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1097		vfs[i].vf_id = i;
1098
1099		/* assign default capabilities */
1100		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1101		vfs[i].spoofchk = true;
1102		/* VF resources get allocated during reset */
1103		i40e_reset_vf(&vfs[i], false);
1104
1105	}
1106	pf->num_alloc_vfs = num_alloc_vfs;
1107
1108err_alloc:
1109	if (ret)
1110		i40e_free_vfs(pf);
1111err_iov:
1112	/* Re-enable interrupt 0. */
1113	i40e_irq_dynamic_enable_icr0(pf, false);
1114	return ret;
1115}
1116
1117#endif
1118/**
1119 * i40e_pci_sriov_enable
1120 * @pdev: pointer to a pci_dev structure
1121 * @num_vfs: number of VFs to allocate
1122 *
1123 * Enable or change the number of VFs
1124 **/
1125static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1126{
1127#ifdef CONFIG_PCI_IOV
1128	struct i40e_pf *pf = pci_get_drvdata(pdev);
1129	int pre_existing_vfs = pci_num_vf(pdev);
1130	int err = 0;
1131
1132	if (test_bit(__I40E_TESTING, &pf->state)) {
1133		dev_warn(&pdev->dev,
1134			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1135		err = -EPERM;
1136		goto err_out;
1137	}
1138
1139	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1140		i40e_free_vfs(pf);
1141	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1142		goto out;
1143
1144	if (num_vfs > pf->num_req_vfs) {
1145		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1146			 num_vfs, pf->num_req_vfs);
1147		err = -EPERM;
1148		goto err_out;
1149	}
1150
1151	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1152	err = i40e_alloc_vfs(pf, num_vfs);
1153	if (err) {
1154		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1155		goto err_out;
1156	}
1157
1158out:
1159	return num_vfs;
1160
1161err_out:
1162	return err;
1163#endif
1164	return 0;
1165}
1166
1167/**
1168 * i40e_pci_sriov_configure
1169 * @pdev: pointer to a pci_dev structure
1170 * @num_vfs: number of VFs to allocate
1171 *
1172 * Enable or change the number of VFs. Called when the user updates the number
1173 * of VFs in sysfs.
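 * (e.g. echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs).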
1174 **/
1175int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1176{
1177	struct i40e_pf *pf = pci_get_drvdata(pdev);
1178
1179	if (num_vfs) {
1180		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1181			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1182			i40e_do_reset_safe(pf,
1183					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
1184		}
1185		return i40e_pci_sriov_enable(pdev, num_vfs);
1186	}
1187
1188	if (!pci_vfs_assigned(pf->pdev)) {
1189		i40e_free_vfs(pf);
1190		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1191		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
1192	} else {
1193		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1194		return -EINVAL;
1195	}
1196	return 0;
1197}
1198
1199/***********************virtual channel routines******************/
1200
1201/**
1202 * i40e_vc_send_msg_to_vf
1203 * @vf: pointer to the VF info
1204 * @v_opcode: virtual channel opcode
1205 * @v_retval: virtual channel return value
1206 * @msg: pointer to the msg buffer
1207 * @msglen: msg length
1208 *
1209 * send msg to VF
1210 **/
1211static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1212				  u32 v_retval, u8 *msg, u16 msglen)
1213{
1214	struct i40e_pf *pf;
1215	struct i40e_hw *hw;
1216	int abs_vf_id;
1217	i40e_status aq_ret;
1218
1219	/* validate the request */
1220	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1221		return -EINVAL;
1222
1223	pf = vf->pf;
1224	hw = &pf->hw;
1225	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1226
1227	/* single place to detect unsuccessful return values */
1228	if (v_retval) {
1229		vf->num_invalid_msgs++;
1230		dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n",
1231			vf->vf_id, v_opcode, v_retval);
1232		if (vf->num_invalid_msgs >
1233		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1234			dev_err(&pf->pdev->dev,
1235				"Number of invalid messages exceeded for VF %d\n",
1236				vf->vf_id);
1237			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1238			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
1239		}
1240	} else {
1241		vf->num_valid_msgs++;
1242		/* reset the invalid counter, if a valid message is received. */
1243		vf->num_invalid_msgs = 0;
1244	}
1245
1246	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1247					msg, msglen, NULL);
1248	if (aq_ret) {
1249		dev_err(&pf->pdev->dev,
1250			"Unable to send the message to VF %d aq_err %d\n",
1251			vf->vf_id, pf->hw.aq.asq_last_status);
1252		return -EIO;
1253	}
1254
1255	return 0;
1256}
1257
1258/**
1259 * i40e_vc_send_resp_to_vf
1260 * @vf: pointer to the VF info
1261 * @opcode: operation code
1262 * @retval: return value
1263 *
1264 * send resp msg to VF
1265 **/
1266static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1267				   enum i40e_virtchnl_ops opcode,
1268				   i40e_status retval)
1269{
1270	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1271}
1272
1273/**
1274 * i40e_vc_get_version_msg
1275 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
1276 *
1277 * called from the VF to request the API version used by the PF
1278 **/
1279static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1280{
1281	struct i40e_virtchnl_version_info info = {
1282		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
1283	};
1284
1285	vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
1286	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1287	if (VF_IS_V10(vf))
1288		info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1289	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
1290				      I40E_SUCCESS, (u8 *)&info,
1291				      sizeof(struct
1292					     i40e_virtchnl_version_info));
1293}
1294
1295/**
1296 * i40e_vc_get_vf_resources_msg
1297 * @vf: pointer to the VF info
1298 * @msg: pointer to the msg buffer
1300 *
1301 * called from the VF to request its resources
1302 **/
1303static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1304{
1305	struct i40e_virtchnl_vf_resource *vfres = NULL;
1306	struct i40e_pf *pf = vf->pf;
1307	i40e_status aq_ret = 0;
1308	struct i40e_vsi *vsi;
1309	int i = 0, len = 0;
1310	int num_vsis = 1;
1311	int ret;
1312
1313	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
1314		aq_ret = I40E_ERR_PARAM;
1315		goto err;
1316	}
1317
1318	len = (sizeof(struct i40e_virtchnl_vf_resource) +
1319	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
1320
1321	vfres = kzalloc(len, GFP_KERNEL);
1322	if (!vfres) {
1323		aq_ret = I40E_ERR_NO_MEMORY;
1324		len = 0;
1325		goto err;
1326	}
1327	if (VF_IS_V11(vf))
1328		vf->driver_caps = *(u32 *)msg;
1329	else
1330		vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
1331				  I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
1332				  I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
1333
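	/* the flag set returned below is the intersection of what the VF
	 * driver asked for and what this PF and firmware can support
	 */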
1334	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
1335	vsi = pf->vsi[vf->lan_vsi_idx];
1336	if (!vsi->info.pvid)
1337		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
1338
1339	if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
1340	    (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
1341		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
1342		set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
1343	}
1344
1345	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
1346		if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1347			vfres->vf_offload_flags |=
1348				I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1349	} else {
1350		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
1351	}
1352
1353	if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
1354		if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1355			vfres->vf_offload_flags |=
1356				I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1357	}
1358
1359	if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1360		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1361
1362	if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
1363		if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1364			vfres->vf_offload_flags |=
1365					I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1366	}
1367
1368	vfres->num_vsis = num_vsis;
1369	vfres->num_queue_pairs = vf->num_queue_pairs;
1370	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1371	if (vf->lan_vsi_idx) {
1372		vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
1373		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
1374		vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
1375		/* VFs only use TC 0 */
1376		vfres->vsi_res[i].qset_handle
1377					  = le16_to_cpu(vsi->info.qs_handle[0]);
1378		ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
1379				vf->default_lan_addr.addr);
1380		i++;
1381	}
1382	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
1383
1384err:
1385	/* send the response back to the VF */
1386	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
1387				     aq_ret, (u8 *)vfres, len);
1388
1389	kfree(vfres);
1390	return ret;
1391}
1392
1393/**
1394 * i40e_vc_reset_vf_msg
1395 * @vf: pointer to the VF info
1398 *
1399 * called from the VF to reset itself,
1400 * unlike other virtchnl messages, PF driver
1401 * doesn't send the response back to the VF
1402 **/
1403static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1404{
1405	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1406		i40e_reset_vf(vf, false);
1407}
1408
1409/**
1410 * i40e_vc_config_promiscuous_mode_msg
1411 * @vf: pointer to the VF info
1412 * @msg: pointer to the msg buffer
1413 * @msglen: msg length
1414 *
1415 * called from the VF to configure the promiscuous mode of
1416 * VF vsis
1417 **/
1418static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1419					       u8 *msg, u16 msglen)
1420{
1421	struct i40e_virtchnl_promisc_info *info =
1422	    (struct i40e_virtchnl_promisc_info *)msg;
1423	struct i40e_pf *pf = vf->pf;
1424	struct i40e_hw *hw = &pf->hw;
1425	struct i40e_vsi *vsi;
1426	bool allmulti = false;
1427	i40e_status aq_ret;
1428
1429	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
1430	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1431	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1432	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1433	    (vsi->type != I40E_VSI_FCOE)) {
1434		aq_ret = I40E_ERR_PARAM;
1435		goto error_param;
1436	}
1437	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
1438		allmulti = true;
1439	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1440						       allmulti, NULL);
1441
1442error_param:
1443	/* send the response to the VF */
1444	return i40e_vc_send_resp_to_vf(vf,
1445				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1446				       aq_ret);
1447}
1448
1449/**
1450 * i40e_vc_config_queues_msg
1451 * @vf: pointer to the VF info
1452 * @msg: pointer to the msg buffer
1453 * @msglen: msg length
1454 *
1455 * called from the VF to configure the rx/tx
1456 * queues
1457 **/
1458static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1459{
1460	struct i40e_virtchnl_vsi_queue_config_info *qci =
1461	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1462	struct i40e_virtchnl_queue_pair_info *qpi;
1463	struct i40e_pf *pf = vf->pf;
1464	u16 vsi_id, vsi_queue_id;
1465	i40e_status aq_ret = 0;
1466	int i;
1467
1468	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1469		aq_ret = I40E_ERR_PARAM;
1470		goto error_param;
1471	}
1472
1473	vsi_id = qci->vsi_id;
1474	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1475		aq_ret = I40E_ERR_PARAM;
1476		goto error_param;
1477	}
1478	for (i = 0; i < qci->num_queue_pairs; i++) {
1479		qpi = &qci->qpair[i];
1480		vsi_queue_id = qpi->txq.queue_id;
1481		if ((qpi->txq.vsi_id != vsi_id) ||
1482		    (qpi->rxq.vsi_id != vsi_id) ||
1483		    (qpi->rxq.queue_id != vsi_queue_id) ||
1484		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
1485			aq_ret = I40E_ERR_PARAM;
1486			goto error_param;
1487		}
1488
1489		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
1490					     &qpi->rxq) ||
1491		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
1492					     &qpi->txq)) {
1493			aq_ret = I40E_ERR_PARAM;
1494			goto error_param;
1495		}
1496	}
1497	/* set vsi num_queue_pairs in use to num configured by VF */
1498	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
1499
1500error_param:
1501	/* send the response to the VF */
1502	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1503				       aq_ret);
1504}
1505
1506/**
1507 * i40e_vc_config_irq_map_msg
1508 * @vf: pointer to the VF info
1509 * @msg: pointer to the msg buffer
1510 * @msglen: msg length
1511 *
1512 * called from the VF to configure the irq to
1513 * queue map
1514 **/
1515static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1516{
1517	struct i40e_virtchnl_irq_map_info *irqmap_info =
1518	    (struct i40e_virtchnl_irq_map_info *)msg;
1519	struct i40e_virtchnl_vector_map *map;
1520	u16 vsi_id, vsi_queue_id, vector_id;
1521	i40e_status aq_ret = 0;
1522	unsigned long tempmap;
1523	int i;
1524
1525	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1526		aq_ret = I40E_ERR_PARAM;
1527		goto error_param;
1528	}
1529
1530	for (i = 0; i < irqmap_info->num_vectors; i++) {
1531		map = &irqmap_info->vecmap[i];
1532
1533		vector_id = map->vector_id;
1534		vsi_id = map->vsi_id;
1535		/* validate msg params */
1536		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
1537		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1538			aq_ret = I40E_ERR_PARAM;
1539			goto error_param;
1540		}
1541
1542		/* lookout for the invalid queue index */
1543		tempmap = map->rxq_map;
1544		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1545			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1546						      vsi_queue_id)) {
1547				aq_ret = I40E_ERR_PARAM;
1548				goto error_param;
1549			}
1550		}
1551
1552		tempmap = map->txq_map;
1553		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1554			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1555						      vsi_queue_id)) {
1556				aq_ret = I40E_ERR_PARAM;
1557				goto error_param;
1558			}
1559		}
1560
1561		i40e_config_irq_link_list(vf, vsi_id, map);
1562	}
1563error_param:
1564	/* send the response to the VF */
1565	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
1566				       aq_ret);
1567}
1568
1569/**
1570 * i40e_vc_enable_queues_msg
1571 * @vf: pointer to the VF info
1572 * @msg: pointer to the msg buffer
1573 * @msglen: msg length
1574 *
1575 * called from the VF to enable all or specific queue(s)
1576 **/
1577static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1578{
1579	struct i40e_virtchnl_queue_select *vqs =
1580	    (struct i40e_virtchnl_queue_select *)msg;
1581	struct i40e_pf *pf = vf->pf;
1582	u16 vsi_id = vqs->vsi_id;
1583	i40e_status aq_ret = 0;
1584
1585	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1586		aq_ret = I40E_ERR_PARAM;
1587		goto error_param;
1588	}
1589
1590	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1591		aq_ret = I40E_ERR_PARAM;
1592		goto error_param;
1593	}
1594
1595	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1596		aq_ret = I40E_ERR_PARAM;
1597		goto error_param;
1598	}
1599
1600	if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
1601		aq_ret = I40E_ERR_TIMEOUT;
1602error_param:
1603	/* send the response to the VF */
1604	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1605				       aq_ret);
1606}
1607
1608/**
1609 * i40e_vc_disable_queues_msg
1610 * @vf: pointer to the VF info
1611 * @msg: pointer to the msg buffer
1612 * @msglen: msg length
1613 *
1614 * called from the VF to disable all or specific
1615 * queue(s)
1616 **/
1617static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1618{
1619	struct i40e_virtchnl_queue_select *vqs =
1620	    (struct i40e_virtchnl_queue_select *)msg;
1621	struct i40e_pf *pf = vf->pf;
1622	i40e_status aq_ret = 0;
1623
1624	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1625		aq_ret = I40E_ERR_PARAM;
1626		goto error_param;
1627	}
1628
1629	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1630		aq_ret = I40E_ERR_PARAM;
1631		goto error_param;
1632	}
1633
1634	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1635		aq_ret = I40E_ERR_PARAM;
1636		goto error_param;
1637	}
1638
1639	if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
1640		aq_ret = I40E_ERR_TIMEOUT;
1641
1642error_param:
1643	/* send the response to the VF */
1644	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1645				       aq_ret);
1646}
1647
1648/**
1649 * i40e_vc_get_stats_msg
1650 * @vf: pointer to the VF info
1651 * @msg: pointer to the msg buffer
1652 * @msglen: msg length
1653 *
1654 * called from the VF to get vsi stats
1655 **/
1656static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1657{
1658	struct i40e_virtchnl_queue_select *vqs =
1659	    (struct i40e_virtchnl_queue_select *)msg;
1660	struct i40e_pf *pf = vf->pf;
1661	struct i40e_eth_stats stats;
1662	i40e_status aq_ret = 0;
1663	struct i40e_vsi *vsi;
1664
1665	memset(&stats, 0, sizeof(struct i40e_eth_stats));
1666
1667	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1668		aq_ret = I40E_ERR_PARAM;
1669		goto error_param;
1670	}
1671
1672	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1673		aq_ret = I40E_ERR_PARAM;
1674		goto error_param;
1675	}
1676
1677	vsi = pf->vsi[vf->lan_vsi_idx];
1678	if (!vsi) {
1679		aq_ret = I40E_ERR_PARAM;
1680		goto error_param;
1681	}
1682	i40e_update_eth_stats(vsi);
1683	stats = vsi->eth_stats;
1684
1685error_param:
1686	/* send the response back to the VF */
1687	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
1688				      (u8 *)&stats, sizeof(stats));
1689}
1690
1691/**
1692 * i40e_check_vf_permission
1693 * @vf: pointer to the VF info
1694 * @macaddr: pointer to the MAC Address being checked
1695 *
1696 * Check if the VF has permission to add or delete unicast MAC address
1697 * filters and return error code -EPERM if not.  Then check if the
1698 * address filter requested is broadcast or zero and if so return
1699 * an invalid MAC address error code.
1700 **/
1701static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
1702{
1703	struct i40e_pf *pf = vf->pf;
1704	int ret = 0;
1705
1706	if (is_broadcast_ether_addr(macaddr) ||
1707		   is_zero_ether_addr(macaddr)) {
1708		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
1709		ret = I40E_ERR_INVALID_MAC_ADDR;
1710	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
1711		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
1712		/* If the host VMM administrator has set the VF MAC address
1713		 * administratively via the ndo_set_vf_mac command then deny
1714		 * permission to the VF to add or delete unicast MAC addresses.
1715		 * The VF may request to set the MAC address filter already
1716		 * assigned to it so do not return an error in that case.
1717		 */
1718		dev_err(&pf->pdev->dev,
1719			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
1720		ret = -EPERM;
1721	}
1722	return ret;
1723}
1724
1725/**
1726 * i40e_vc_add_mac_addr_msg
1727 * @vf: pointer to the VF info
1728 * @msg: pointer to the msg buffer
1729 * @msglen: msg length
1730 *
1731 * add guest mac address filter
1732 **/
1733static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1734{
1735	struct i40e_virtchnl_ether_addr_list *al =
1736	    (struct i40e_virtchnl_ether_addr_list *)msg;
1737	struct i40e_pf *pf = vf->pf;
1738	struct i40e_vsi *vsi = NULL;
1739	u16 vsi_id = al->vsi_id;
1740	i40e_status ret = 0;
1741	int i;
1742
1743	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1744	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1745	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1746		ret = I40E_ERR_PARAM;
1747		goto error_param;
1748	}
1749
1750	for (i = 0; i < al->num_elements; i++) {
1751		ret = i40e_check_vf_permission(vf, al->list[i].addr);
1752		if (ret)
1753			goto error_param;
1754	}
1755	vsi = pf->vsi[vf->lan_vsi_idx];
1756
1757	/* Lock once, because all functions inside the for loop access the
1758	 * VSI's MAC filter list, which must be protected by the same lock.
1759	 */
1760	spin_lock_bh(&vsi->mac_filter_list_lock);
1761
1762	/* add new addresses to the list */
1763	for (i = 0; i < al->num_elements; i++) {
1764		struct i40e_mac_filter *f;
1765
1766		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
1767		if (!f) {
1768			if (i40e_is_vsi_in_vlan(vsi))
1769				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
1770							 true, false);
1771			else
1772				f = i40e_add_filter(vsi, al->list[i].addr, -1,
1773						    true, false);
1774		}
1775
1776		if (!f) {
1777			dev_err(&pf->pdev->dev,
1778				"Unable to add MAC filter %pM for VF %d\n",
1779				 al->list[i].addr, vf->vf_id);
1780			ret = I40E_ERR_PARAM;
1781			spin_unlock_bh(&vsi->mac_filter_list_lock);
1782			goto error_param;
1783		}
1784	}
1785	spin_unlock_bh(&vsi->mac_filter_list_lock);
1786
1787	/* program the updated filter list */
1788	ret = i40e_sync_vsi_filters(vsi);
1789	if (ret)
1790		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
1791			vf->vf_id, ret);
1792
1793error_param:
1794	/* send the response to the VF */
1795	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1796				       ret);
1797}
1798
1799/**
1800 * i40e_vc_del_mac_addr_msg
1801 * @vf: pointer to the VF info
1802 * @msg: pointer to the msg buffer
1803 * @msglen: msg length
1804 *
1805 * remove guest mac address filter
1806 **/
1807static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1808{
1809	struct i40e_virtchnl_ether_addr_list *al =
1810	    (struct i40e_virtchnl_ether_addr_list *)msg;
1811	struct i40e_pf *pf = vf->pf;
1812	struct i40e_vsi *vsi = NULL;
1813	u16 vsi_id = al->vsi_id;
1814	i40e_status ret = 0;
1815	int i;
1816
1817	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1818	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1819	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1820		ret = I40E_ERR_PARAM;
1821		goto error_param;
1822	}
1823
1824	for (i = 0; i < al->num_elements; i++) {
1825		if (is_broadcast_ether_addr(al->list[i].addr) ||
1826		    is_zero_ether_addr(al->list[i].addr)) {
1827			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
1828				al->list[i].addr, vf->vf_id);
1829			ret = I40E_ERR_INVALID_MAC_ADDR;
1830			goto error_param;
1831		}
1832	}
1833	vsi = pf->vsi[vf->lan_vsi_idx];
1834
1835	spin_lock_bh(&vsi->mac_filter_list_lock);
1836	/* delete addresses from the list */
1837	for (i = 0; i < al->num_elements; i++)
1838		if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
1839			ret = I40E_ERR_INVALID_MAC_ADDR;
1840			spin_unlock_bh(&vsi->mac_filter_list_lock);
1841			goto error_param;
1842		}
1843
1844	spin_unlock_bh(&vsi->mac_filter_list_lock);
1845
1846	/* program the updated filter list */
1847	ret = i40e_sync_vsi_filters(vsi);
1848	if (ret)
1849		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
1850			vf->vf_id, ret);
1851
1852error_param:
1853	/* send the response to the VF */
1854	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1855				       ret);
1856}
1857
1858/**
1859 * i40e_vc_add_vlan_msg
1860 * @vf: pointer to the VF info
1861 * @msg: pointer to the msg buffer
1862 * @msglen: msg length
1863 *
1864 * program guest vlan id
1865 **/
1866static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1867{
1868	struct i40e_virtchnl_vlan_filter_list *vfl =
1869	    (struct i40e_virtchnl_vlan_filter_list *)msg;
1870	struct i40e_pf *pf = vf->pf;
1871	struct i40e_vsi *vsi = NULL;
1872	u16 vsi_id = vfl->vsi_id;
1873	i40e_status aq_ret = 0;
1874	int i;
1875
1876	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1877	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1878	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1879		aq_ret = I40E_ERR_PARAM;
1880		goto error_param;
1881	}
1882
1883	for (i = 0; i < vfl->num_elements; i++) {
1884		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1885			aq_ret = I40E_ERR_PARAM;
1886			dev_err(&pf->pdev->dev,
1887				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
1888			goto error_param;
1889		}
1890	}
1891	vsi = pf->vsi[vf->lan_vsi_idx];
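	/* a host-configured port VLAN takes precedence, so reject guest
	 * VLAN filter requests while one is set
	 */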
1892	if (vsi->info.pvid) {
1893		aq_ret = I40E_ERR_PARAM;
1894		goto error_param;
1895	}
1896
1897	i40e_vlan_stripping_enable(vsi);
1898	for (i = 0; i < vfl->num_elements; i++) {
1899		/* add new VLAN filter */
1900		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
1901
1902		if (ret)
1903			dev_err(&pf->pdev->dev,
1904				"Unable to add VLAN filter %d for VF %d, error %d\n",
1905				vfl->vlan_id[i], vf->vf_id, ret);
1906	}
1907
1908error_param:
1909	/* send the response to the VF */
1910	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
1911}
1912
1913/**
1914 * i40e_vc_remove_vlan_msg
1915 * @vf: pointer to the VF info
1916 * @msg: pointer to the msg buffer
1917 * @msglen: msg length
1918 *
1919 * remove programmed guest vlan id
1920 **/
1921static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1922{
1923	struct i40e_virtchnl_vlan_filter_list *vfl =
1924	    (struct i40e_virtchnl_vlan_filter_list *)msg;
1925	struct i40e_pf *pf = vf->pf;
1926	struct i40e_vsi *vsi = NULL;
1927	u16 vsi_id = vfl->vsi_id;
1928	i40e_status aq_ret = 0;
1929	int i;
1930
1931	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1932	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1933	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1934		aq_ret = I40E_ERR_PARAM;
1935		goto error_param;
1936	}
1937
1938	for (i = 0; i < vfl->num_elements; i++) {
1939		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1940			aq_ret = I40E_ERR_PARAM;
1941			goto error_param;
1942		}
1943	}
1944
1945	vsi = pf->vsi[vf->lan_vsi_idx];
1946	if (vsi->info.pvid) {
1947		aq_ret = I40E_ERR_PARAM;
1948		goto error_param;
1949	}
1950
1951	for (i = 0; i < vfl->num_elements; i++) {
1952		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
1953
1954		if (ret)
1955			dev_err(&pf->pdev->dev,
1956				"Unable to delete VLAN filter %d for VF %d, error %d\n",
1957				vfl->vlan_id[i], vf->vf_id, ret);
1958	}
1959
1960error_param:
1961	/* send the response to the VF */
1962	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
1963}
1964
1965/**
1966 * i40e_vc_iwarp_msg
1967 * @vf: pointer to the VF info
1968 * @msg: pointer to the msg buffer
1969 * @msglen: msg length
1970 *
1971 * called from the VF for the iwarp msgs
1972 **/
1973static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1974{
1975	struct i40e_pf *pf = vf->pf;
1976	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
1977	i40e_status aq_ret = 0;
1978
1979	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1980	    !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
1981		aq_ret = I40E_ERR_PARAM;
1982		goto error_param;
1983	}
1984
1985	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
1986				     msg, msglen);
1987
1988error_param:
1989	/* send the response to the VF */
1990	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
1991				       aq_ret);
1992}
1993
1994/**
1995 * i40e_vc_iwarp_qvmap_msg
1996 * @vf: pointer to the VF info
1997 * @msg: pointer to the msg buffer
1998 * @msglen: msg length
1999 * @config: config qvmap or release it
2000 *
2001 * called from the VF for the iwarp msgs
2002 **/
2003static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2004				   bool config)
2005{
2006	struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
2007				(struct i40e_virtchnl_iwarp_qvlist_info *)msg;
2008	i40e_status aq_ret = 0;
2009
2010	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
2011	    !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
2012		aq_ret = I40E_ERR_PARAM;
2013		goto error_param;
2014	}
2015
2016	if (config) {
2017		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2018			aq_ret = I40E_ERR_PARAM;
2019	} else {
2020		i40e_release_iwarp_qvlist(vf);
2021	}
2022
2023error_param:
2024	/* send the response to the VF */
2025	return i40e_vc_send_resp_to_vf(vf,
2026			       config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2027			       I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2028			       aq_ret);
2029}
2030
2031/**
2032 * i40e_vc_validate_vf_msg
2033 * @vf: pointer to the VF info
2034 * @v_opcode: virtchnl operation code
2035 * @msg: pointer to the msg buffer
2036 * @msglen: msg length
2037 *
2038 * validate msg
2039 **/
2040static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
2041				   u32 v_retval, u8 *msg, u16 msglen)
2042{
2043	bool err_msg_format = false;
2044	int valid_len;
2045
2046	/* Check if VF is disabled. */
2047	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
2048		return I40E_ERR_PARAM;
2049
2050	/* Validate message length. */
2051	switch (v_opcode) {
2052	case I40E_VIRTCHNL_OP_VERSION:
2053		valid_len = sizeof(struct i40e_virtchnl_version_info);
2054		break;
2055	case I40E_VIRTCHNL_OP_RESET_VF:
2056		valid_len = 0;
2057		break;
2058	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
2059		if (VF_IS_V11(vf))
2060			valid_len = sizeof(u32);
2061		else
2062			valid_len = 0;
2063		break;
2064	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
2065		valid_len = sizeof(struct i40e_virtchnl_txq_info);
2066		break;
2067	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
2068		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
2069		break;
2070	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2071		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
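		/* variable-length message: the header carries the element
		 * count, so grow the expected length accordingly
		 */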
2072		if (msglen >= valid_len) {
2073			struct i40e_virtchnl_vsi_queue_config_info *vqc =
2074			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
2075			valid_len += (vqc->num_queue_pairs *
2076				      sizeof(struct
2077					     i40e_virtchnl_queue_pair_info));
2078			if (vqc->num_queue_pairs == 0)
2079				err_msg_format = true;
2080		}
2081		break;
2082	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
2083		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
2084		if (msglen >= valid_len) {
2085			struct i40e_virtchnl_irq_map_info *vimi =
2086			    (struct i40e_virtchnl_irq_map_info *)msg;
2087			valid_len += (vimi->num_vectors *
2088				      sizeof(struct i40e_virtchnl_vector_map));
2089			if (vimi->num_vectors == 0)
2090				err_msg_format = true;
2091		}
2092		break;
2093	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
2094	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
2095		valid_len = sizeof(struct i40e_virtchnl_queue_select);
2096		break;
2097	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
2098	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
2099		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
2100		if (msglen >= valid_len) {
2101			struct i40e_virtchnl_ether_addr_list *veal =
2102			    (struct i40e_virtchnl_ether_addr_list *)msg;
2103			valid_len += veal->num_elements *
2104			    sizeof(struct i40e_virtchnl_ether_addr);
2105			if (veal->num_elements == 0)
2106				err_msg_format = true;
2107		}
2108		break;
2109	case I40E_VIRTCHNL_OP_ADD_VLAN:
2110	case I40E_VIRTCHNL_OP_DEL_VLAN:
2111		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
2112		if (msglen >= valid_len) {
2113			struct i40e_virtchnl_vlan_filter_list *vfl =
2114			    (struct i40e_virtchnl_vlan_filter_list *)msg;
2115			valid_len += vfl->num_elements * sizeof(u16);
2116			if (vfl->num_elements == 0)
2117				err_msg_format = true;
2118		}
2119		break;
2120	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
2121		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
2122		break;
2123	case I40E_VIRTCHNL_OP_GET_STATS:
2124		valid_len = sizeof(struct i40e_virtchnl_queue_select);
2125		break;
2126	case I40E_VIRTCHNL_OP_IWARP:
2127		/* These messages are opaque to us and will be validated in
2128		 * the RDMA client code. We just need to check for nonzero
2129		 * length. The firmware will enforce max length restrictions.
2130		 */
2131		if (msglen)
2132			valid_len = msglen;
2133		else
2134			err_msg_format = true;
2135		break;
2136	case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
2137		valid_len = 0;
2138		break;
2139	case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
2140		valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
2141		if (msglen >= valid_len) {
2142			struct i40e_virtchnl_iwarp_qvlist_info *qv =
2143				(struct i40e_virtchnl_iwarp_qvlist_info *)msg;
2144			if (qv->num_vectors == 0) {
2145				err_msg_format = true;
2146				break;
2147			}
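			/* the qvlist_info struct already embeds one
			 * qv_info element, hence num_vectors - 1 below
			 */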
2148			valid_len += ((qv->num_vectors - 1) *
2149				sizeof(struct i40e_virtchnl_iwarp_qv_info));
2150		}
2151		break;
2152	/* These are always errors coming from the VF. */
2153	case I40E_VIRTCHNL_OP_EVENT:
2154	case I40E_VIRTCHNL_OP_UNKNOWN:
2155	default:
2156		return -EPERM;
2157	}
2158	/* few more checks */
2159	if ((valid_len != msglen) || (err_msg_format)) {
2160		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
2161		return -EINVAL;
2162	} else {
2163		return 0;
2164	}
2165}
2166
2167/**
2168 * i40e_vc_process_vf_msg
2169 * @pf: pointer to the PF structure
2170 * @vf_id: source VF id
2171 * @v_opcode: operation code
2172 * @msg: pointer to the msg buffer
2173 * @msglen: msg length
2174 *
2175 * called from the common aeq/arq handler to
2176 * process request from VF
2177 **/
2178int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
2179			   u32 v_retval, u8 *msg, u16 msglen)
2180{
2181	struct i40e_hw *hw = &pf->hw;
2182	unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
2183	struct i40e_vf *vf;
2184	int ret;
2185
2186	pf->vf_aq_requests++;
2187	if (local_vf_id >= pf->num_alloc_vfs)
2188		return -EINVAL;
2189	vf = &(pf->vf[local_vf_id]);
2190	/* perform basic checks on the msg */
2191	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
2192
2193	if (ret) {
2194		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
2195			local_vf_id, v_opcode, msglen);
2196		return ret;
2197	}
2198
2199	switch (v_opcode) {
2200	case I40E_VIRTCHNL_OP_VERSION:
2201		ret = i40e_vc_get_version_msg(vf, msg);
2202		break;
2203	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
2204		ret = i40e_vc_get_vf_resources_msg(vf, msg);
2205		break;
2206	case I40E_VIRTCHNL_OP_RESET_VF:
2207		i40e_vc_reset_vf_msg(vf);
2208		ret = 0;
2209		break;
2210	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
2211		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
2212		break;
2213	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2214		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
2215		break;
2216	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
2217		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
2218		break;
2219	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
2220		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
2221		i40e_vc_notify_vf_link_state(vf);
2222		break;
2223	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
2224		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
2225		break;
2226	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
2227		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
2228		break;
2229	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
2230		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
2231		break;
2232	case I40E_VIRTCHNL_OP_ADD_VLAN:
2233		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
2234		break;
2235	case I40E_VIRTCHNL_OP_DEL_VLAN:
2236		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
2237		break;
2238	case I40E_VIRTCHNL_OP_GET_STATS:
2239		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
2240		break;
2241	case I40E_VIRTCHNL_OP_IWARP:
2242		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
2243		break;
2244	case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
2245		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
2246		break;
2247	case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
2248		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
2249		break;
2250	case I40E_VIRTCHNL_OP_UNKNOWN:
2251	default:
2252		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2253			v_opcode, local_vf_id);
2254		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
2255					      I40E_ERR_NOT_IMPLEMENTED);
2256		break;
2257	}
2258
2259	return ret;
2260}
2261
2262/**
2263 * i40e_vc_process_vflr_event
2264 * @pf: pointer to the PF structure
2265 *
2266 * called from the VFLR irq handler to
2267 * free up VF resources and state variables
2268 **/
2269int i40e_vc_process_vflr_event(struct i40e_pf *pf)
2270{
2271	u32 reg, reg_idx, bit_idx, vf_id;
2272	struct i40e_hw *hw = &pf->hw;
2273	struct i40e_vf *vf;
2274
2275	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
2276		return 0;
2277
2278	/* Re-enable the VFLR interrupt cause here, before looking for which
2279	 * VF got reset. Otherwise, if another VF gets a reset while the
2280	 * first one is being processed, that interrupt will be lost, and
2281	 * that VF will be stuck in reset forever.
2282	 */
2283	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2284	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
2285	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2286	i40e_flush(hw);
2287
2288	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2289	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
2290		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2291		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
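		/* GLGEN_VFLRSTAT packs 32 VFs per register; locate this
		 * VF's register index and bit within it
		 */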
2292		/* read GLGEN_VFLRSTAT register to find out which VFs got reset */
2293		vf = &pf->vf[vf_id];
2294		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
2295		if (reg & BIT(bit_idx)) {
2296			/* clear the bit in GLGEN_VFLRSTAT */
2297			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
2298
2299			if (!test_bit(__I40E_DOWN, &pf->state))
2300				i40e_reset_vf(vf, true);
2301		}
2302	}
2303
2304	return 0;
2305}
2306
2307/**
2308 * i40e_ndo_set_vf_mac
2309 * @netdev: network interface device structure
2310 * @vf_id: VF identifier
2311 * @mac: mac address
2312 *
2313 * program VF mac address
2314 **/
2315int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2316{
2317	struct i40e_netdev_priv *np = netdev_priv(netdev);
2318	struct i40e_vsi *vsi = np->vsi;
2319	struct i40e_pf *pf = vsi->back;
2320	struct i40e_mac_filter *f;
2321	struct i40e_vf *vf;
2322	int ret = 0;
2323
2324	/* validate the request */
2325	if (vf_id >= pf->num_alloc_vfs) {
2326		dev_err(&pf->pdev->dev,
2327			"Invalid VF Identifier %d\n", vf_id);
2328		ret = -EINVAL;
2329		goto error_param;
2330	}
2331
2332	vf = &(pf->vf[vf_id]);
2333	vsi = pf->vsi[vf->lan_vsi_idx];
2334	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2335		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
2336			vf_id);
2337		ret = -EAGAIN;
2338		goto error_param;
2339	}
2340
2341	if (is_multicast_ether_addr(mac)) {
2342		dev_err(&pf->pdev->dev,
2343			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
2344		ret = -EINVAL;
2345		goto error_param;
2346	}
2347
2348	/* Lock once because the add/del_filter functions invoked below
2349	 * require mac_filter_list_lock to be held
2350	 */
2351	spin_lock_bh(&vsi->mac_filter_list_lock);
2352
2353	/* delete the temporary mac address */
2354	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
2355		i40e_del_filter(vsi, vf->default_lan_addr.addr,
2356				vf->port_vlan_id ? vf->port_vlan_id : -1,
2357				true, false);
2358
2359	/* Delete all the filters for this VSI - we're going to kill it
2360	 * anyway.
2361	 */
2362	list_for_each_entry(f, &vsi->mac_filter_list, list)
2363		i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
2364
2365	spin_unlock_bh(&vsi->mac_filter_list_lock);
2366
2367	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
2368	/* program mac filter */
2369	if (i40e_sync_vsi_filters(vsi)) {
2370		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
2371		ret = -EIO;
2372		goto error_param;
2373	}
2374	ether_addr_copy(vf->default_lan_addr.addr, mac);
2375	vf->pf_set_mac = true;
2376	/* Force the VF driver stop so it has to reload with new MAC address */
2377	i40e_vc_disable_vf(pf, vf);
2378	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2379
2380error_param:
2381	return ret;
2382}
2383
2384/**
2385 * i40e_ndo_set_vf_port_vlan
2386 * @netdev: network interface device structure
2387 * @vf_id: VF identifier
2388 * @vlan_id: VLAN ID to set
2389 * @qos: priority setting
2390 *
2391 * program VF vlan id and/or qos
2392 **/
2393int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2394			      int vf_id, u16 vlan_id, u8 qos)
2395{
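	/* vlanprio folds the VLAN id and 802.1p priority into the 16-bit
	 * TCI layout stored in vsi->info.pvid
	 */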
2396	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
2397	struct i40e_netdev_priv *np = netdev_priv(netdev);
2398	struct i40e_pf *pf = np->vsi->back;
2399	bool is_vsi_in_vlan = false;
2400	struct i40e_vsi *vsi;
2401	struct i40e_vf *vf;
2402	int ret = 0;
2403
2404	/* validate the request */
2405	if (vf_id >= pf->num_alloc_vfs) {
2406		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2407		ret = -EINVAL;
2408		goto error_pvid;
2409	}
2410
2411	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
2412		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2413		ret = -EINVAL;
2414		goto error_pvid;
2415	}
2416
2417	vf = &(pf->vf[vf_id]);
2418	vsi = pf->vsi[vf->lan_vsi_idx];
2419	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2420		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
2421			vf_id);
2422		ret = -EAGAIN;
2423		goto error_pvid;
2424	}
2425
2426	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
2427		/* duplicate request, so just return success */
2428		goto error_pvid;
2429
2430	spin_lock_bh(&vsi->mac_filter_list_lock);
2431	is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
2432	spin_unlock_bh(&vsi->mac_filter_list_lock);
2433
2434	if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
2435		dev_err(&pf->pdev->dev,
2436			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2437			vf_id);
2438		/* Administrator Error - knock the VF offline until he does
2439		 * the right thing by reconfiguring his network correctly
2440		 * and then reloading the VF driver.
2441		 */
2442		i40e_vc_disable_vf(pf, vf);
2443		/* During reset the VF got a new VSI, so refresh the pointer. */
2444		vsi = pf->vsi[vf->lan_vsi_idx];
2445	}
2446
2447	/* Check for condition where there was already a port VLAN ID
2448	 * filter set and now it is being deleted by setting it to zero.
2449	 * Additionally check for the condition where there was a port
2450	 * VLAN but now there is a new and different port VLAN being set.
2451	 * Before deleting all the old VLAN filters we must add new ones
2452	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2453	 * MAC addresses deleted.
2454	 */
2455	if ((!(vlan_id || qos) ||
2456	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
2457	    vsi->info.pvid)
2458		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
2459
2460	if (vsi->info.pvid) {
2461		/* kill old VLAN */
2462		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2463					       VLAN_VID_MASK));
2464		if (ret) {
2465			dev_info(&vsi->back->pdev->dev,
2466				 "remove VLAN failed, ret=%d, aq_err=%d\n",
2467				 ret, pf->hw.aq.asq_last_status);
2468		}
2469	}
2470	if (vlan_id || qos)
2471		ret = i40e_vsi_add_pvid(vsi, vlanprio);
2472	else
2473		i40e_vsi_remove_pvid(vsi);
2474
2475	if (vlan_id) {
2476		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2477			 vlan_id, qos, vf_id);
2478
2479		/* add new VLAN filter */
2480		ret = i40e_vsi_add_vlan(vsi, vlan_id);
2481		if (ret) {
2482			dev_info(&vsi->back->pdev->dev,
2483				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
2484				 vsi->back->hw.aq.asq_last_status);
2485			goto error_pvid;
2486		}
2487		/* Kill non-vlan MAC filters - ignore error return since
2488		 * there might not be any non-vlan MAC filters.
2489		 */
2490		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
2491	}
2492
2493	if (ret) {
2494		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
2495		goto error_pvid;
2496	}
2497	/* The Port VLAN needs to be saved across resets the same as the
2498	 * default LAN MAC address.
2499	 */
2500	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2501	ret = 0;
2502
2503error_pvid:
2504	return ret;
2505}
2506
2507#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
2508#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
2509/**
2510 * i40e_ndo_set_vf_bw
2511 * @netdev: network interface device structure
2512 * @vf_id: VF identifier
2513 * @min_tx_rate: minimum Tx rate (must be zero); @max_tx_rate: maximum Tx rate in Mbps
2514 *
2515 * configure VF Tx rate
2516 **/
2517int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
2518		       int max_tx_rate)
2519{
2520	struct i40e_netdev_priv *np = netdev_priv(netdev);
2521	struct i40e_pf *pf = np->vsi->back;
2522	struct i40e_vsi *vsi;
2523	struct i40e_vf *vf;
2524	int speed = 0;
2525	int ret = 0;
2526
2527	/* validate the request */
2528	if (vf_id >= pf->num_alloc_vfs) {
2529		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
2530		ret = -EINVAL;
2531		goto error;
2532	}
2533
2534	if (min_tx_rate) {
2535		dev_err(&pf->pdev->dev, "Invalid min Tx rate (%d) specified for VF %d; min Tx rate must be 0.\n",
2536			min_tx_rate, vf_id);
2537		return -EINVAL;
2538	}
2539
2540	vf = &(pf->vf[vf_id]);
2541	vsi = pf->vsi[vf->lan_vsi_idx];
2542	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2543		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
2544			vf_id);
2545		ret = -EAGAIN;
2546		goto error;
2547	}
2548
2549	switch (pf->hw.phy.link_info.link_speed) {
2550	case I40E_LINK_SPEED_40GB:
2551		speed = 40000;
2552		break;
2553	case I40E_LINK_SPEED_20GB:
2554		speed = 20000;
2555		break;
2556	case I40E_LINK_SPEED_10GB:
2557		speed = 10000;
2558		break;
2559	case I40E_LINK_SPEED_1GB:
2560		speed = 1000;
2561		break;
2562	default:
2563		break;
2564	}
2565
2566	if (max_tx_rate > speed) {
2567		dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.",
2568			max_tx_rate, vf->vf_id);
2569		ret = -EINVAL;
2570		goto error;
2571	}
2572
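	/* a nonzero rate below one 50 Mbps credit would divide down to
	 * zero, which means disabled, so round up to the minimum
	 */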
2573	if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
2574		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
2575		max_tx_rate = 50;
2576	}
2577
2578	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
2579	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
2580					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
2581					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
2582	if (ret) {
2583		dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
2584			ret);
2585		ret = -EIO;
2586		goto error;
2587	}
2588	vf->tx_rate = max_tx_rate;
2589error:
2590	return ret;
2591}
2592
2593/**
2594 * i40e_ndo_get_vf_config
2595 * @netdev: network interface device structure
2596 * @vf_id: VF identifier
2597 * @ivi: VF configuration structure
2598 *
2599 * return VF configuration
2600 **/
2601int i40e_ndo_get_vf_config(struct net_device *netdev,
2602			   int vf_id, struct ifla_vf_info *ivi)
2603{
2604	struct i40e_netdev_priv *np = netdev_priv(netdev);
2605	struct i40e_vsi *vsi = np->vsi;
2606	struct i40e_pf *pf = vsi->back;
2607	struct i40e_vf *vf;
2608	int ret = 0;
2609
2610	/* validate the request */
2611	if (vf_id >= pf->num_alloc_vfs) {
2612		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2613		ret = -EINVAL;
2614		goto error_param;
2615	}
2616
2617	vf = &(pf->vf[vf_id]);
2618	/* first vsi is always the LAN vsi */
2619	vsi = pf->vsi[vf->lan_vsi_idx];
2620	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2621		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
2622			vf_id);
2623		ret = -EAGAIN;
2624		goto error_param;
2625	}
2626
2627	ivi->vf = vf_id;
2628
2629	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
2630
2631	ivi->max_tx_rate = vf->tx_rate;
2632	ivi->min_tx_rate = 0;
2633	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2634	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2635		   I40E_VLAN_PRIORITY_SHIFT;
2636	if (!vf->link_forced)
2637		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
2638	else if (vf->link_up)
2639		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2640	else
2641		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2642	ivi->spoofchk = vf->spoofchk;
2643	ret = 0;
2644
2645error_param:
2646	return ret;
2647}
2648
2649/**
2650 * i40e_ndo_set_vf_link_state
2651 * @netdev: network interface device structure
2652 * @vf_id: VF identifier
2653 * @link: required link state
2654 *
2655 * Set the link state of a specified VF, regardless of physical link state
2656 **/
2657int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2658{
2659	struct i40e_netdev_priv *np = netdev_priv(netdev);
2660	struct i40e_pf *pf = np->vsi->back;
2661	struct i40e_virtchnl_pf_event pfe;
2662	struct i40e_hw *hw = &pf->hw;
2663	struct i40e_vf *vf;
2664	int abs_vf_id;
2665	int ret = 0;
2666
2667	/* validate the request */
2668	if (vf_id >= pf->num_alloc_vfs) {
2669		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2670		ret = -EINVAL;
2671		goto error_out;
2672	}
2673
2674	vf = &pf->vf[vf_id];
2675	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
2676
2677	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2678	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2679
2680	switch (link) {
2681	case IFLA_VF_LINK_STATE_AUTO:
2682		vf->link_forced = false;
2683		pfe.event_data.link_event.link_status =
2684			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2685		pfe.event_data.link_event.link_speed =
2686			pf->hw.phy.link_info.link_speed;
2687		break;
2688	case IFLA_VF_LINK_STATE_ENABLE:
2689		vf->link_forced = true;
2690		vf->link_up = true;
2691		pfe.event_data.link_event.link_status = true;
2692		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
2693		break;
2694	case IFLA_VF_LINK_STATE_DISABLE:
2695		vf->link_forced = true;
2696		vf->link_up = false;
2697		pfe.event_data.link_event.link_status = false;
2698		pfe.event_data.link_event.link_speed = 0;
2699		break;
2700	default:
2701		ret = -EINVAL;
2702		goto error_out;
2703	}
2704	/* Notify the VF of its new link state */
2705	i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
2706			       0, (u8 *)&pfe, sizeof(pfe), NULL);
2707
2708error_out:
2709	return ret;
2710}
2711
2712/**
2713 * i40e_ndo_set_vf_spoofchk
2714 * @netdev: network interface device structure
2715 * @vf_id: VF identifier
2716 * @enable: flag to enable or disable feature
2717 *
2718 * Enable or disable VF spoof checking
2719 **/
2720int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
2721{
2722	struct i40e_netdev_priv *np = netdev_priv(netdev);
2723	struct i40e_vsi *vsi = np->vsi;
2724	struct i40e_pf *pf = vsi->back;
2725	struct i40e_vsi_context ctxt;
2726	struct i40e_hw *hw = &pf->hw;
2727	struct i40e_vf *vf;
2728	int ret = 0;
2729
2730	/* validate the request */
2731	if (vf_id >= pf->num_alloc_vfs) {
2732		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2733		ret = -EINVAL;
2734		goto out;
2735	}
2736
2737	vf = &(pf->vf[vf_id]);
2738	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2739		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
2740			vf_id);
2741		ret = -EAGAIN;
2742		goto out;
2743	}
2744
2745	if (enable == vf->spoofchk)
2746		goto out;
2747
2748	vf->spoofchk = enable;
2749	memset(&ctxt, 0, sizeof(ctxt));
2750	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
2751	ctxt.pf_num = pf->hw.pf_id;
2752	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
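	/* these security flags turn on both MAC and VLAN anti-spoof
	 * checking in the hardware for this VSI
	 */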
2753	if (enable)
2754		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
2755					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
2756	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2757	if (ret) {
2758		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
2759			ret);
2760		ret = -EIO;
2761	}
2762out:
2763	return ret;
2764}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2013 - 2018 Intel Corporation. */
   3
   4#include "i40e.h"
   5
   6/*********************notification routines***********************/
   7
   8/**
   9 * i40e_vc_vf_broadcast
  10 * @pf: pointer to the PF structure
  11 * @v_opcode: operation code
  12 * @v_retval: return value
  13 * @msg: pointer to the msg buffer
  14 * @msglen: msg length
  15 *
  16 * send a message to all VFs on a given PF
  17 **/
  18static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
  19				 enum virtchnl_ops v_opcode,
  20				 i40e_status v_retval, u8 *msg,
  21				 u16 msglen)
  22{
  23	struct i40e_hw *hw = &pf->hw;
  24	struct i40e_vf *vf = pf->vf;
  25	int i;
  26
  27	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
  28		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
  29		/* Not all vfs are enabled so skip the ones that are not */
  30		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
  31		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
  32			continue;
  33
  34		/* Ignore return value on purpose - a given VF may fail, but
  35		 * we need to keep going and send to all of them
  36		 */
  37		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
  38				       msg, msglen, NULL);
  39	}
  40}
  41
  42/**
  43 * i40e_vc_link_speed2mbps
  44 * converts i40e_aq_link_speed to integer value of Mbps
  45 * @link_speed: the speed to convert
  46 *
  47 * return the speed as direct value of Mbps.
  48 **/
  49static u32
  50i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
  51{
  52	switch (link_speed) {
  53	case I40E_LINK_SPEED_100MB:
  54		return SPEED_100;
  55	case I40E_LINK_SPEED_1GB:
  56		return SPEED_1000;
  57	case I40E_LINK_SPEED_2_5GB:
  58		return SPEED_2500;
  59	case I40E_LINK_SPEED_5GB:
  60		return SPEED_5000;
  61	case I40E_LINK_SPEED_10GB:
  62		return SPEED_10000;
  63	case I40E_LINK_SPEED_20GB:
  64		return SPEED_20000;
  65	case I40E_LINK_SPEED_25GB:
  66		return SPEED_25000;
  67	case I40E_LINK_SPEED_40GB:
  68		return SPEED_40000;
  69	case I40E_LINK_SPEED_UNKNOWN:
  70		return SPEED_UNKNOWN;
  71	}
  72	return SPEED_UNKNOWN;
  73}
  74
  75/**
  76 * i40e_set_vf_link_state
  77 * @vf: pointer to the VF structure
  78 * @pfe: pointer to PF event structure
  79 * @ls: pointer to link status structure
  80 *
  81 * set a link state on a single vf
  82 **/
  83static void i40e_set_vf_link_state(struct i40e_vf *vf,
  84				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
  85{
  86	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
  87
  88	if (vf->link_forced)
  89		link_status = vf->link_up;
  90
  91	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
  92		pfe->event_data.link_event_adv.link_speed = link_status ?
  93			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
  94		pfe->event_data.link_event_adv.link_status = link_status;
  95	} else {
  96		pfe->event_data.link_event.link_speed = link_status ?
  97			i40e_virtchnl_link_speed(ls->link_speed) : 0;
  98		pfe->event_data.link_event.link_status = link_status;
  99	}
 100}
 101
 102/**
 103 * i40e_vc_notify_vf_link_state
 104 * @vf: pointer to the VF structure
 105 *
 106 * send a link status message to a single VF
 107 **/
 108static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 109{
 110	struct virtchnl_pf_event pfe;
 111	struct i40e_pf *pf = vf->pf;
 112	struct i40e_hw *hw = &pf->hw;
 113	struct i40e_link_status *ls = &pf->hw.phy.link_info;
 114	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
 115
 116	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 117	pfe.severity = PF_EVENT_SEVERITY_INFO;
 118
 119	i40e_set_vf_link_state(vf, &pfe, ls);
 120
  121	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 122			       0, (u8 *)&pfe, sizeof(pfe), NULL);
 123}
 124
 125/**
 126 * i40e_vc_notify_link_state
 127 * @pf: pointer to the PF structure
 128 *
 129 * send a link status message to all VFs on a given PF
 130 **/
 131void i40e_vc_notify_link_state(struct i40e_pf *pf)
 132{
 133	int i;
 134
 135	for (i = 0; i < pf->num_alloc_vfs; i++)
 136		i40e_vc_notify_vf_link_state(&pf->vf[i]);
 137}
 138
 139/**
 140 * i40e_vc_notify_reset
 141 * @pf: pointer to the PF structure
 142 *
 143 * indicate a pending reset to all VFs on a given PF
 144 **/
 145void i40e_vc_notify_reset(struct i40e_pf *pf)
 146{
 147	struct virtchnl_pf_event pfe;
 148
 149	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 150	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 151	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
 152			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
 153}
 154
 155/**
 156 * i40e_vc_notify_vf_reset
 157 * @vf: pointer to the VF structure
 158 *
 159 * indicate a pending reset to the given VF
 160 **/
 161void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 162{
 163	struct virtchnl_pf_event pfe;
 164	int abs_vf_id;
 165
 166	/* validate the request */
 167	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
 168		return;
 169
 170	/* verify if the VF is in either init or active before proceeding */
 171	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
 172	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
 173		return;
 174
 175	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
 176
 177	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 178	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 179	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 180			       0, (u8 *)&pfe,
 181			       sizeof(struct virtchnl_pf_event), NULL);
 182}
 183/***********************misc routines*****************************/
 184
 185/**
  186 * i40e_vc_reset_vf
 187 * @vf: pointer to the VF info
 188 * @notify_vf: notify vf about reset or not
 189 * Reset VF handler.
 190 **/
 191static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
 192{
 193	struct i40e_pf *pf = vf->pf;
 194	int i;
 195
 196	if (notify_vf)
 197		i40e_vc_notify_vf_reset(vf);
 198
  199	/* We want to ensure that an actual reset is initiated after this
  200	 * function is called. However, we do not want to wait forever, so
  201	 * we'll give a reasonable time and print a message if we failed to
  202	 * ensure a reset.
  203	 */
 204	for (i = 0; i < 20; i++) {
 205		/* If PF is in VFs releasing state reset VF is impossible,
 206		 * so leave it.
 207		 */
 208		if (test_bit(__I40E_VFS_RELEASING, pf->state))
 209			return;
 210		if (i40e_reset_vf(vf, false))
 211			return;
 212		usleep_range(10000, 20000);
 213	}
 214
 215	if (notify_vf)
 216		dev_warn(&vf->pf->pdev->dev,
 217			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
 218			 vf->vf_id);
 219	else
 220		dev_dbg(&vf->pf->pdev->dev,
 221			"Failed to initiate reset for VF %d after 200 milliseconds\n",
 222			vf->vf_id);
 223}
 224
 225/**
 226 * i40e_vc_isvalid_vsi_id
 227 * @vf: pointer to the VF info
 228 * @vsi_id: VF relative VSI id
 229 *
 230 * check for the valid VSI id
 231 **/
 232static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 233{
 234	struct i40e_pf *pf = vf->pf;
 235	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 236
 237	return (vsi && (vsi->vf_id == vf->vf_id));
 238}
 239
 240/**
 241 * i40e_vc_isvalid_queue_id
 242 * @vf: pointer to the VF info
 243 * @vsi_id: vsi id
 244 * @qid: vsi relative queue id
 245 *
 246 * check for the valid queue id
 247 **/
 248static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
 249					    u16 qid)
 250{
 251	struct i40e_pf *pf = vf->pf;
 252	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 253
 254	return (vsi && (qid < vsi->alloc_queue_pairs));
 255}
 256
 257/**
 258 * i40e_vc_isvalid_vector_id
 259 * @vf: pointer to the VF info
 260 * @vector_id: VF relative vector id
 261 *
 262 * check for the valid vector id
 263 **/
 264static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
 265{
 266	struct i40e_pf *pf = vf->pf;
 267
 268	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
 269}
 270
 271/***********************vf resource mgmt routines*****************/
 272
 273/**
 274 * i40e_vc_get_pf_queue_id
 275 * @vf: pointer to the VF info
 276 * @vsi_id: id of VSI as provided by the FW
 277 * @vsi_queue_id: vsi relative queue id
 278 *
 279 * return PF relative queue id
 280 **/
 281static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
 282				   u8 vsi_queue_id)
 283{
 284	struct i40e_pf *pf = vf->pf;
 285	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 286	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 287
 288	if (!vsi)
 289		return pf_queue_id;
 290
 291	if (le16_to_cpu(vsi->info.mapping_flags) &
 292	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
 293		pf_queue_id =
 294			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
 295	else
 296		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
 297			      vsi_queue_id;
 298
 299	return pf_queue_id;
 300}
 301
 302/**
 303 * i40e_get_real_pf_qid
 304 * @vf: pointer to the VF info
 305 * @vsi_id: vsi id
 306 * @queue_id: queue number
 307 *
 308 * wrapper function to get pf_queue_id handling ADq code as well
 309 **/
 310static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
 311{
 312	int i;
 313
 314	if (vf->adq_enabled) {
  315		/* Although the VF considers all the queues (1 to 16) as its
  316		 * own, they may actually belong to different VSIs (up to 4).
  317		 * We need to find out which queues belong to which VSI.
  318		 */
 319		for (i = 0; i < vf->num_tc; i++) {
 320			if (queue_id < vf->ch[i].num_qps) {
 321				vsi_id = vf->ch[i].vsi_id;
 322				break;
 323			}
 324			/* find right queue id which is relative to a
 325			 * given VSI.
 326			 */
 327			queue_id -= vf->ch[i].num_qps;
  328		}
  329	}
 330
 331	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
 332}
 333
 334/**
 335 * i40e_config_irq_link_list
 336 * @vf: pointer to the VF info
 337 * @vsi_id: id of VSI as given by the FW
 338 * @vecmap: irq map info
 339 *
 340 * configure irq link list from the map
 341 **/
 342static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 343				      struct virtchnl_vector_map *vecmap)
 344{
 345	unsigned long linklistmap = 0, tempmap;
 346	struct i40e_pf *pf = vf->pf;
 347	struct i40e_hw *hw = &pf->hw;
 348	u16 vsi_queue_id, pf_queue_id;
 349	enum i40e_queue_type qtype;
 350	u16 next_q, vector_id, size;
 351	u32 reg, reg_idx;
 352	u16 itr_idx = 0;
 353
 354	vector_id = vecmap->vector_id;
 355	/* setup the head */
  356	if (vector_id == 0)
 357		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 358	else
 359		reg_idx = I40E_VPINT_LNKLSTN(
 360		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
 361		     (vector_id - 1));
 362
 363	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
 364		/* Special case - No queues mapped on this vector */
 365		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
 366		goto irq_list_done;
 367	}
 368	tempmap = vecmap->rxq_map;
 369	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 370		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
 371				    vsi_queue_id));
 372	}
 373
 374	tempmap = vecmap->txq_map;
 375	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 376		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
 377				     vsi_queue_id + 1));
 378	}
 379
 380	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
 381	next_q = find_first_bit(&linklistmap, size);
 382	if (unlikely(next_q == size))
 383		goto irq_list_done;
 384
 385	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 386	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 387	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
 388	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 389
 390	wr32(hw, reg_idx, reg);
 391
 392	while (next_q < size) {
 393		switch (qtype) {
 394		case I40E_QUEUE_TYPE_RX:
 395			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
 396			itr_idx = vecmap->rxitr_idx;
 397			break;
 398		case I40E_QUEUE_TYPE_TX:
 399			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
 400			itr_idx = vecmap->txitr_idx;
 401			break;
 402		default:
 403			break;
 404		}
 405
 406		next_q = find_next_bit(&linklistmap, size, next_q + 1);
  407		if (next_q < size) {
 408			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 409			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 410			pf_queue_id = i40e_get_real_pf_qid(vf,
 411							   vsi_id,
 412							   vsi_queue_id);
 413		} else {
 414			pf_queue_id = I40E_QUEUE_END_OF_LIST;
 415			qtype = 0;
 416		}
 417
 418		/* format for the RQCTL & TQCTL regs is same */
 419		reg = (vector_id) |
 420		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 421		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 422		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 423		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 424		wr32(hw, reg_idx, reg);
 425	}
 426
 427	/* if the vf is running in polling mode and using interrupt zero,
 428	 * need to disable auto-mask on enabling zero interrupt for VFs.
 429	 */
 430	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
 431	    (vector_id == 0)) {
 432		reg = rd32(hw, I40E_GLINT_CTL);
 433		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
 434			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
 435			wr32(hw, I40E_GLINT_CTL, reg);
 436		}
 437	}
 438
 439irq_list_done:
 440	i40e_flush(hw);
 441}
 442
 443/**
 444 * i40e_release_iwarp_qvlist
 445 * @vf: pointer to the VF.
 446 *
 447 **/
 448static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
 449{
 450	struct i40e_pf *pf = vf->pf;
 451	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
 452	u32 msix_vf;
 453	u32 i;
 454
 455	if (!vf->qvlist_info)
 456		return;
 457
 458	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 459	for (i = 0; i < qvlist_info->num_vectors; i++) {
 460		struct virtchnl_iwarp_qv_info *qv_info;
 461		u32 next_q_index, next_q_type;
 462		struct i40e_hw *hw = &pf->hw;
 463		u32 v_idx, reg_idx, reg;
 464
 465		qv_info = &qvlist_info->qv_info[i];
 466		if (!qv_info)
 467			continue;
 468		v_idx = qv_info->v_idx;
 469		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 470			/* Figure out the queue after CEQ and make that the
 471			 * first queue.
 472			 */
 473			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
 474			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
 475			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
 476					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
 477			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
 478					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
 479
 480			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 481			reg = (next_q_index &
 482			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
 483			       (next_q_type <<
 484			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 485
 486			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
 487		}
 488	}
 489	kfree(vf->qvlist_info);
 490	vf->qvlist_info = NULL;
 491}
 492
 493/**
 494 * i40e_config_iwarp_qvlist
 495 * @vf: pointer to the VF info
 496 * @qvlist_info: queue and vector list
 497 *
 498 * Return 0 on success or < 0 on error
 499 **/
 500static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
 501				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
 502{
 503	struct i40e_pf *pf = vf->pf;
 504	struct i40e_hw *hw = &pf->hw;
 505	struct virtchnl_iwarp_qv_info *qv_info;
 506	u32 v_idx, i, reg_idx, reg;
 507	u32 next_q_idx, next_q_type;
 508	u32 msix_vf;
 509	int ret = 0;
 510
 511	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 512
 513	if (qvlist_info->num_vectors > msix_vf) {
 514		dev_warn(&pf->pdev->dev,
 515			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
 516			 qvlist_info->num_vectors,
 517			 msix_vf);
 518		ret = -EINVAL;
 519		goto err_out;
 520	}
 521
 522	kfree(vf->qvlist_info);
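	/* struct_size() is passed num_vectors - 1 because the struct
	 * definition already embeds one qv_info element
	 */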
 523	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
 524					      qvlist_info->num_vectors - 1),
 525				  GFP_KERNEL);
 526	if (!vf->qvlist_info) {
 527		ret = -ENOMEM;
 528		goto err_out;
 529	}
 530	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
 531
 532	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 533	for (i = 0; i < qvlist_info->num_vectors; i++) {
 534		qv_info = &qvlist_info->qv_info[i];
 535		if (!qv_info)
  536			continue;
 537
 538		/* Validate vector id belongs to this vf */
 539		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
 540			ret = -EINVAL;
 541			goto err_free;
 542		}
 543
 544		v_idx = qv_info->v_idx;
 545
 546		vf->qvlist_info->qv_info[i] = *qv_info;
 547
 548		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 549		/* We might be sharing the interrupt, so get the first queue
 550		 * index and type, push it down the list by adding the new
 551		 * queue on top. Also link it with the new queue in CEQCTL.
 552		 */
 553		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
 554		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
 555				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
 556		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
 557				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 558
 559		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 560			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
 561			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
 562			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
 563			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
 564			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
 565			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
 566			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
 567
 568			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 569			reg = (qv_info->ceq_idx &
 570			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
 571			       (I40E_QUEUE_TYPE_PE_CEQ <<
 572			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 573			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
 574		}
 575
 576		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
 577			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
 578			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
 579			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
 580
 581			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
 582		}
 583	}
 584
 585	return 0;
 586err_free:
 587	kfree(vf->qvlist_info);
 588	vf->qvlist_info = NULL;
 589err_out:
 590	return ret;
 591}
 592
 593/**
 594 * i40e_config_vsi_tx_queue
 595 * @vf: pointer to the VF info
 596 * @vsi_id: id of VSI as provided by the FW
 597 * @vsi_queue_id: vsi relative queue index
 598 * @info: config. info
 599 *
 600 * configure tx queue
 601 **/
 602static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 603				    u16 vsi_queue_id,
 604				    struct virtchnl_txq_info *info)
 605{
 606	struct i40e_pf *pf = vf->pf;
 607	struct i40e_hw *hw = &pf->hw;
 608	struct i40e_hmc_obj_txq tx_ctx;
 609	struct i40e_vsi *vsi;
 610	u16 pf_queue_id;
 611	u32 qtx_ctl;
 612	int ret = 0;
 613
 614	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
 615		ret = -ENOENT;
 616		goto error_context;
 617	}
 618	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 619	vsi = i40e_find_vsi_from_id(pf, vsi_id);
 620	if (!vsi) {
 621		ret = -ENOENT;
 622		goto error_context;
 623	}
 624
 625	/* clear the context structure first */
 626	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
 627
 628	/* only set the required fields */
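	/* the HMC context stores the ring base address in 128-byte units,
	 * hence the divide by 128
	 */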
 629	tx_ctx.base = info->dma_ring_addr / 128;
 630	tx_ctx.qlen = info->ring_len;
 631	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
 632	tx_ctx.rdylist_act = 0;
 633	tx_ctx.head_wb_ena = info->headwb_enabled;
 634	tx_ctx.head_wb_addr = info->dma_headwb_addr;
 635
 636	/* clear the context in the HMC */
 637	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
 638	if (ret) {
 639		dev_err(&pf->pdev->dev,
 640			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
 641			pf_queue_id, ret);
 642		ret = -ENOENT;
 643		goto error_context;
 644	}
 645
 646	/* set the context in the HMC */
 647	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
 648	if (ret) {
 649		dev_err(&pf->pdev->dev,
 650			"Failed to set VF LAN Tx queue context %d error: %d\n",
 651			pf_queue_id, ret);
 652		ret = -ENOENT;
 653		goto error_context;
 654	}
 655
 656	/* associate this queue with the PCI VF function */
 657	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
 658	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
 659		    & I40E_QTX_CTL_PF_INDX_MASK);
 660	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
 661		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
 662		    & I40E_QTX_CTL_VFVM_INDX_MASK);
 663	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
 664	i40e_flush(hw);
 665
 666error_context:
 667	return ret;
 668}
 669
 670/**
 671 * i40e_config_vsi_rx_queue
 672 * @vf: pointer to the VF info
 673 * @vsi_id: id of VSI  as provided by the FW
 674 * @vsi_queue_id: vsi relative queue index
 675 * @info: config. info
 676 *
 677 * configure rx queue
 678 **/
 679static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 680				    u16 vsi_queue_id,
 681				    struct virtchnl_rxq_info *info)
 682{
 683	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 684	struct i40e_pf *pf = vf->pf;
 685	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
 686	struct i40e_hw *hw = &pf->hw;
  687	struct i40e_hmc_obj_rxq rx_ctx;
 688	int ret = 0;
  689
 690	/* clear the context structure first */
 691	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
 692
 693	/* only set the required fields */
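	/* as with the Tx context, the ring base is programmed in
	 * 128-byte units
	 */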
 694	rx_ctx.base = info->dma_ring_addr / 128;
 695	rx_ctx.qlen = info->ring_len;
 696
 697	if (info->splithdr_enabled) {
 698		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
 699				  I40E_RX_SPLIT_IP      |
 700				  I40E_RX_SPLIT_TCP_UDP |
 701				  I40E_RX_SPLIT_SCTP;
 702		/* header length validation */
 703		if (info->hdr_size > ((2 * 1024) - 64)) {
 704			ret = -EINVAL;
 705			goto error_param;
 706		}
 707		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 708
 709		/* set split mode 10b */
 710		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
 711	}
 712
 713	/* databuffer length validation */
 714	if (info->databuffer_size > ((16 * 1024) - 128)) {
 715		ret = -EINVAL;
 716		goto error_param;
 717	}
 718	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
 719
 720	/* max pkt. length validation */
 721	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
 722		ret = -EINVAL;
 723		goto error_param;
 724	}
 725	rx_ctx.rxmax = info->max_pkt_size;
 726
 727	/* if port VLAN is configured increase the max packet size */
 728	if (vsi->info.pvid)
 729		rx_ctx.rxmax += VLAN_HLEN;
 730
 731	/* enable 32bytes desc always */
 732	rx_ctx.dsize = 1;
 733
 734	/* default values */
 735	rx_ctx.lrxqthresh = 1;
 736	rx_ctx.crcstrip = 1;
 737	rx_ctx.prefena = 1;
 738	rx_ctx.l2tsel = 1;
 739
 740	/* clear the context in the HMC */
 741	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
 742	if (ret) {
 743		dev_err(&pf->pdev->dev,
 744			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
 745			pf_queue_id, ret);
 746		ret = -ENOENT;
 747		goto error_param;
 748	}
 749
 750	/* set the context in the HMC */
 751	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
 752	if (ret) {
 753		dev_err(&pf->pdev->dev,
  754			"Failed to set VF LAN Rx queue context %d, error: %d\n",
 755			pf_queue_id, ret);
 756		ret = -ENOENT;
 757		goto error_param;
 758	}
 759
 760error_param:
 761	return ret;
 762}
 763
 764/**
 765 * i40e_alloc_vsi_res
 766 * @vf: pointer to the VF info
 767 * @idx: VSI index, applies only for ADq mode, zero otherwise
 768 *
 769 * alloc VF vsi context & resources
 770 **/
 771static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
 772{
 773	struct i40e_mac_filter *f = NULL;
 774	struct i40e_pf *pf = vf->pf;
 775	struct i40e_vsi *vsi;
 776	u64 max_tx_rate = 0;
 777	int ret = 0;
 778
 779	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
 780			     vf->vf_id);
 781
 782	if (!vsi) {
 783		dev_err(&pf->pdev->dev,
 784			"add vsi failed for VF %d, aq_err %d\n",
 785			vf->vf_id, pf->hw.aq.asq_last_status);
 786		ret = -ENOENT;
 787		goto error_alloc_vsi_res;
 788	}
 789
 790	if (!idx) {
 791		u64 hena = i40e_pf_get_default_rss_hena(pf);
 792		u8 broadcast[ETH_ALEN];
 793
 794		vf->lan_vsi_idx = vsi->idx;
 795		vf->lan_vsi_id = vsi->id;
 796		/* If the port VLAN has been configured and then the
 797		 * VF driver was removed then the VSI port VLAN
 798		 * configuration was destroyed.  Check if there is
 799		 * a port VLAN and restore the VSI configuration if
 800		 * needed.
 801		 */
 802		if (vf->port_vlan_id)
 803			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 804
 805		spin_lock_bh(&vsi->mac_filter_hash_lock);
 806		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
 807			f = i40e_add_mac_filter(vsi,
 808						vf->default_lan_addr.addr);
 809			if (!f)
 810				dev_info(&pf->pdev->dev,
 811					 "Could not add MAC filter %pM for VF %d\n",
 812					vf->default_lan_addr.addr, vf->vf_id);
 813		}
 814		eth_broadcast_addr(broadcast);
 815		f = i40e_add_mac_filter(vsi, broadcast);
 816		if (!f)
 817			dev_info(&pf->pdev->dev,
 818				 "Could not allocate VF broadcast filter\n");
 819		spin_unlock_bh(&vsi->mac_filter_hash_lock);
 820		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
 821		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
 822		/* program mac filter only for VF VSI */
 823		ret = i40e_sync_vsi_filters(vsi);
 824		if (ret)
 825			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 826	}
 827
  828	/* store the VSI index and id for ADq; the MAC filter applies only to the LAN VSI */
 829	if (vf->adq_enabled) {
 830		vf->ch[idx].vsi_idx = vsi->idx;
 831		vf->ch[idx].vsi_id = vsi->id;
 832	}
 833
 834	/* Set VF bandwidth if specified */
 835	if (vf->tx_rate) {
 836		max_tx_rate = vf->tx_rate;
 837	} else if (vf->ch[idx].max_tx_rate) {
 838		max_tx_rate = vf->ch[idx].max_tx_rate;
 839	}
 840
 841	if (max_tx_rate) {
 842		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
 843		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
 844						  max_tx_rate, 0, NULL);
 845		if (ret)
 846			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
 847				vf->vf_id, ret);
 848	}
 849
 850error_alloc_vsi_res:
 851	return ret;
 852}
 853
 854/**
 855 * i40e_map_pf_queues_to_vsi
 856 * @vf: pointer to the VF info
 857 *
 858 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
  859 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues to the VSI.
 860 **/
 861static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
 862{
 863	struct i40e_pf *pf = vf->pf;
 864	struct i40e_hw *hw = &pf->hw;
 865	u32 reg, num_tc = 1; /* VF has at least one traffic class */
 866	u16 vsi_id, qps;
 867	int i, j;
 868
 869	if (vf->adq_enabled)
 870		num_tc = vf->num_tc;
 871
 872	for (i = 0; i < num_tc; i++) {
 873		if (vf->adq_enabled) {
 874			qps = vf->ch[i].num_qps;
 875			vsi_id =  vf->ch[i].vsi_id;
 876		} else {
 877			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 878			vsi_id = vf->lan_vsi_id;
 879		}
 880
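		/* each VSILAN_QTABLE register carries two queue ids, one per
		 * 16-bit half; 0x7FF in a half marks the end of the list
		 */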
 881		for (j = 0; j < 7; j++) {
 882			if (j * 2 >= qps) {
 883				/* end of list */
 884				reg = 0x07FF07FF;
 885			} else {
 886				u16 qid = i40e_vc_get_pf_queue_id(vf,
 887								  vsi_id,
 888								  j * 2);
 889				reg = qid;
 890				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
 891							      (j * 2) + 1);
 892				reg |= qid << 16;
 893			}
 894			i40e_write_rx_ctl(hw,
 895					  I40E_VSILAN_QTABLE(j, vsi_id),
 896					  reg);
 897		}
 898	}
 899}
 900
 901/**
 902 * i40e_map_pf_to_vf_queues
 903 * @vf: pointer to the VF info
 904 *
 905 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
  906 * function takes care of the second part, VPLAN_QTABLE, and completes the VF mappings.
 907 **/
 908static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
 909{
 910	struct i40e_pf *pf = vf->pf;
 911	struct i40e_hw *hw = &pf->hw;
 912	u32 reg, total_qps = 0;
 913	u32 qps, num_tc = 1; /* VF has at least one traffic class */
 914	u16 vsi_id, qid;
 915	int i, j;
 916
 917	if (vf->adq_enabled)
 918		num_tc = vf->num_tc;
 919
 920	for (i = 0; i < num_tc; i++) {
 921		if (vf->adq_enabled) {
 922			qps = vf->ch[i].num_qps;
 923			vsi_id =  vf->ch[i].vsi_id;
 924		} else {
 925			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 926			vsi_id = vf->lan_vsi_id;
 927		}
 928
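		/* program one VPLAN_QTABLE entry per VF-relative queue,
		 * each holding the absolute PF queue id
		 */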
 929		for (j = 0; j < qps; j++) {
 930			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
 931
 932			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
 933			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
 934			     reg);
 935			total_qps++;
 936		}
 937	}
 938}
 939
 940/**
 941 * i40e_enable_vf_mappings
 942 * @vf: pointer to the VF info
 943 *
 944 * enable VF mappings
 945 **/
 946static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 947{
 948	struct i40e_pf *pf = vf->pf;
 949	struct i40e_hw *hw = &pf->hw;
 950	u32 reg;
 951
 952	/* Tell the hardware we're using noncontiguous mapping. HW requires
 953	 * that VF queues be mapped using this method, even when they are
 954	 * contiguous in real life
 955	 */
 956	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
 957			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
 958
 959	/* enable VF vplan_qtable mappings */
 960	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
 961	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 962
 963	i40e_map_pf_to_vf_queues(vf);
 964	i40e_map_pf_queues_to_vsi(vf);
 965
 966	i40e_flush(hw);
 967}
 968
 969/**
 970 * i40e_disable_vf_mappings
 971 * @vf: pointer to the VF info
 972 *
 973 * disable VF mappings
 974 **/
 975static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 976{
 977	struct i40e_pf *pf = vf->pf;
 978	struct i40e_hw *hw = &pf->hw;
 979	int i;
 980
 981	/* disable qp mappings */
 982	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
 983	for (i = 0; i < I40E_MAX_VSI_QP; i++)
 984		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
 985		     I40E_QUEUE_END_OF_LIST);
 986	i40e_flush(hw);
 987}
 988
 989/**
 990 * i40e_free_vf_res
 991 * @vf: pointer to the VF info
 992 *
 993 * free VF resources
 994 **/
 995static void i40e_free_vf_res(struct i40e_vf *vf)
 996{
 997	struct i40e_pf *pf = vf->pf;
 998	struct i40e_hw *hw = &pf->hw;
 999	u32 reg_idx, reg;
1000	int i, j, msix_vf;
1001
1002	/* Start by disabling VF's configuration API to prevent the OS from
1003	 * accessing the VF's VSI after it's freed / invalidated.
1004	 */
1005	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1006
 1007	/* It's possible the VF had requested more queues than the default so
1008	 * do the accounting here when we're about to free them.
1009	 */
1010	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
1011		pf->queues_left += vf->num_queue_pairs -
1012				   I40E_DEFAULT_QUEUES_PER_VF;
1013	}
1014
1015	/* free vsi & disconnect it from the parent uplink */
1016	if (vf->lan_vsi_idx) {
1017		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
1018		vf->lan_vsi_idx = 0;
1019		vf->lan_vsi_id = 0;
1020	}
1021
1022	/* do the accounting and remove additional ADq VSI's */
1023	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
1024		for (j = 0; j < vf->num_tc; j++) {
 1025			/* At this point VSI0 has already been released, so
 1026			 * don't release it again; just clear the stored
 1027			 * index and id
 1028			 */
1029			if (j)
1030				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
1031			vf->ch[j].vsi_idx = 0;
1032			vf->ch[j].vsi_id = 0;
1033		}
1034	}
1035	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
1036
1037	/* disable interrupts so the VF starts in a known state */
1038	for (i = 0; i < msix_vf; i++) {
1039		/* format is same for both registers */
 1040		if (i == 0)
1041			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
1042		else
1043			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
1044						      (vf->vf_id))
1045						     + (i - 1));
1046		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1047		i40e_flush(hw);
1048	}
1049
1050	/* clear the irq settings */
1051	for (i = 0; i < msix_vf; i++) {
1052		/* format is same for both registers */
 1053		if (i == 0)
1054			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
1055		else
1056			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
1057						      (vf->vf_id))
1058						     + (i - 1));
1059		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
1060		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1061		wr32(hw, reg_idx, reg);
1062		i40e_flush(hw);
1063	}
1064	/* reset some of the state variables keeping track of the resources */
1065	vf->num_queue_pairs = 0;
1066	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1067	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1068}
1069
1070/**
1071 * i40e_alloc_vf_res
1072 * @vf: pointer to the VF info
1073 *
1074 * allocate VF resources
1075 **/
1076static int i40e_alloc_vf_res(struct i40e_vf *vf)
1077{
1078	struct i40e_pf *pf = vf->pf;
1079	int total_queue_pairs = 0;
1080	int ret, idx;
1081
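	/* honor the VF's queue request only if enough queues remain in
	 * the PF's pool; otherwise fall back to the default allocation
	 */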
1082	if (vf->num_req_queues &&
1083	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1084		pf->num_vf_qps = vf->num_req_queues;
1085	else
1086		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1087
1088	/* allocate hw vsi context & associated resources */
1089	ret = i40e_alloc_vsi_res(vf, 0);
1090	if (ret)
1091		goto error_alloc;
1092	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1093
1094	/* allocate additional VSIs based on tc information for ADq */
1095	if (vf->adq_enabled) {
1096		if (pf->queues_left >=
1097		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1098			/* TC 0 always belongs to VF VSI */
1099			for (idx = 1; idx < vf->num_tc; idx++) {
1100				ret = i40e_alloc_vsi_res(vf, idx);
1101				if (ret)
1102					goto error_alloc;
1103			}
1104			/* send correct number of queues */
1105			total_queue_pairs = I40E_MAX_VF_QUEUES;
1106		} else {
1107			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1108				 vf->vf_id);
1109			vf->adq_enabled = false;
1110		}
1111	}
1112
1113	/* We account for each VF to get a default number of queue pairs.  If
1114	 * the VF has now requested more, we need to account for that to make
1115	 * certain we never request more queues than we actually have left in
1116	 * HW.
1117	 */
1118	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1119		pf->queues_left -=
1120			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1121
1122	if (vf->trusted)
1123		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1124	else
1125		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1126
1127	/* store the total qps number for the runtime
1128	 * VF req validation
1129	 */
1130	vf->num_queue_pairs = total_queue_pairs;
1131
1132	/* VF is now completely initialized */
1133	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1134
1135error_alloc:
1136	if (ret)
1137		i40e_free_vf_res(vf);
1138
1139	return ret;
1140}
1141
1142#define VF_DEVICE_STATUS 0xAA
1143#define VF_TRANS_PENDING_MASK 0x20
1144/**
1145 * i40e_quiesce_vf_pci
1146 * @vf: pointer to the VF structure
1147 *
1148 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1149 * if the transactions never clear.
1150 **/
1151static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1152{
1153	struct i40e_pf *pf = vf->pf;
1154	struct i40e_hw *hw = &pf->hw;
1155	int vf_abs_id, i;
1156	u32 reg;
1157
1158	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1159
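	/* use the CIAA/CIAD indirect interface to poll the VF's PCI
	 * device status until no transactions are pending
	 */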
1160	wr32(hw, I40E_PF_PCI_CIAA,
1161	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1162	for (i = 0; i < 100; i++) {
1163		reg = rd32(hw, I40E_PF_PCI_CIAD);
1164		if ((reg & VF_TRANS_PENDING_MASK) == 0)
1165			return 0;
1166		udelay(1);
1167	}
1168	return -EIO;
1169}
1170
1171/**
1172 * __i40e_getnum_vf_vsi_vlan_filters
1173 * @vsi: pointer to the vsi
1174 *
1175 * called to get the number of VLANs offloaded on this VF
1176 **/
1177static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1178{
1179	struct i40e_mac_filter *f;
1180	u16 num_vlans = 0, bkt;
1181
1182	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1183		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1184			num_vlans++;
1185	}
1186
1187	return num_vlans;
1188}
1189
1190/**
1191 * i40e_getnum_vf_vsi_vlan_filters
1192 * @vsi: pointer to the vsi
1193 *
1194 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
1195 **/
1196static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1197{
1198	int num_vlans;
1199
1200	spin_lock_bh(&vsi->mac_filter_hash_lock);
1201	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1202	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1203
1204	return num_vlans;
1205}
1206
1207/**
1208 * i40e_get_vlan_list_sync
1209 * @vsi: pointer to the VSI
1210 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
1211 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 1212 *             This array is allocated here, but must be freed by the caller.
1213 *
1214 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
1215 **/
1216static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
1217				    s16 **vlan_list)
1218{
1219	struct i40e_mac_filter *f;
1220	int i = 0;
1221	int bkt;
1222
1223	spin_lock_bh(&vsi->mac_filter_hash_lock);
1224	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1225	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
1226	if (!(*vlan_list))
1227		goto err;
1228
1229	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1230		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1231			continue;
1232		(*vlan_list)[i++] = f->vlan;
1233	}
1234err:
1235	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1236}
1237
1238/**
1239 * i40e_set_vsi_promisc
1240 * @vf: pointer to the VF struct
1241 * @seid: VSI number
1242 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
1243 *                for a given VLAN
1244 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
1245 *                  for a given VLAN
1246 * @vl: List of VLANs - apply filter for given VLANs
1247 * @num_vlans: Number of elements in @vl
1248 **/
1249static i40e_status
1250i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1251		     bool unicast_enable, s16 *vl, u16 num_vlans)
1252{
1253	i40e_status aq_ret, aq_tmp = 0;
1254	struct i40e_pf *pf = vf->pf;
1255	struct i40e_hw *hw = &pf->hw;
1256	int i;
1257
 1258	/* No VLANs to set promiscuous mode on; set it on the VSI itself */
1259	if (!num_vlans || !vl) {
1260		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
1261							       multi_enable,
1262							       NULL);
1263		if (aq_ret) {
1264			int aq_err = pf->hw.aq.asq_last_status;
1265
1266			dev_err(&pf->pdev->dev,
1267				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1268				vf->vf_id,
1269				i40e_stat_str(&pf->hw, aq_ret),
1270				i40e_aq_str(&pf->hw, aq_err));
1271
1272			return aq_ret;
1273		}
1274
1275		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
1276							     unicast_enable,
1277							     NULL, true);
1278
1279		if (aq_ret) {
1280			int aq_err = pf->hw.aq.asq_last_status;
1281
1282			dev_err(&pf->pdev->dev,
1283				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1284				vf->vf_id,
1285				i40e_stat_str(&pf->hw, aq_ret),
1286				i40e_aq_str(&pf->hw, aq_err));
1287		}
1288
1289		return aq_ret;
1290	}
1291
1292	for (i = 0; i < num_vlans; i++) {
1293		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
1294							    multi_enable,
1295							    vl[i], NULL);
1296		if (aq_ret) {
1297			int aq_err = pf->hw.aq.asq_last_status;
1298
1299			dev_err(&pf->pdev->dev,
1300				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1301				vf->vf_id,
1302				i40e_stat_str(&pf->hw, aq_ret),
1303				i40e_aq_str(&pf->hw, aq_err));
1304
1305			if (!aq_tmp)
1306				aq_tmp = aq_ret;
1307		}
1308
1309		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
1310							    unicast_enable,
1311							    vl[i], NULL);
1312		if (aq_ret) {
1313			int aq_err = pf->hw.aq.asq_last_status;
1314
1315			dev_err(&pf->pdev->dev,
1316				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1317				vf->vf_id,
1318				i40e_stat_str(&pf->hw, aq_ret),
1319				i40e_aq_str(&pf->hw, aq_err));
1320
1321			if (!aq_tmp)
1322				aq_tmp = aq_ret;
1323		}
1324	}
1325
1326	if (aq_tmp)
1327		aq_ret = aq_tmp;
1328
1329	return aq_ret;
1330}
1331
1332/**
1333 * i40e_config_vf_promiscuous_mode
1334 * @vf: pointer to the VF info
1335 * @vsi_id: VSI id
1336 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1337 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1338 *
1339 * Called from the VF to configure the promiscuous mode of
1340 * VF vsis and from the VF reset path to reset promiscuous mode.
1341 **/
1342static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1343						   u16 vsi_id,
1344						   bool allmulti,
1345						   bool alluni)
1346{
1347	i40e_status aq_ret = I40E_SUCCESS;
1348	struct i40e_pf *pf = vf->pf;
1349	struct i40e_vsi *vsi;
1350	u16 num_vlans;
1351	s16 *vl;
1352
1353	vsi = i40e_find_vsi_from_id(pf, vsi_id);
1354	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1355		return I40E_ERR_PARAM;
1356
1357	if (vf->port_vlan_id) {
1358		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
1359					      alluni, &vf->port_vlan_id, 1);
1360		return aq_ret;
1361	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1362		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
1363
1364		if (!vl)
1365			return I40E_ERR_NO_MEMORY;
1366
1367		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1368					      vl, num_vlans);
1369		kfree(vl);
1370		return aq_ret;
1371	}
1372
1373	/* no VLANs to set on, set on VSI */
1374	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1375				      NULL, 0);
1376	return aq_ret;
1377}
1378
1379/**
1380 * i40e_sync_vfr_reset
1381 * @hw: pointer to hw struct
1382 * @vf_id: VF identifier
1383 *
 1384 * Before triggering a hardware reset, we need to know that no other process has
1385 * reserved the hardware for any reset operations. This check is done by
1386 * examining the status of the RSTAT1 register used to signal the reset.
1387 **/
1388static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
1389{
1390	u32 reg;
1391	int i;
1392
1393	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
1394		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
1395			   I40E_VFINT_ICR0_ADMINQ_MASK;
1396		if (reg)
1397			return 0;
1398
1399		usleep_range(100, 200);
1400	}
1401
1402	return -EAGAIN;
1403}
1404
1405/**
1406 * i40e_trigger_vf_reset
1407 * @vf: pointer to the VF structure
1408 * @flr: VFLR was issued or not
1409 *
1410 * Trigger hardware to start a reset for a particular VF. Expects the caller
1411 * to wait the proper amount of time to allow hardware to reset the VF before
1412 * it cleans up and restores VF functionality.
1413 **/
1414static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1415{
1416	struct i40e_pf *pf = vf->pf;
1417	struct i40e_hw *hw = &pf->hw;
1418	u32 reg, reg_idx, bit_idx;
1419	bool vf_active;
1420	u32 radq;
1421
1422	/* warn the VF */
1423	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1424
1425	/* Disable VF's configuration API during reset. The flag is re-enabled
1426	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1427	 * It's normally disabled in i40e_free_vf_res(), but it's safer
 1428	 * to do it earlier, giving any VF config functions that may
 1429	 * still be running at this point time to finish.
1430	 */
1431	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1432
1433	/* In the case of a VFLR, the HW has already reset the VF and we
1434	 * just need to clean up, so don't hit the VFRTRIG register.
1435	 */
1436	if (!flr) {
1437		/* Sync VFR reset before trigger next one */
1438		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
1439			    I40E_VFINT_ICR0_ADMINQ_MASK;
 1440		if (vf_active && !radq)
 1441			/* wait for the VF driver to finish its reset */
 1442			if (i40e_sync_vfr_reset(hw, vf->vf_id))
 1443				dev_info(&pf->pdev->dev,
 1444					 "Reset VF %d never finished\n",
 1445					 vf->vf_id);
1446
 1447		/* Reset the VF using the VPGEN_VFRTRIG reg. This also sets
 1448		 * the reset-in-progress state in the rstat1 register.
 1449		 */
1450		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1451		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1452		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1453		i40e_flush(hw);
1454	}
1455	/* clear the VFLR bit in GLGEN_VFLRSTAT */
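	/* the VFLRSTAT bitmap packs 32 VFs per register, indexed by the
	 * absolute VF id
	 */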
1456	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1457	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1458	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1459	i40e_flush(hw);
1460
1461	if (i40e_quiesce_vf_pci(vf))
1462		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1463			vf->vf_id);
1464}
1465
1466/**
1467 * i40e_cleanup_reset_vf
1468 * @vf: pointer to the VF structure
1469 *
1470 * Cleanup a VF after the hardware reset is finished. Expects the caller to
1471 * have verified whether the reset is finished properly, and ensure the
1472 * minimum amount of wait time has passed.
1473 **/
1474static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1475{
1476	struct i40e_pf *pf = vf->pf;
1477	struct i40e_hw *hw = &pf->hw;
1478	u32 reg;
1479
1480	/* disable promisc modes in case they were enabled */
1481	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1482
1483	/* free VF resources to begin resetting the VSI state */
1484	i40e_free_vf_res(vf);
1485
1486	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1487	 * By doing this we allow HW to access VF memory at any point. If we
1488	 * did it any sooner, HW could access memory while it was being freed
1489	 * in i40e_free_vf_res(), causing an IOMMU fault.
1490	 *
1491	 * On the other hand, this needs to be done ASAP, because the VF driver
1492	 * is waiting for this to happen and may report a timeout. It's
1493	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1494	 * it.
1495	 */
1496	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1497	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1498	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1499
1500	/* reallocate VF resources to finish resetting the VSI state */
1501	if (!i40e_alloc_vf_res(vf)) {
1502		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1503		i40e_enable_vf_mappings(vf);
1504		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1505		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1506		/* Do not notify the client during VF init */
1507		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1508					&vf->vf_states))
1509			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1510		vf->num_vlan = 0;
1511	}
1512
1513	/* Tell the VF driver the reset is done. This needs to be done only
1514	 * after VF has been fully initialized, because the VF driver may
1515	 * request resources immediately after setting this flag.
1516	 */
1517	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1518}
1519
1520/**
1521 * i40e_reset_vf
1522 * @vf: pointer to the VF structure
1523 * @flr: VFLR was issued or not
1524 *
1525 * Returns true if the VF is in reset, resets successfully, or resets
1526 * are disabled and false otherwise.
1527 **/
1528bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1529{
1530	struct i40e_pf *pf = vf->pf;
1531	struct i40e_hw *hw = &pf->hw;
1532	bool rsd = false;
1533	u32 reg;
1534	int i;
1535
1536	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1537		return true;
1538
1539	/* Bail out if VFs are disabled. */
1540	if (test_bit(__I40E_VF_DISABLE, pf->state))
1541		return true;
1542
1543	/* If VF is being reset already we don't need to continue. */
1544	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1545		return true;
1546
1547	i40e_trigger_vf_reset(vf, flr);
1548
1549	/* poll VPGEN_VFRSTAT reg to make sure
1550	 * that reset is complete
1551	 */
1552	for (i = 0; i < 10; i++) {
1553		/* VF reset requires driver to first reset the VF and then
1554		 * poll the status register to make sure that the reset
1555		 * completed successfully. Due to internal HW FIFO flushes,
1556		 * we must wait 10ms before the register will be valid.
1557		 */
1558		usleep_range(10000, 20000);
1559		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1560		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1561			rsd = true;
1562			break;
1563		}
1564	}
1565
1566	if (flr)
1567		usleep_range(10000, 20000);
1568
1569	if (!rsd)
1570		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1571			vf->vf_id);
1572	usleep_range(10000, 20000);
1573
1574	/* On initial reset, we don't have any queues to disable */
1575	if (vf->lan_vsi_idx != 0)
1576		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1577
1578	i40e_cleanup_reset_vf(vf);
1579
1580	i40e_flush(hw);
1581	usleep_range(20000, 40000);
1582	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
1583
1584	return true;
1585}
1586
1587/**
1588 * i40e_reset_all_vfs
1589 * @pf: pointer to the PF structure
1590 * @flr: VFLR was issued or not
1591 *
1592 * Reset all allocated VFs in one go. First, tell the hardware to reset each
1593 * VF, then do all the waiting in one chunk, and finally finish restoring each
1594 * VF after the wait. This is useful during PF routines which need to reset
1595 * all VFs, as otherwise it must perform these resets in a serialized fashion.
1596 *
1597 * Returns true if any VFs were reset, and false otherwise.
1598 **/
1599bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1600{
1601	struct i40e_hw *hw = &pf->hw;
1602	struct i40e_vf *vf;
1603	int i, v;
1604	u32 reg;
1605
1606	/* If we don't have any VFs, then there is nothing to reset */
1607	if (!pf->num_alloc_vfs)
1608		return false;
1609
1610	/* If VFs have been disabled, there is no need to reset */
1611	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1612		return false;
1613
1614	/* Begin reset on all VFs at once */
1615	for (v = 0; v < pf->num_alloc_vfs; v++) {
1616		vf = &pf->vf[v];
1617		/* If VF is being reset no need to trigger reset again */
1618		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1619			i40e_trigger_vf_reset(&pf->vf[v], flr);
1620	}
1621
1622	/* HW requires some time to make sure it can flush the FIFO for a VF
1623	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1624	 * sequence to make sure that it has completed. We'll keep track of
1625	 * the VFs using a simple iterator that increments once that VF has
1626	 * finished resetting.
1627	 */
1628	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1629		usleep_range(10000, 20000);
1630
 1631		/* Check each VF in sequence, beginning with the VF that
 1632		 * failed the previous check.
 1633		 */
1634		while (v < pf->num_alloc_vfs) {
1635			vf = &pf->vf[v];
1636			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
1637				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1638				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1639					break;
1640			}
1641
1642			/* If the current VF has finished resetting, move on
1643			 * to the next VF in sequence.
1644			 */
1645			v++;
1646		}
1647	}
1648
1649	if (flr)
1650		usleep_range(10000, 20000);
1651
1652	/* Display a warning if at least one VF didn't manage to reset in
1653	 * time, but continue on with the operation.
1654	 */
1655	if (v < pf->num_alloc_vfs)
1656		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1657			pf->vf[v].vf_id);
1658	usleep_range(10000, 20000);
1659
1660	/* Begin disabling all the rings associated with VFs, but do not wait
1661	 * between each VF.
1662	 */
1663	for (v = 0; v < pf->num_alloc_vfs; v++) {
1664		/* On initial reset, we don't have any queues to disable */
1665		if (pf->vf[v].lan_vsi_idx == 0)
1666			continue;
1667
1668		/* If VF is reset in another thread just continue */
 1669		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
1670			continue;
1671
1672		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1673	}
1674
1675	/* Now that we've notified HW to disable all of the VF rings, wait
1676	 * until they finish.
1677	 */
1678	for (v = 0; v < pf->num_alloc_vfs; v++) {
1679		/* On initial reset, we don't have any queues to disable */
1680		if (pf->vf[v].lan_vsi_idx == 0)
1681			continue;
1682
1683		/* If VF is reset in another thread just continue */
 1684		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
1685			continue;
1686
1687		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1688	}
1689
1690	/* Hw may need up to 50ms to finish disabling the RX queues. We
1691	 * minimize the wait by delaying only once for all VFs.
1692	 */
1693	mdelay(50);
1694
1695	/* Finish the reset on each VF */
1696	for (v = 0; v < pf->num_alloc_vfs; v++) {
1697		/* If VF is reset in another thread just continue */
 1698		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
1699			continue;
1700
1701		i40e_cleanup_reset_vf(&pf->vf[v]);
1702	}
1703
1704	i40e_flush(hw);
1705	usleep_range(20000, 40000);
1706	clear_bit(__I40E_VF_DISABLE, pf->state);
1707
1708	return true;
1709}
1710
1711/**
1712 * i40e_free_vfs
1713 * @pf: pointer to the PF structure
1714 *
1715 * free VF resources
1716 **/
1717void i40e_free_vfs(struct i40e_pf *pf)
1718{
1719	struct i40e_hw *hw = &pf->hw;
1720	u32 reg_idx, bit_idx;
1721	int i, tmp, vf_id;
1722
1723	if (!pf->vf)
1724		return;
1725
1726	set_bit(__I40E_VFS_RELEASING, pf->state);
1727	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1728		usleep_range(1000, 2000);
1729
1730	i40e_notify_client_of_vf_enable(pf, 0);
1731
1732	/* Disable IOV before freeing resources. This lets any VF drivers
1733	 * running in the host get themselves cleaned up before we yank
1734	 * the carpet out from underneath their feet.
1735	 */
1736	if (!pci_vfs_assigned(pf->pdev))
1737		pci_disable_sriov(pf->pdev);
1738	else
1739		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1740
1741	/* Amortize wait time by stopping all VFs at the same time */
1742	for (i = 0; i < pf->num_alloc_vfs; i++) {
1743		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1744			continue;
1745
1746		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1747	}
1748
1749	for (i = 0; i < pf->num_alloc_vfs; i++) {
1750		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1751			continue;
1752
1753		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1754	}
1755
1756	/* free up VF resources */
1757	tmp = pf->num_alloc_vfs;
1758	pf->num_alloc_vfs = 0;
1759	for (i = 0; i < tmp; i++) {
1760		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1761			i40e_free_vf_res(&pf->vf[i]);
1762		/* disable qp mappings */
1763		i40e_disable_vf_mappings(&pf->vf[i]);
1764	}
1765
1766	kfree(pf->vf);
1767	pf->vf = NULL;
1768
1769	/* This check is for when the driver is unloaded while VFs are
1770	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1771	 * before this function ever gets called.
1772	 */
1773	if (!pci_vfs_assigned(pf->pdev)) {
1774		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1775		 * work correctly when SR-IOV gets re-enabled.
1776		 */
1777		for (vf_id = 0; vf_id < tmp; vf_id++) {
1778			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1779			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1780			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1781		}
1782	}
1783	clear_bit(__I40E_VF_DISABLE, pf->state);
1784	clear_bit(__I40E_VFS_RELEASING, pf->state);
1785}
1786
1787#ifdef CONFIG_PCI_IOV
1788/**
1789 * i40e_alloc_vfs
1790 * @pf: pointer to the PF structure
1791 * @num_alloc_vfs: number of VFs to allocate
1792 *
1793 * allocate VF resources
1794 **/
1795int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1796{
1797	struct i40e_vf *vfs;
1798	int i, ret = 0;
1799
1800	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1801	i40e_irq_dynamic_disable_icr0(pf);
1802
1803	/* Check to see if we're just allocating resources for extant VFs */
1804	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1805		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1806		if (ret) {
1807			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1808			pf->num_alloc_vfs = 0;
1809			goto err_iov;
1810		}
1811	}
1812	/* allocate memory */
1813	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1814	if (!vfs) {
1815		ret = -ENOMEM;
1816		goto err_alloc;
1817	}
1818	pf->vf = vfs;
1819
1820	/* apply default profile */
1821	for (i = 0; i < num_alloc_vfs; i++) {
1822		vfs[i].pf = pf;
1823		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1824		vfs[i].vf_id = i;
1825
1826		/* assign default capabilities */
1827		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1828		vfs[i].spoofchk = true;
1829
1830		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1832	}
1833	pf->num_alloc_vfs = num_alloc_vfs;
1834
1835	/* VF resources get allocated during reset */
1836	i40e_reset_all_vfs(pf, false);
1837
1838	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1839
1840err_alloc:
1841	if (ret)
1842		i40e_free_vfs(pf);
1843err_iov:
1844	/* Re-enable interrupt 0. */
1845	i40e_irq_dynamic_enable_icr0(pf);
1846	return ret;
1847}
1848
1849#endif
1850/**
1851 * i40e_pci_sriov_enable
1852 * @pdev: pointer to a pci_dev structure
1853 * @num_vfs: number of VFs to allocate
1854 *
1855 * Enable or change the number of VFs
1856 **/
1857static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1858{
1859#ifdef CONFIG_PCI_IOV
1860	struct i40e_pf *pf = pci_get_drvdata(pdev);
1861	int pre_existing_vfs = pci_num_vf(pdev);
1862	int err = 0;
1863
1864	if (test_bit(__I40E_TESTING, pf->state)) {
1865		dev_warn(&pdev->dev,
1866			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1867		err = -EPERM;
1868		goto err_out;
1869	}
1870
1871	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1872		i40e_free_vfs(pf);
1873	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1874		goto out;
1875
1876	if (num_vfs > pf->num_req_vfs) {
1877		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1878			 num_vfs, pf->num_req_vfs);
1879		err = -EPERM;
1880		goto err_out;
1881	}
1882
1883	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1884	err = i40e_alloc_vfs(pf, num_vfs);
1885	if (err) {
1886		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1887		goto err_out;
1888	}
1889
1890out:
1891	return num_vfs;
1892
1893err_out:
1894	return err;
1895#endif
1896	return 0;
1897}
1898
1899/**
1900 * i40e_pci_sriov_configure
1901 * @pdev: pointer to a pci_dev structure
1902 * @num_vfs: number of VFs to allocate
1903 *
1904 * Enable or change the number of VFs. Called when the user updates the number
1905 * of VFs in sysfs.
1906 **/
1907int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1908{
1909	struct i40e_pf *pf = pci_get_drvdata(pdev);
1910	int ret = 0;
1911
1912	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1913		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1914		return -EAGAIN;
1915	}
1916
1917	if (num_vfs) {
1918		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1919			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1920			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1921		}
1922		ret = i40e_pci_sriov_enable(pdev, num_vfs);
1923		goto sriov_configure_out;
1924	}
1925
1926	if (!pci_vfs_assigned(pf->pdev)) {
1927		i40e_free_vfs(pf);
1928		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1929		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1930	} else {
1931		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1932		ret = -EINVAL;
1933		goto sriov_configure_out;
1934	}
1935sriov_configure_out:
1936	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1937	return ret;
1938}
1939
1940/***********************virtual channel routines******************/
1941
1942/**
1943 * i40e_vc_send_msg_to_vf
1944 * @vf: pointer to the VF info
1945 * @v_opcode: virtual channel opcode
1946 * @v_retval: virtual channel return value
1947 * @msg: pointer to the msg buffer
1948 * @msglen: msg length
1949 *
1950 * send msg to VF
1951 **/
1952static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1953				  u32 v_retval, u8 *msg, u16 msglen)
1954{
1955	struct i40e_pf *pf;
1956	struct i40e_hw *hw;
1957	int abs_vf_id;
1958	i40e_status aq_ret;
1959
1960	/* validate the request */
1961	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1962		return -EINVAL;
1963
1964	pf = vf->pf;
1965	hw = &pf->hw;
1966	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1967
 1968	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1969					msg, msglen, NULL);
1970	if (aq_ret) {
1971		dev_info(&pf->pdev->dev,
1972			 "Unable to send the message to VF %d aq_err %d\n",
1973			 vf->vf_id, pf->hw.aq.asq_last_status);
1974		return -EIO;
1975	}
1976
1977	return 0;
1978}
1979
1980/**
1981 * i40e_vc_send_resp_to_vf
1982 * @vf: pointer to the VF info
1983 * @opcode: operation code
1984 * @retval: return value
1985 *
1986 * send resp msg to VF
1987 **/
1988static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1989				   enum virtchnl_ops opcode,
1990				   i40e_status retval)
1991{
1992	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1993}
1994
1995/**
1996 * i40e_sync_vf_state
1997 * @vf: pointer to the VF info
1998 * @state: VF state
1999 *
2000 * Called from a VF message to synchronize the service with a potential
2001 * VF reset state
2002 **/
2003static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
2004{
2005	int i;
2006
 2007	/* Some messages can only be handled once the VF state is set.
 2008	 * It is possible that this flag is cleared during a VF reset,
 2009	 * so we need to wait until the end of the reset to handle the
 2010	 * request message correctly.
 2011	 */
2012	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
2013		if (test_bit(state, &vf->vf_states))
2014			return true;
2015		usleep_range(10000, 20000);
2016	}
2017
2018	return test_bit(state, &vf->vf_states);
2019}
2020
2021/**
2022 * i40e_vc_get_version_msg
2023 * @vf: pointer to the VF info
2024 * @msg: pointer to the msg buffer
2025 *
2026 * called from the VF to request the API version used by the PF
2027 **/
2028static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
2029{
2030	struct virtchnl_version_info info = {
2031		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2032	};
2033
2034	vf->vf_ver = *(struct virtchnl_version_info *)msg;
2035	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2036	if (VF_IS_V10(&vf->vf_ver))
2037		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2038	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2039				      I40E_SUCCESS, (u8 *)&info,
2040				      sizeof(struct virtchnl_version_info));
2041}
2042
2043/**
2044 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2045 * @vf: pointer to VF structure
2046 **/
2047static void i40e_del_qch(struct i40e_vf *vf)
2048{
2049	struct i40e_pf *pf = vf->pf;
2050	int i;
2051
 2052	/* the first element in the array belongs to the primary VF VSI and we
 2053	 * shouldn't delete it; we should, however, delete the rest of the VSIs
 2054	 */
2055	for (i = 1; i < vf->num_tc; i++) {
2056		if (vf->ch[i].vsi_idx) {
2057			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2058			vf->ch[i].vsi_idx = 0;
2059			vf->ch[i].vsi_id = 0;
2060		}
2061	}
2062}
2063
2064/**
2065 * i40e_vc_get_max_frame_size
2066 * @vf: pointer to the VF
2067 *
2068 * Max frame size is determined based on the current port's max frame size and
2069 * whether a port VLAN is configured on this VF. The VF is not aware whether
2070 * it's in a port VLAN so the PF needs to account for this in max frame size
2071 * checks and sending the max frame size to the VF.
2072 **/
2073static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
2074{
2075	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
2076
2077	if (vf->port_vlan_id)
2078		max_frame_size -= VLAN_HLEN;
2079
2080	return max_frame_size;
2081}
2082
2083/**
2084 * i40e_vc_get_vf_resources_msg
2085 * @vf: pointer to the VF info
2086 * @msg: pointer to the msg buffer
2087 *
2088 * called from the VF to request its resources
2089 **/
2090static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2091{
2092	struct virtchnl_vf_resource *vfres = NULL;
2093	struct i40e_pf *pf = vf->pf;
2094	i40e_status aq_ret = 0;
2095	struct i40e_vsi *vsi;
2096	int num_vsis = 1;
2097	size_t len = 0;
2098	int ret;
2099
2100	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2101		aq_ret = I40E_ERR_PARAM;
2102		goto err;
2103	}
2104
2105	len = struct_size(vfres, vsi_res, num_vsis);
2106	vfres = kzalloc(len, GFP_KERNEL);
2107	if (!vfres) {
2108		aq_ret = I40E_ERR_NO_MEMORY;
2109		len = 0;
2110		goto err;
2111	}
2112	if (VF_IS_V11(&vf->vf_ver))
2113		vf->driver_caps = *(u32 *)msg;
2114	else
2115		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2116				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2117				  VIRTCHNL_VF_OFFLOAD_VLAN;
2118
2119	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2120	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2121	vsi = pf->vsi[vf->lan_vsi_idx];
2122	if (!vsi->info.pvid)
2123		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2124
2125	if (i40e_vf_client_capable(pf, vf->vf_id) &&
2126	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
2127		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
2128		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
2129	} else {
2130		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
2131	}
2132
2133	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2134		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2135	} else {
2136		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
2137		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2138			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2139		else
2140			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2141	}
2142
2143	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
2144		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2145			vfres->vf_cap_flags |=
2146				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2147	}
2148
2149	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2150		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2151
2152	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
2153	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2154		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2155
2156	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2157		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
2158			dev_err(&pf->pdev->dev,
2159				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2160				 vf->vf_id);
2161			aq_ret = I40E_ERR_PARAM;
2162			goto err;
2163		}
2164		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2165	}
2166
2167	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
2168		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2169			vfres->vf_cap_flags |=
2170					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2171	}
2172
2173	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2174		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2175
2176	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2177		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2178
2179	vfres->num_vsis = num_vsis;
2180	vfres->num_queue_pairs = vf->num_queue_pairs;
2181	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2182	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2183	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2184	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
2185
2186	if (vf->lan_vsi_idx) {
2187		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2188		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2189		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2190		/* VFs only use TC 0 */
2191		vfres->vsi_res[0].qset_handle
2192					  = le16_to_cpu(vsi->info.qs_handle[0]);
2193		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
2194			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2195			eth_zero_addr(vf->default_lan_addr.addr);
2196		}
2197		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2198				vf->default_lan_addr.addr);
2199	}
2200	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2201
2202err:
2203	/* send the response back to the VF */
2204	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2205				     aq_ret, (u8 *)vfres, len);
2206
2207	kfree(vfres);
2208	return ret;
2209}
2210
2211/**
2212 * i40e_vc_config_promiscuous_mode_msg
2213 * @vf: pointer to the VF info
2214 * @msg: pointer to the msg buffer
2215 *
2216 * called from the VF to configure the promiscuous mode of
2217 * VF vsis
2218 **/
2219static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2220{
2221	struct virtchnl_promisc_info *info =
2222	    (struct virtchnl_promisc_info *)msg;
2223	struct i40e_pf *pf = vf->pf;
2224	i40e_status aq_ret = 0;
2225	bool allmulti = false;
2226	bool alluni = false;
2227
2228	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2229		aq_ret = I40E_ERR_PARAM;
2230		goto err_out;
2231	}
2232	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2233		dev_err(&pf->pdev->dev,
2234			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
2235			vf->vf_id);
2236
2237		/* Lie to the VF on purpose, because this is an error we can
 2238		 * ignore; an unprivileged VF is not a virtual channel error.
2239		 */
2240		aq_ret = 0;
2241		goto err_out;
2242	}
2243
2244	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2245		aq_ret = I40E_ERR_PARAM;
2246		goto err_out;
2247	}
2248
2249	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2250		aq_ret = I40E_ERR_PARAM;
2251		goto err_out;
2252	}
2253
 2254	/* Multicast promiscuous handling */
2255	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2256		allmulti = true;
2257
2258	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2259		alluni = true;
2260	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2261						 alluni);
2262	if (aq_ret)
2263		goto err_out;
2264
2265	if (allmulti) {
2266		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2267				      &vf->vf_states))
2268			dev_info(&pf->pdev->dev,
2269				 "VF %d successfully set multicast promiscuous mode\n",
2270				 vf->vf_id);
2271	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2272				      &vf->vf_states))
2273		dev_info(&pf->pdev->dev,
2274			 "VF %d successfully unset multicast promiscuous mode\n",
2275			 vf->vf_id);
2276
2277	if (alluni) {
2278		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2279				      &vf->vf_states))
2280			dev_info(&pf->pdev->dev,
2281				 "VF %d successfully set unicast promiscuous mode\n",
2282				 vf->vf_id);
2283	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2284				      &vf->vf_states))
2285		dev_info(&pf->pdev->dev,
2286			 "VF %d successfully unset unicast promiscuous mode\n",
2287			 vf->vf_id);
2288
2289err_out:
2290	/* send the response to the VF */
2291	return i40e_vc_send_resp_to_vf(vf,
2292				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2293				       aq_ret);
2294}
2295
2296/**
2297 * i40e_vc_config_queues_msg
2298 * @vf: pointer to the VF info
2299 * @msg: pointer to the msg buffer
2300 *
2301 * called from the VF to configure the rx/tx
2302 * queues
2303 **/
2304static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2305{
2306	struct virtchnl_vsi_queue_config_info *qci =
2307	    (struct virtchnl_vsi_queue_config_info *)msg;
2308	struct virtchnl_queue_pair_info *qpi;
2309	u16 vsi_id, vsi_queue_id = 0;
2310	struct i40e_pf *pf = vf->pf;
2311	i40e_status aq_ret = 0;
2312	int i, j = 0, idx = 0;
2313	struct i40e_vsi *vsi;
2314	u16 num_qps_all = 0;
2315
2316	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2317		aq_ret = I40E_ERR_PARAM;
2318		goto error_param;
2319	}
2320
2321	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2322		aq_ret = I40E_ERR_PARAM;
2323		goto error_param;
2324	}
2325
2326	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2327		aq_ret = I40E_ERR_PARAM;
2328		goto error_param;
2329	}
2330
2331	if (vf->adq_enabled) {
2332		for (i = 0; i < vf->num_tc; i++)
2333			num_qps_all += vf->ch[i].num_qps;
2334		if (num_qps_all != qci->num_queue_pairs) {
2335			aq_ret = I40E_ERR_PARAM;
2336			goto error_param;
2337		}
2338	}
2339
2340	vsi_id = qci->vsi_id;
2341
2342	for (i = 0; i < qci->num_queue_pairs; i++) {
2343		qpi = &qci->qpair[i];
2344
2345		if (!vf->adq_enabled) {
2346			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2347						      qpi->txq.queue_id)) {
2348				aq_ret = I40E_ERR_PARAM;
2349				goto error_param;
2350			}
2351
2352			vsi_queue_id = qpi->txq.queue_id;
2353
2354			if (qpi->txq.vsi_id != qci->vsi_id ||
2355			    qpi->rxq.vsi_id != qci->vsi_id ||
2356			    qpi->rxq.queue_id != vsi_queue_id) {
2357				aq_ret = I40E_ERR_PARAM;
2358				goto error_param;
2359			}
2360		}
2361
2362		if (vf->adq_enabled) {
2363			if (idx >= ARRAY_SIZE(vf->ch)) {
2364				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2365				goto error_param;
2366			}
2367			vsi_id = vf->ch[idx].vsi_id;
2368		}
2369
2370		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2371					     &qpi->rxq) ||
2372		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2373					     &qpi->txq)) {
2374			aq_ret = I40E_ERR_PARAM;
2375			goto error_param;
2376		}
2377
 2378		/* For ADq there can be up to 4 VSIs with max 4 queues each.
 2379		 * The VF does not know about these additional VSIs and
 2380		 * only cares about its own queues. The PF configures these
 2381		 * queues on the appropriate VSIs based on the TC mapping.
 2382		 */
2383		if (vf->adq_enabled) {
2384			if (idx >= ARRAY_SIZE(vf->ch)) {
2385				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2386				goto error_param;
2387			}
2388			if (j == (vf->ch[idx].num_qps - 1)) {
2389				idx++;
2390				j = 0; /* resetting the queue count */
2391				vsi_queue_id = 0;
2392			} else {
2393				j++;
2394				vsi_queue_id++;
2395			}
2396		}
2397	}
2398	/* set vsi num_queue_pairs in use to num configured by VF */
2399	if (!vf->adq_enabled) {
2400		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2401			qci->num_queue_pairs;
2402	} else {
2403		for (i = 0; i < vf->num_tc; i++) {
2404			vsi = pf->vsi[vf->ch[i].vsi_idx];
2405			vsi->num_queue_pairs = vf->ch[i].num_qps;
2406
2407			if (i40e_update_adq_vsi_queues(vsi, i)) {
2408				aq_ret = I40E_ERR_CONFIG;
2409				goto error_param;
2410			}
2411		}
2412	}
2413
2414error_param:
2415	/* send the response to the VF */
2416	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2417				       aq_ret);
2418}
2419
2420/**
2421 * i40e_validate_queue_map - check queue map is valid
2422 * @vf: the VF structure pointer
2423 * @vsi_id: vsi id
2424 * @queuemap: Tx or Rx queue map
2425 *
2426 * check if Tx or Rx queue map is valid
2427 **/
2428static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2429				   unsigned long queuemap)
2430{
2431	u16 vsi_queue_id, queue_id;
2432
2433	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
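		/* for ADq, translate the VF-wide queue index into the
		 * per-channel VSI and its channel-relative queue id
		 */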
2434		if (vf->adq_enabled) {
2435			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2436			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2437		} else {
2438			queue_id = vsi_queue_id;
2439		}
2440
2441		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2442			return -EINVAL;
2443	}
2444
2445	return 0;
2446}
2447
2448/**
2449 * i40e_vc_config_irq_map_msg
2450 * @vf: pointer to the VF info
2451 * @msg: pointer to the msg buffer
2452 *
2453 * called from the VF to configure the irq to
2454 * queue map
2455 **/
2456static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2457{
2458	struct virtchnl_irq_map_info *irqmap_info =
2459	    (struct virtchnl_irq_map_info *)msg;
2460	struct virtchnl_vector_map *map;
2461	u16 vsi_id;
2462	i40e_status aq_ret = 0;
2463	int i;
2464
2465	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2466		aq_ret = I40E_ERR_PARAM;
2467		goto error_param;
2468	}
2469
2470	if (irqmap_info->num_vectors >
2471	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
2472		aq_ret = I40E_ERR_PARAM;
2473		goto error_param;
2474	}
2475
2476	for (i = 0; i < irqmap_info->num_vectors; i++) {
2477		map = &irqmap_info->vecmap[i];
2478		/* validate msg params */
2479		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2480		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2481			aq_ret = I40E_ERR_PARAM;
2482			goto error_param;
2483		}
2484		vsi_id = map->vsi_id;
2485
2486		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2487			aq_ret = I40E_ERR_PARAM;
2488			goto error_param;
2489		}
2490
2491		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2492			aq_ret = I40E_ERR_PARAM;
2493			goto error_param;
2494		}
2495
2496		i40e_config_irq_link_list(vf, vsi_id, map);
2497	}
2498error_param:
2499	/* send the response to the VF */
2500	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2501				       aq_ret);
2502}
2503
2504/**
2505 * i40e_ctrl_vf_tx_rings
2506 * @vsi: the SRIOV VSI being configured
2507 * @q_map: bit map of the queues to be enabled
2508 * @enable: start or stop the queue
2509 **/
2510static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2511				 bool enable)
2512{
2513	struct i40e_pf *pf = vsi->back;
2514	int ret = 0;
2515	u16 q_id;
2516
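	/* q_id in the bitmap is VF-relative, so offset by the VSI's
	 * base queue to reach the PF queue
	 */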
2517	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2518		ret = i40e_control_wait_tx_q(vsi->seid, pf,
2519					     vsi->base_queue + q_id,
2520					     false /*is xdp*/, enable);
2521		if (ret)
2522			break;
2523	}
2524	return ret;
2525}
2526
2527/**
2528 * i40e_ctrl_vf_rx_rings
2529 * @vsi: the SRIOV VSI being configured
2530 * @q_map: bit map of the queues to be enabled
2531 * @enable: start or stop the queue
2532 **/
2533static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2534				 bool enable)
2535{
2536	struct i40e_pf *pf = vsi->back;
2537	int ret = 0;
2538	u16 q_id;
2539
2540	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2541		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2542					     enable);
2543		if (ret)
2544			break;
2545	}
2546	return ret;
2547}
2548
2549/**
2550 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
2551 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2552 *
2553 * Returns true if validation was successful, else false.
 2554 **/
2555static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2556{
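	/* reject an empty selection and any bits beyond the supported
	 * number of VF queues
	 */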
2557	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2558	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2559	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2560		return false;
2561
2562	return true;
2563}
2564
2565/**
2566 * i40e_vc_enable_queues_msg
2567 * @vf: pointer to the VF info
2568 * @msg: pointer to the msg buffer
2569 *
2570 * called from the VF to enable all or specific queue(s)
2571 **/
2572static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2573{
2574	struct virtchnl_queue_select *vqs =
2575	    (struct virtchnl_queue_select *)msg;
2576	struct i40e_pf *pf = vf->pf;
2577	i40e_status aq_ret = 0;
2578	int i;
2579
2580	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2581		aq_ret = I40E_ERR_PARAM;
2582		goto error_param;
2583	}
2584
2585	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2586		aq_ret = I40E_ERR_PARAM;
2587		goto error_param;
2588	}
2589
2590	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2591		aq_ret = I40E_ERR_PARAM;
2592		goto error_param;
2593	}
2594
2595	/* Use the queue bit map sent by the VF */
2596	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2597				  true)) {
2598		aq_ret = I40E_ERR_TIMEOUT;
2599		goto error_param;
2600	}
2601	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2602				  true)) {
2603		aq_ret = I40E_ERR_TIMEOUT;
2604		goto error_param;
2605	}
2606
2607	/* need to start the rings for the additional ADq VSIs as well */
2608	if (vf->adq_enabled) {
2609		/* zero belongs to LAN VSI */
2610		for (i = 1; i < vf->num_tc; i++) {
2611			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2612				aq_ret = I40E_ERR_TIMEOUT;
2613		}
2614	}
2615
2616error_param:
2617	/* send the response to the VF */
2618	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2619				       aq_ret);
2620}
2621
2622/**
2623 * i40e_vc_disable_queues_msg
2624 * @vf: pointer to the VF info
2625 * @msg: pointer to the msg buffer
2626 *
2627 * called from the VF to disable all or specific
2628 * queue(s)
2629 **/
2630static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2631{
2632	struct virtchnl_queue_select *vqs =
2633	    (struct virtchnl_queue_select *)msg;
2634	struct i40e_pf *pf = vf->pf;
2635	i40e_status aq_ret = 0;
2636
2637	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2638		aq_ret = I40E_ERR_PARAM;
2639		goto error_param;
2640	}
2641
2642	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2643		aq_ret = I40E_ERR_PARAM;
2644		goto error_param;
2645	}
2646
2647	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2648		aq_ret = I40E_ERR_PARAM;
2649		goto error_param;
2650	}
2651
2652	/* Use the queue bit map sent by the VF */
2653	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2654				  false)) {
2655		aq_ret = I40E_ERR_TIMEOUT;
2656		goto error_param;
2657	}
2658	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2659				  false)) {
2660		aq_ret = I40E_ERR_TIMEOUT;
2661		goto error_param;
2662	}
2663error_param:
2664	/* send the response to the VF */
2665	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2666				       aq_ret);
2667}
2668
2669/**
2670 * i40e_check_enough_queue - check for a big enough run of free queues
2671 * @vf: pointer to the VF info
2672 * @needed: the number of items needed
2673 *
2674 * Returns a non-negative queue index if enough queues are free, or negative on error
2675 **/
2676static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2677{
2678	unsigned int  i, cur_queues, more, pool_size;
2679	struct i40e_lump_tracking *pile;
2680	struct i40e_pf *pf = vf->pf;
2681	struct i40e_vsi *vsi;
2682
2683	vsi = pf->vsi[vf->lan_vsi_idx];
2684	cur_queues = vsi->alloc_queue_pairs;
2685
2686	/* if the currently allocated queues already satisfy the need */
2687	if (cur_queues >= needed)
2688		return vsi->base_queue;
2689
2690	pile = pf->qp_pile;
2691	if (cur_queues > 0) {
2692		/* if some queues are already allocated, just check whether
2693		 * there are enough free queues behind the allocated block
2694		 * to grow it to the needed size.
2695		 */
2696		more = needed - cur_queues;
2697		for (i = vsi->base_queue + cur_queues;
2698			i < pile->num_entries; i++) {
2699			if (pile->list[i] & I40E_PILE_VALID_BIT)
2700				break;
2701
2702			if (more-- == 1)
2703				/* there is enough */
2704				return vsi->base_queue;
2705		}
2706	}
2707
2708	pool_size = 0;
2709	for (i = 0; i < pile->num_entries; i++) {
2710		if (pile->list[i] & I40E_PILE_VALID_BIT) {
2711			pool_size = 0;
2712			continue;
2713		}
2714		if (needed <= ++pool_size)
2715			/* there is enough */
2716			return i;
2717	}
2718
2719	return -ENOMEM;
2720}
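/* Worked example (hypothetical numbers): a VSI at base_queue 8 with 4
 * allocated pairs that needs 8 in total requires pile entries 12-15 to be
 * free; if any of them carries I40E_PILE_VALID_BIT, the second loop scans
 * the whole pile for any run of 8 free entries. Note that the current
 * caller only tests the sign of the return value.
 */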
2721
2722/**
2723 * i40e_vc_request_queues_msg
2724 * @vf: pointer to the VF info
2725 * @msg: pointer to the msg buffer
2726 *
2727 * VFs get a default number of queues but can use this message to request a
2728 * different number.  If the request is successful, PF will reset the VF and
2729 * return 0.  If unsuccessful, PF will send message informing VF of number of
2730 * available queues and return result of sending VF a message.
2731 **/
2732static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2733{
2734	struct virtchnl_vf_res_request *vfres =
2735		(struct virtchnl_vf_res_request *)msg;
2736	u16 req_pairs = vfres->num_queue_pairs;
2737	u8 cur_pairs = vf->num_queue_pairs;
2738	struct i40e_pf *pf = vf->pf;
2739
2740	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2741		return -EINVAL;
2742
2743	if (req_pairs > I40E_MAX_VF_QUEUES) {
2744		dev_err(&pf->pdev->dev,
2745			"VF %d tried to request more than %d queues.\n",
2746			vf->vf_id,
2747			I40E_MAX_VF_QUEUES);
2748		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2749	} else if (req_pairs - cur_pairs > pf->queues_left) {
2750		dev_warn(&pf->pdev->dev,
2751			 "VF %d requested %d more queues, but only %d left.\n",
2752			 vf->vf_id,
2753			 req_pairs - cur_pairs,
2754			 pf->queues_left);
2755		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2756	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2757		dev_warn(&pf->pdev->dev,
2758			 "VF %d requested %d more queues, but there are not enough available.\n",
2759			 vf->vf_id,
2760			 req_pairs - cur_pairs);
2761		vfres->num_queue_pairs = cur_pairs;
2762	} else {
2763		/* successful request */
2764		vf->num_req_queues = req_pairs;
2765		i40e_vc_reset_vf(vf, true);
2766		return 0;
2767	}
2768
2769	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2770				      (u8 *)vfres, sizeof(*vfres));
2771}
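/* Sketch of the exchange from the VF side (assumed flow, not code in this
 * file): the VF sends VIRTCHNL_OP_REQUEST_QUEUES with, say,
 * num_queue_pairs = 8; if the request can be met, the PF stores it in
 * num_req_queues and resets the VF, which then re-negotiates resources
 * with the new count, otherwise the VF receives the adjusted
 * num_queue_pairs it may retry with.
 */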
2772
2773/**
2774 * i40e_vc_get_stats_msg
2775 * @vf: pointer to the VF info
2776 * @msg: pointer to the msg buffer
2777 *
2778 * called from the VF to get vsi stats
2779 **/
2780static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2781{
2782	struct virtchnl_queue_select *vqs =
2783	    (struct virtchnl_queue_select *)msg;
2784	struct i40e_pf *pf = vf->pf;
2785	struct i40e_eth_stats stats;
2786	i40e_status aq_ret = 0;
2787	struct i40e_vsi *vsi;
2788
2789	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2790
2791	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2792		aq_ret = I40E_ERR_PARAM;
2793		goto error_param;
2794	}
2795
2796	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2797		aq_ret = I40E_ERR_PARAM;
2798		goto error_param;
2799	}
2800
2801	vsi = pf->vsi[vf->lan_vsi_idx];
2802	if (!vsi) {
2803		aq_ret = I40E_ERR_PARAM;
2804		goto error_param;
2805	}
2806	i40e_update_eth_stats(vsi);
2807	stats = vsi->eth_stats;
2808
2809error_param:
2810	/* send the response back to the VF */
2811	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2812				      (u8 *)&stats, sizeof(stats));
2813}
2814
2815#define I40E_MAX_MACVLAN_PER_HW 3072
2816#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
2817	(num_ports))
2818/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2819 * program: 16 for multicast, 1 for the VF's own MAC, 1 for broadcast.
2820 */
2821#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2822#define I40E_VC_MAX_VLAN_PER_VF 16
2823
2824#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
2825({	typeof(vf_num) vf_num_ = (vf_num);				\
2826	typeof(num_ports) num_ports_ = (num_ports);			\
2827	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
2828	I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
2829	I40E_VC_MAX_MAC_ADDR_PER_VF; })
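/* Worked example of the cap above (illustrative numbers): with 4 ports,
 * I40E_MAX_MACVLAN_PER_PF is 3072 / 4 = 768; for 16 allocated VFs the
 * trusted-VF limit is ((768 - 16 * 18) / 16) + 18 = 48 filters per VF,
 * where 18 is I40E_VC_MAX_MAC_ADDR_PER_VF.
 */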
2830/**
2831 * i40e_check_vf_permission
2832 * @vf: pointer to the VF info
2833 * @al: MAC address list from virtchnl
2834 *
2835 * Check that the given list of MAC addresses is allowed. Returns an error
2836 * if any address in the list is invalid or not permitted. Checks these conditions:
2837 *
2838 * 1) broadcast and zero addresses are never valid
2839 * 2) unicast addresses are not allowed if the VMM has administratively set
2840 *    the VF MAC address, unless the VF is marked as privileged.
2841 * 3) There is enough space to add all the addresses.
2842 *
2843 * Note that to guarantee consistency, it is expected this function be called
2844 * while holding the mac_filter_hash_lock, as otherwise the current number of
2845 * addresses might not be accurate.
2846 **/
2847static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2848					   struct virtchnl_ether_addr_list *al)
2849{
2850	struct i40e_pf *pf = vf->pf;
2851	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2852	struct i40e_hw *hw = &pf->hw;
2853	int mac2add_cnt = 0;
2854	int i;
2855
2856	for (i = 0; i < al->num_elements; i++) {
2857		struct i40e_mac_filter *f;
2858		u8 *addr = al->list[i].addr;
2859
2860		if (is_broadcast_ether_addr(addr) ||
2861		    is_zero_ether_addr(addr)) {
2862			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2863				addr);
2864			return I40E_ERR_INVALID_MAC_ADDR;
2865		}
2866
2867		/* If the host VMM administrator has set the VF MAC address
2868		 * administratively via the ndo_set_vf_mac command then deny
2869		 * permission to the VF to add or delete unicast MAC addresses.
2870		 * Unless the VF is privileged and then it can do whatever.
2871		 * The VF may request to set the MAC address filter already
2872		 * assigned to it so do not return an error in that case.
2873		 */
2874		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2875		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2876		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2877			dev_err(&pf->pdev->dev,
2878				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2879			return -EPERM;
2880		}
2881
2882		/* count filters that really will be added */
2883		f = i40e_find_mac(vsi, addr);
2884		if (!f)
2885			++mac2add_cnt;
2886	}
2887
2888	/* If this VF is not privileged, then we can't add more than a limited
2889	 * number of addresses. Check to make sure that the additions do not
2890	 * push us over the limit.
2891	 */
2892	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2893		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2894		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
2895			dev_err(&pf->pdev->dev,
2896				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2897			return -EPERM;
2898		}
2899	/* If this VF is trusted, it can use more resources than an untrusted
2900	 * one. However, to ensure that every trusted VF gets an appropriate
2901	 * share, divide the whole pool of resources per port and then across
2902	 * all VFs.
2903	 */
2904	} else {
2905		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2906		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2907						       hw->num_ports)) {
2908			dev_err(&pf->pdev->dev,
2909				"Cannot add more MAC addresses, trusted VF exhausted its resources\n");
2910			return -EPERM;
2911		}
2912	}
2913	return 0;
2914}
2915
2916/**
2917 * i40e_vc_add_mac_addr_msg
2918 * @vf: pointer to the VF info
2919 * @msg: pointer to the msg buffer
2920 *
2921 * add guest mac address filter
2922 **/
2923static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2924{
2925	struct virtchnl_ether_addr_list *al =
2926	    (struct virtchnl_ether_addr_list *)msg;
2927	struct i40e_pf *pf = vf->pf;
2928	struct i40e_vsi *vsi = NULL;
2929	i40e_status ret = 0;
2930	int i;
2931
2932	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
2933	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2934		ret = I40E_ERR_PARAM;
2935		goto error_param;
2936	}
2937
2938	vsi = pf->vsi[vf->lan_vsi_idx];
2939
2940	/* Lock once, because every function called inside the loop accesses
2941	 * the VSI's MAC filter list, which must be protected by the same lock.
2942	 */
2943	spin_lock_bh(&vsi->mac_filter_hash_lock);
2944
2945	ret = i40e_check_vf_permission(vf, al);
2946	if (ret) {
2947		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2948		goto error_param;
2949	}
2950
2951	/* add new addresses to the list */
2952	for (i = 0; i < al->num_elements; i++) {
2953		struct i40e_mac_filter *f;
2954
2955		f = i40e_find_mac(vsi, al->list[i].addr);
2956		if (!f) {
2957			f = i40e_add_mac_filter(vsi, al->list[i].addr);
2958
2959			if (!f) {
2960				dev_err(&pf->pdev->dev,
2961					"Unable to add MAC filter %pM for VF %d\n",
2962					al->list[i].addr, vf->vf_id);
2963				ret = I40E_ERR_PARAM;
2964				spin_unlock_bh(&vsi->mac_filter_hash_lock);
2965				goto error_param;
2966			}
2967			if (is_valid_ether_addr(al->list[i].addr) &&
2968			    is_zero_ether_addr(vf->default_lan_addr.addr))
2969				ether_addr_copy(vf->default_lan_addr.addr,
2970						al->list[i].addr);
2971		}
2972	}
2973	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2974
2975	/* program the updated filter list */
2976	ret = i40e_sync_vsi_filters(vsi);
2977	if (ret)
2978		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2979			vf->vf_id, ret);
2980
2981error_param:
2982	/* send the response to the VF */
2983	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2984				      ret, NULL, 0);
2985}
2986
2987/**
2988 * i40e_vc_del_mac_addr_msg
2989 * @vf: pointer to the VF info
2990 * @msg: pointer to the msg buffer
2991 *
2992 * remove guest mac address filter
2993 **/
2994static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2995{
2996	struct virtchnl_ether_addr_list *al =
2997	    (struct virtchnl_ether_addr_list *)msg;
2998	bool was_unimac_deleted = false;
2999	struct i40e_pf *pf = vf->pf;
3000	struct i40e_vsi *vsi = NULL;
3001	i40e_status ret = 0;
3002	int i;
3003
3004	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3005	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3006		ret = I40E_ERR_PARAM;
3007		goto error_param;
3008	}
3009
3010	for (i = 0; i < al->num_elements; i++) {
3011		if (is_broadcast_ether_addr(al->list[i].addr) ||
3012		    is_zero_ether_addr(al->list[i].addr)) {
3013			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
3014				al->list[i].addr, vf->vf_id);
3015			ret = I40E_ERR_INVALID_MAC_ADDR;
3016			goto error_param;
3017		}
3018		if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
3019			was_unimac_deleted = true;
3020	}
3021	vsi = pf->vsi[vf->lan_vsi_idx];
3022
3023	spin_lock_bh(&vsi->mac_filter_hash_lock);
3024	/* delete addresses from the list */
3025	for (i = 0; i < al->num_elements; i++)
3026		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3027			ret = I40E_ERR_INVALID_MAC_ADDR;
3028			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3029			goto error_param;
3030		}
3031
3032	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3033
3034	/* program the updated filter list */
3035	ret = i40e_sync_vsi_filters(vsi);
3036	if (ret)
3037		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3038			vf->vf_id, ret);
3039
3040	if (vf->trusted && was_unimac_deleted) {
3041		struct i40e_mac_filter *f;
3042		struct hlist_node *h;
3043		u8 *macaddr = NULL;
3044		int bkt;
3045
3046		/* set last unicast mac address as default */
3047		spin_lock_bh(&vsi->mac_filter_hash_lock);
3048		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3049			if (is_valid_ether_addr(f->macaddr))
3050				macaddr = f->macaddr;
3051		}
3052		if (macaddr)
3053			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3054		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3055	}
3056error_param:
3057	/* send the response to the VF */
3058	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3059}
3060
3061/**
3062 * i40e_vc_add_vlan_msg
3063 * @vf: pointer to the VF info
3064 * @msg: pointer to the msg buffer
3065 *
3066 * program guest vlan id
3067 **/
3068static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3069{
3070	struct virtchnl_vlan_filter_list *vfl =
3071	    (struct virtchnl_vlan_filter_list *)msg;
3072	struct i40e_pf *pf = vf->pf;
3073	struct i40e_vsi *vsi = NULL;
3074	i40e_status aq_ret = 0;
3075	int i;
3076
3077	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3078	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3079		dev_err(&pf->pdev->dev,
3080			"VF is not trusted, switch the VF to trusted to add more VLAN IDs\n");
3081		goto error_param;
3082	}
3083	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3084	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3085		aq_ret = I40E_ERR_PARAM;
3086		goto error_param;
3087	}
3088
3089	for (i = 0; i < vfl->num_elements; i++) {
3090		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3091			aq_ret = I40E_ERR_PARAM;
3092			dev_err(&pf->pdev->dev,
3093				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3094			goto error_param;
3095		}
3096	}
3097	vsi = pf->vsi[vf->lan_vsi_idx];
3098	if (vsi->info.pvid) {
3099		aq_ret = I40E_ERR_PARAM;
3100		goto error_param;
3101	}
3102
3103	i40e_vlan_stripping_enable(vsi);
3104	for (i = 0; i < vfl->num_elements; i++) {
3105		/* add new VLAN filter */
3106		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3107		if (!ret)
3108			vf->num_vlan++;
3109
3110		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3111			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3112							   true,
3113							   vfl->vlan_id[i],
3114							   NULL);
3115		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3116			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3117							   true,
3118							   vfl->vlan_id[i],
3119							   NULL);
3120
3121		if (ret)
3122			dev_err(&pf->pdev->dev,
3123				"Unable to add VLAN filter %d for VF %d, error %d\n",
3124				vfl->vlan_id[i], vf->vf_id, ret);
3125	}
3126
3127error_param:
3128	/* send the response to the VF */
3129	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3130}
3131
3132/**
3133 * i40e_vc_remove_vlan_msg
3134 * @vf: pointer to the VF info
3135 * @msg: pointer to the msg buffer
3136 *
3137 * remove programmed guest vlan id
3138 **/
3139static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3140{
3141	struct virtchnl_vlan_filter_list *vfl =
3142	    (struct virtchnl_vlan_filter_list *)msg;
3143	struct i40e_pf *pf = vf->pf;
3144	struct i40e_vsi *vsi = NULL;
3145	i40e_status aq_ret = 0;
3146	int i;
3147
3148	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3149	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3150		aq_ret = I40E_ERR_PARAM;
3151		goto error_param;
3152	}
3153
3154	for (i = 0; i < vfl->num_elements; i++) {
3155		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3156			aq_ret = I40E_ERR_PARAM;
3157			goto error_param;
3158		}
3159	}
3160
3161	vsi = pf->vsi[vf->lan_vsi_idx];
3162	if (vsi->info.pvid) {
3163		if (vfl->num_elements > 1 || vfl->vlan_id[0])
3164			aq_ret = I40E_ERR_PARAM;
3165		goto error_param;
3166	}
3167
3168	for (i = 0; i < vfl->num_elements; i++) {
3169		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3170		vf->num_vlan--;
3171
3172		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3173			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3174							   false,
3175							   vfl->vlan_id[i],
3176							   NULL);
3177		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3178			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3179							   false,
3180							   vfl->vlan_id[i],
3181							   NULL);
3182	}
3183
3184error_param:
3185	/* send the response to the VF */
3186	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3187}
3188
3189/**
3190 * i40e_vc_iwarp_msg
3191 * @vf: pointer to the VF info
3192 * @msg: pointer to the msg buffer
3193 * @msglen: msg length
3194 *
3195 * called from the VF for the iwarp msgs
3196 **/
3197static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3198{
3199	struct i40e_pf *pf = vf->pf;
3200	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3201	i40e_status aq_ret = 0;
3202
3203	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3204	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3205		aq_ret = I40E_ERR_PARAM;
3206		goto error_param;
3207	}
3208
3209	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3210				     msg, msglen);
3211
3212error_param:
3213	/* send the response to the VF */
3214	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
3215				       aq_ret);
3216}
3217
3218/**
3219 * i40e_vc_iwarp_qvmap_msg
3220 * @vf: pointer to the VF info
3221 * @msg: pointer to the msg buffer
3222 * @config: config qvmap or release it
3223 *
3224 * called from the VF for the iwarp msgs
3225 **/
3226static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3227{
3228	struct virtchnl_iwarp_qvlist_info *qvlist_info =
3229				(struct virtchnl_iwarp_qvlist_info *)msg;
3230	i40e_status aq_ret = 0;
3231
3232	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3233	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3234		aq_ret = I40E_ERR_PARAM;
3235		goto error_param;
3236	}
3237
3238	if (config) {
3239		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
3240			aq_ret = I40E_ERR_PARAM;
3241	} else {
3242		i40e_release_iwarp_qvlist(vf);
3243	}
3244
3245error_param:
3246	/* send the response to the VF */
3247	return i40e_vc_send_resp_to_vf(vf,
3248			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
3249			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
3250			       aq_ret);
3251}
3252
3253/**
3254 * i40e_vc_config_rss_key
3255 * @vf: pointer to the VF info
3256 * @msg: pointer to the msg buffer
3257 *
3258 * Configure the VF's RSS key
3259 **/
3260static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3261{
3262	struct virtchnl_rss_key *vrk =
3263		(struct virtchnl_rss_key *)msg;
3264	struct i40e_pf *pf = vf->pf;
3265	struct i40e_vsi *vsi = NULL;
3266	i40e_status aq_ret = 0;
3267
3268	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3269	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3270	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3271		aq_ret = I40E_ERR_PARAM;
3272		goto err;
3273	}
3274
3275	vsi = pf->vsi[vf->lan_vsi_idx];
3276	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3277err:
3278	/* send the response to the VF */
3279	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3280				       aq_ret);
3281}
3282
3283/**
3284 * i40e_vc_config_rss_lut
3285 * @vf: pointer to the VF info
3286 * @msg: pointer to the msg buffer
3287 *
3288 * Configure the VF's RSS LUT
3289 **/
3290static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3291{
3292	struct virtchnl_rss_lut *vrl =
3293		(struct virtchnl_rss_lut *)msg;
3294	struct i40e_pf *pf = vf->pf;
3295	struct i40e_vsi *vsi = NULL;
3296	i40e_status aq_ret = 0;
3297	u16 i;
3298
3299	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3300	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3301	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3302		aq_ret = I40E_ERR_PARAM;
3303		goto err;
3304	}
3305
3306	for (i = 0; i < vrl->lut_entries; i++)
3307		if (vrl->lut[i] >= vf->num_queue_pairs) {
3308			aq_ret = I40E_ERR_PARAM;
3309			goto err;
3310		}
3311
3312	vsi = pf->vsi[vf->lan_vsi_idx];
3313	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3314	/* send the response to the VF */
3315err:
3316	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3317				       aq_ret);
3318}
3319
3320/**
3321 * i40e_vc_get_rss_hena
3322 * @vf: pointer to the VF info
3323 * @msg: pointer to the msg buffer
3324 *
3325 * Return the RSS HENA bits allowed by the hardware
3326 **/
3327static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3328{
3329	struct virtchnl_rss_hena *vrh = NULL;
3330	struct i40e_pf *pf = vf->pf;
3331	i40e_status aq_ret = 0;
3332	int len = 0;
3333
3334	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3335		aq_ret = I40E_ERR_PARAM;
3336		goto err;
3337	}
3338	len = sizeof(struct virtchnl_rss_hena);
3339
3340	vrh = kzalloc(len, GFP_KERNEL);
3341	if (!vrh) {
3342		aq_ret = I40E_ERR_NO_MEMORY;
3343		len = 0;
3344		goto err;
3345	}
3346	vrh->hena = i40e_pf_get_default_rss_hena(pf);
3347err:
3348	/* send the response back to the VF */
3349	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3350					aq_ret, (u8 *)vrh, len);
3351	kfree(vrh);
3352	return aq_ret;
3353}
3354
3355/**
3356 * i40e_vc_set_rss_hena
3357 * @vf: pointer to the VF info
3358 * @msg: pointer to the msg buffer
3359 *
3360 * Set the RSS HENA bits for the VF
3361 **/
3362static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3363{
3364	struct virtchnl_rss_hena *vrh =
3365		(struct virtchnl_rss_hena *)msg;
3366	struct i40e_pf *pf = vf->pf;
3367	struct i40e_hw *hw = &pf->hw;
3368	i40e_status aq_ret = 0;
3369
3370	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3371		aq_ret = I40E_ERR_PARAM;
3372		goto err;
3373	}
3374	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3375	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3376			  (u32)(vrh->hena >> 32));
3377
3378	/* send the response to the VF */
3379err:
3380	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3381}
3382
3383/**
3384 * i40e_vc_enable_vlan_stripping
3385 * @vf: pointer to the VF info
3386 * @msg: pointer to the msg buffer
3387 *
3388 * Enable vlan header stripping for the VF
3389 **/
3390static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3391{
3392	i40e_status aq_ret = 0;
3393	struct i40e_vsi *vsi;
3394
3395	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3396		aq_ret = I40E_ERR_PARAM;
3397		goto err;
3398	}
3399
3400	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3401	i40e_vlan_stripping_enable(vsi);
3402
3403	/* send the response to the VF */
3404err:
3405	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3406				       aq_ret);
3407}
3408
3409/**
3410 * i40e_vc_disable_vlan_stripping
3411 * @vf: pointer to the VF info
3412 * @msg: pointer to the msg buffer
3413 *
3414 * Disable vlan header stripping for the VF
3415 **/
3416static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3417{
3418	i40e_status aq_ret = 0;
3419	struct i40e_vsi *vsi;
3420
3421	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3422		aq_ret = I40E_ERR_PARAM;
3423		goto err;
3424	}
3425
3426	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3427	i40e_vlan_stripping_disable(vsi);
3428
3429	/* send the response to the VF */
3430err:
3431	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3432				       aq_ret);
3433}
3434
3435/**
3436 * i40e_validate_cloud_filter
3437 * @vf: pointer to VF structure
3438 * @tc_filter: pointer to filter requested
3439 *
3440 * This function validates cloud filter programmed as TC filter for ADq
3441 **/
3442static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3443				      struct virtchnl_filter *tc_filter)
3444{
3445	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3446	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3447	struct i40e_pf *pf = vf->pf;
3448	struct i40e_vsi *vsi = NULL;
3449	struct i40e_mac_filter *f;
3450	struct hlist_node *h;
3451	bool found = false;
3452	int bkt;
3453
3454	if (!tc_filter->action) {
3455		dev_info(&pf->pdev->dev,
3456			 "VF %d: Currently ADq doesn't support Drop Action\n",
3457			 vf->vf_id);
3458		goto err;
3459	}
3460
3461	/* action_meta is TC number here to which the filter is applied */
3462	if (!tc_filter->action_meta ||
3463	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
3464		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3465			 vf->vf_id, tc_filter->action_meta);
3466		goto err;
3467	}
3468
3469	/* Check filter if it's programmed for advanced mode or basic mode.
3470	 * There are two ADq modes (for VF only),
3471	 * 1. Basic mode: intended to allow as many filter options as possible
3472	 *		  to be added to a VF in Non-trusted mode. Main goal is
3473	 *		  to add filters to its own MAC and VLAN id.
3474	 * 2. Advanced mode: is for allowing filters to be applied other than
3475	 *		  its own MAC or VLAN. This mode requires the VF to be
3476	 *		  Trusted.
3477	 */
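	/* For example, a filter keyed only on the VF's own destination MAC
	 * (optionally plus one of its VLANs) takes the basic-mode path
	 * below, while a filter keyed on a destination IP falls into the
	 * else branch and is honoured only for a trusted VF.
	 */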
3478	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3479		vsi = pf->vsi[vf->lan_vsi_idx];
3480		f = i40e_find_mac(vsi, data.dst_mac);
3481
3482		if (!f) {
3483			dev_info(&pf->pdev->dev,
3484				 "Destination MAC %pM doesn't belong to VF %d\n",
3485				 data.dst_mac, vf->vf_id);
3486			goto err;
3487		}
3488
3489		if (mask.vlan_id) {
3490			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3491					   hlist) {
3492				if (f->vlan == ntohs(data.vlan_id)) {
3493					found = true;
3494					break;
3495				}
3496			}
3497			if (!found) {
3498				dev_info(&pf->pdev->dev,
3499					 "VF %d doesn't have any VLAN id %u\n",
3500					 vf->vf_id, ntohs(data.vlan_id));
3501				goto err;
3502			}
3503		}
3504	} else {
3505		/* Check if VF is trusted */
3506		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3507			dev_err(&pf->pdev->dev,
3508				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3509				vf->vf_id);
3510			return I40E_ERR_CONFIG;
3511		}
3512	}
3513
3514	if (mask.dst_mac[0] & data.dst_mac[0]) {
3515		if (is_broadcast_ether_addr(data.dst_mac) ||
3516		    is_zero_ether_addr(data.dst_mac)) {
3517			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3518				 vf->vf_id, data.dst_mac);
3519			goto err;
3520		}
3521	}
3522
3523	if (mask.src_mac[0] & data.src_mac[0]) {
3524		if (is_broadcast_ether_addr(data.src_mac) ||
3525		    is_zero_ether_addr(data.src_mac)) {
3526			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3527				 vf->vf_id, data.src_mac);
3528			goto err;
3529		}
3530	}
3531
3532	if (mask.dst_port & data.dst_port) {
3533		if (!data.dst_port) {
3534			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3535				 vf->vf_id);
3536			goto err;
3537		}
3538	}
3539
3540	if (mask.src_port & data.src_port) {
3541		if (!data.src_port) {
3542			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3543				 vf->vf_id);
3544			goto err;
3545		}
3546	}
3547
3548	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3549	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3550		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3551			 vf->vf_id);
3552		goto err;
3553	}
3554
3555	if (mask.vlan_id & data.vlan_id) {
3556		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3557			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3558				 vf->vf_id);
3559			goto err;
3560		}
3561	}
3562
3563	return I40E_SUCCESS;
3564err:
3565	return I40E_ERR_CONFIG;
3566}
3567
3568/**
3569 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3570 * @vf: pointer to the VF info
3571 * @seid: seid of the vsi it is searching for
3572 **/
3573static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3574{
3575	struct i40e_pf *pf = vf->pf;
3576	struct i40e_vsi *vsi = NULL;
3577	int i;
3578
3579	for (i = 0; i < vf->num_tc; i++) {
3580		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3581		if (vsi && vsi->seid == seid)
3582			return vsi;
3583	}
3584	return NULL;
3585}
3586
3587/**
3588 * i40e_del_all_cloud_filters
3589 * @vf: pointer to the VF info
3590 *
3591 * This function deletes all cloud filters
3592 **/
3593static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3594{
3595	struct i40e_cloud_filter *cfilter = NULL;
3596	struct i40e_pf *pf = vf->pf;
3597	struct i40e_vsi *vsi = NULL;
3598	struct hlist_node *node;
3599	int ret;
3600
3601	hlist_for_each_entry_safe(cfilter, node,
3602				  &vf->cloud_filter_list, cloud_node) {
3603		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3604
3605		if (!vsi) {
3606			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3607				vf->vf_id, cfilter->seid);
3608			continue;
3609		}
3610
3611		if (cfilter->dst_port)
3612			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3613								false);
3614		else
3615			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3616		if (ret)
3617			dev_err(&pf->pdev->dev,
3618				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3619				vf->vf_id, i40e_stat_str(&pf->hw, ret),
3620				i40e_aq_str(&pf->hw,
3621					    pf->hw.aq.asq_last_status));
3622
3623		hlist_del(&cfilter->cloud_node);
3624		kfree(cfilter);
3625		vf->num_cloud_filters--;
3626	}
3627}
3628
3629/**
3630 * i40e_vc_del_cloud_filter
3631 * @vf: pointer to the VF info
3632 * @msg: pointer to the msg buffer
3633 *
3634 * This function deletes a cloud filter programmed as TC filter for ADq
3635 **/
3636static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3637{
3638	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3639	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3640	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3641	struct i40e_cloud_filter cfilter, *cf = NULL;
3642	struct i40e_pf *pf = vf->pf;
3643	struct i40e_vsi *vsi = NULL;
3644	struct hlist_node *node;
3645	i40e_status aq_ret = 0;
3646	int i, ret;
3647
3648	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3649		aq_ret = I40E_ERR_PARAM;
3650		goto err;
3651	}
3652
3653	if (!vf->adq_enabled) {
3654		dev_info(&pf->pdev->dev,
3655			 "VF %d: ADq not enabled, can't apply cloud filter\n",
3656			 vf->vf_id);
3657		aq_ret = I40E_ERR_PARAM;
3658		goto err;
3659	}
3660
3661	if (i40e_validate_cloud_filter(vf, vcf)) {
3662		dev_info(&pf->pdev->dev,
3663			 "VF %d: Invalid input, can't apply cloud filter\n",
3664			 vf->vf_id);
3665		aq_ret = I40E_ERR_PARAM;
3666		goto err;
3667	}
3668
3669	memset(&cfilter, 0, sizeof(cfilter));
3670	/* parse destination mac address */
3671	for (i = 0; i < ETH_ALEN; i++)
3672		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3673
3674	/* parse source mac address */
3675	for (i = 0; i < ETH_ALEN; i++)
3676		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3677
3678	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3679	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3680	cfilter.src_port = mask.src_port & tcf.src_port;
3681
3682	switch (vcf->flow_type) {
3683	case VIRTCHNL_TCP_V4_FLOW:
3684		cfilter.n_proto = ETH_P_IP;
3685		if (mask.dst_ip[0] & tcf.dst_ip[0])
3686			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3687			       ARRAY_SIZE(tcf.dst_ip));
3688		else if (mask.src_ip[0] & tcf.src_ip[0])
3689			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3690			       ARRAY_SIZE(tcf.dst_ip));
3691		break;
3692	case VIRTCHNL_TCP_V6_FLOW:
3693		cfilter.n_proto = ETH_P_IPV6;
3694		if (mask.dst_ip[3] & tcf.dst_ip[3])
3695			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3696			       sizeof(cfilter.ip.v6.dst_ip6));
3697		if (mask.src_ip[3] & tcf.src_ip[3])
3698			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3699			       sizeof(cfilter.ip.v6.src_ip6));
3700		break;
3701	default:
3702		/* TC filter can be configured based on different combinations
3703		 * and in this case IP is not a part of filter config
3704		 */
3705		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3706			 vf->vf_id);
3707	}
3708
3709	/* get the VSI to which the TC belongs */
3710	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3711	cfilter.seid = vsi->seid;
3712	cfilter.flags = vcf->field_flags;
3713
3714	/* Deleting TC filter */
3715	if (tcf.dst_port)
3716		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3717	else
3718		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3719	if (ret) {
3720		dev_err(&pf->pdev->dev,
3721			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3722			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3723			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3724		goto err;
3725	}
3726
3727	hlist_for_each_entry_safe(cf, node,
3728				  &vf->cloud_filter_list, cloud_node) {
3729		if (cf->seid != cfilter.seid)
3730			continue;
3731		if (mask.dst_port)
3732			if (cfilter.dst_port != cf->dst_port)
3733				continue;
3734		if (mask.dst_mac[0])
3735			if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3736				continue;
3737		/* for ipv4 data to be valid, only the first word of the mask is set */
3738		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3739			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3740				   ARRAY_SIZE(tcf.dst_ip)))
3741				continue;
3742		/* for ipv6, mask is set for all sixteen bytes (4 words) */
3743		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3744			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3745				   sizeof(cfilter.ip.v6.src_ip6)))
3746				continue;
3747		if (mask.vlan_id)
3748			if (cfilter.vlan_id != cf->vlan_id)
3749				continue;
3750
3751		hlist_del(&cf->cloud_node);
3752		kfree(cf);
3753		vf->num_cloud_filters--;
3754	}
3755
3756err:
3757	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3758				       aq_ret);
3759}
3760
3761/**
3762 * i40e_vc_add_cloud_filter
3763 * @vf: pointer to the VF info
3764 * @msg: pointer to the msg buffer
3765 *
3766 * This function adds a cloud filter programmed as TC filter for ADq
3767 **/
3768static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3769{
3770	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3771	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3772	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3773	struct i40e_cloud_filter *cfilter = NULL;
3774	struct i40e_pf *pf = vf->pf;
3775	struct i40e_vsi *vsi = NULL;
3776	i40e_status aq_ret = 0;
3777	int i, ret;
3778
3779	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3780		aq_ret = I40E_ERR_PARAM;
3781		goto err_out;
3782	}
3783
3784	if (!vf->adq_enabled) {
3785		dev_info(&pf->pdev->dev,
3786			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3787			 vf->vf_id);
3788		aq_ret = I40E_ERR_PARAM;
3789		goto err_out;
3790	}
3791
3792	if (i40e_validate_cloud_filter(vf, vcf)) {
3793		dev_info(&pf->pdev->dev,
3794			 "VF %d: Invalid input/s, can't apply cloud filter\n",
3795			 vf->vf_id);
3796		aq_ret = I40E_ERR_PARAM;
3797		goto err_out;
3798	}
3799
3800	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3801	if (!cfilter)
3802		return -ENOMEM;
3803
3804	/* parse destination mac address */
3805	for (i = 0; i < ETH_ALEN; i++)
3806		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3807
3808	/* parse source mac address */
3809	for (i = 0; i < ETH_ALEN; i++)
3810		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3811
3812	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3813	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3814	cfilter->src_port = mask.src_port & tcf.src_port;
3815
3816	switch (vcf->flow_type) {
3817	case VIRTCHNL_TCP_V4_FLOW:
3818		cfilter->n_proto = ETH_P_IP;
3819		if (mask.dst_ip[0] & tcf.dst_ip[0])
3820			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3821			       ARRAY_SIZE(tcf.dst_ip));
3822		else if (mask.src_ip[0] & tcf.src_ip[0])
3823			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3824			       ARRAY_SIZE(tcf.dst_ip));
3825		break;
3826	case VIRTCHNL_TCP_V6_FLOW:
3827		cfilter->n_proto = ETH_P_IPV6;
3828		if (mask.dst_ip[3] & tcf.dst_ip[3])
3829			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3830			       sizeof(cfilter->ip.v6.dst_ip6));
3831		if (mask.src_ip[3] & tcf.src_ip[3])
3832			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3833			       sizeof(cfilter->ip.v6.src_ip6));
3834		break;
3835	default:
3836		/* TC filter can be configured based on different combinations
3837		 * and in this case IP is not a part of filter config
3838		 */
3839		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3840			 vf->vf_id);
3841	}
3842
3843	/* get the VSI to which the TC belongs */
3844	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3845	cfilter->seid = vsi->seid;
3846	cfilter->flags = vcf->field_flags;
3847
3848	/* Adding cloud filter programmed as TC filter */
3849	if (tcf.dst_port)
3850		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3851	else
3852		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3853	if (ret) {
3854		dev_err(&pf->pdev->dev,
3855			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3856			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3857			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3858		goto err_free;
3859	}
3860
3861	INIT_HLIST_NODE(&cfilter->cloud_node);
3862	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3863	/* hand the filter over to the list; clear the pointer so err_free won't free it */
3864	cfilter = NULL;
3865	vf->num_cloud_filters++;
3866err_free:
3867	kfree(cfilter);
3868err_out:
3869	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3870				       aq_ret);
3871}
3872
3873/**
3874 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3875 * @vf: pointer to the VF info
3876 * @msg: pointer to the msg buffer
3877 **/
3878static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3879{
3880	struct virtchnl_tc_info *tci =
3881		(struct virtchnl_tc_info *)msg;
3882	struct i40e_pf *pf = vf->pf;
3883	struct i40e_link_status *ls = &pf->hw.phy.link_info;
3884	int i, adq_request_qps = 0;
3885	i40e_status aq_ret = 0;
3886	u64 speed = 0;
3887
3888	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3889		aq_ret = I40E_ERR_PARAM;
3890		goto err;
3891	}
3892
3893	/* ADq cannot be applied if spoof check is ON */
3894	if (vf->spoofchk) {
3895		dev_err(&pf->pdev->dev,
3896			"Spoof check is ON, turn it OFF to enable ADq\n");
3897		aq_ret = I40E_ERR_PARAM;
3898		goto err;
3899	}
3900
3901	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3902		dev_err(&pf->pdev->dev,
3903			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3904			vf->vf_id);
3905		aq_ret = I40E_ERR_PARAM;
3906		goto err;
3907	}
3908
3909	/* max number of traffic classes for VF currently capped at 4 */
3910	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3911		dev_err(&pf->pdev->dev,
3912			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3913			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3914		aq_ret = I40E_ERR_PARAM;
3915		goto err;
3916	}
3917
3918	/* validate queues for each TC */
3919	for (i = 0; i < tci->num_tc; i++)
3920		if (!tci->list[i].count ||
3921		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3922			dev_err(&pf->pdev->dev,
3923				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3924				vf->vf_id, i, tci->list[i].count,
3925				I40E_DEFAULT_QUEUES_PER_VF);
3926			aq_ret = I40E_ERR_PARAM;
3927			goto err;
3928		}
3929
3930	/* need Max VF queues but already have default number of queues */
3931	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
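	/* With the driver defaults (I40E_MAX_VF_QUEUES = 16,
	 * I40E_DEFAULT_QUEUES_PER_VF = 4) this requires 12 additional
	 * queue pairs from the PF's free pool.
	 */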
3932
3933	if (pf->queues_left < adq_request_qps) {
3934		dev_err(&pf->pdev->dev,
3935			"No queues left to allocate to VF %d\n",
3936			vf->vf_id);
3937		aq_ret = I40E_ERR_PARAM;
3938		goto err;
3939	} else {
3940		/* we need to allocate max VF queues to enable ADq, so that an
3941		 * ADq-enabled VF always gets its queues back when it goes
3942		 * through a reset.
3943		 */
3944		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3945	}
3946
3947	/* get link speed in Mbps to validate rate limit */
3948	speed = i40e_vc_link_speed2mbps(ls->link_speed);
3949	if (speed == SPEED_UNKNOWN) {
3950		dev_err(&pf->pdev->dev,
3951			"Cannot detect link speed\n");
3952		aq_ret = I40E_ERR_PARAM;
3953		goto err;
3954	}
3955
3956	/* parse data from the queue channel info */
3957	vf->num_tc = tci->num_tc;
3958	for (i = 0; i < vf->num_tc; i++) {
3959		if (tci->list[i].max_tx_rate) {
3960			if (tci->list[i].max_tx_rate > speed) {
3961				dev_err(&pf->pdev->dev,
3962					"Invalid max tx rate %llu specified for VF %d.\n",
3963					tci->list[i].max_tx_rate,
3964					vf->vf_id);
3965				aq_ret = I40E_ERR_PARAM;
3966				goto err;
3967			} else {
3968				vf->ch[i].max_tx_rate =
3969					tci->list[i].max_tx_rate;
3970			}
3971		}
3972		vf->ch[i].num_qps = tci->list[i].count;
3973	}
3974
3975	/* set this flag only after making sure all inputs are sane */
3976	vf->adq_enabled = true;
3977
3978	/* reset the VF in order to allocate resources */
3979	i40e_vc_reset_vf(vf, true);
3980
3981	return I40E_SUCCESS;
3982
3983	/* send the response to the VF */
3984err:
3985	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3986				       aq_ret);
3987}
3988
3989/**
3990 * i40e_vc_del_qch_msg
3991 * @vf: pointer to the VF info
3992 * @msg: pointer to the msg buffer
3993 **/
3994static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3995{
3996	struct i40e_pf *pf = vf->pf;
3997	i40e_status aq_ret = 0;
3998
3999	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4000		aq_ret = I40E_ERR_PARAM;
4001		goto err;
4002	}
4003
4004	if (vf->adq_enabled) {
4005		i40e_del_all_cloud_filters(vf);
4006		i40e_del_qch(vf);
4007		vf->adq_enabled = false;
4008		vf->num_tc = 0;
4009		dev_info(&pf->pdev->dev,
4010			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4011			 vf->vf_id);
4012	} else {
4013		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4014			 vf->vf_id);
4015		aq_ret = I40E_ERR_PARAM;
4016	}
4017
4018	/* reset the VF in order to allocate resources */
4019	i40e_vc_reset_vf(vf, true);
4020
4021	return I40E_SUCCESS;
4022
4023err:
4024	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4025				       aq_ret);
4026}
4027
4028/**
4029 * i40e_vc_process_vf_msg
4030 * @pf: pointer to the PF structure
4031 * @vf_id: source VF id
4032 * @v_opcode: operation code
4033 * @v_retval: unused return value code
4034 * @msg: pointer to the msg buffer
4035 * @msglen: msg length
4036 *
4037 * called from the common aeq/arq handler to
4038 * process request from VF
4039 **/
4040int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4041			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
4042{
4043	struct i40e_hw *hw = &pf->hw;
4044	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4045	struct i40e_vf *vf;
4046	int ret;
4047
4048	pf->vf_aq_requests++;
4049	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4050		return -EINVAL;
4051	vf = &(pf->vf[local_vf_id]);
4052
4053	/* Check if VF is disabled. */
4054	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4055		return I40E_ERR_PARAM;
4056
4057	/* perform basic checks on the msg */
4058	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4059
4060	if (ret) {
4061		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
4062		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4063			local_vf_id, v_opcode, msglen);
4064		switch (ret) {
4065		case VIRTCHNL_STATUS_ERR_PARAM:
4066			return -EPERM;
4067		default:
4068			return -EINVAL;
4069		}
4070	}
4071
4072	switch (v_opcode) {
4073	case VIRTCHNL_OP_VERSION:
4074		ret = i40e_vc_get_version_msg(vf, msg);
4075		break;
4076	case VIRTCHNL_OP_GET_VF_RESOURCES:
4077		ret = i40e_vc_get_vf_resources_msg(vf, msg);
4078		i40e_vc_notify_vf_link_state(vf);
4079		break;
4080	case VIRTCHNL_OP_RESET_VF:
4081		i40e_vc_reset_vf(vf, false);
4082		ret = 0;
4083		break;
4084	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4085		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4086		break;
4087	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4088		ret = i40e_vc_config_queues_msg(vf, msg);
4089		break;
4090	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4091		ret = i40e_vc_config_irq_map_msg(vf, msg);
4092		break;
4093	case VIRTCHNL_OP_ENABLE_QUEUES:
4094		ret = i40e_vc_enable_queues_msg(vf, msg);
4095		i40e_vc_notify_vf_link_state(vf);
4096		break;
4097	case VIRTCHNL_OP_DISABLE_QUEUES:
4098		ret = i40e_vc_disable_queues_msg(vf, msg);
4099		break;
4100	case VIRTCHNL_OP_ADD_ETH_ADDR:
4101		ret = i40e_vc_add_mac_addr_msg(vf, msg);
4102		break;
4103	case VIRTCHNL_OP_DEL_ETH_ADDR:
4104		ret = i40e_vc_del_mac_addr_msg(vf, msg);
4105		break;
4106	case VIRTCHNL_OP_ADD_VLAN:
4107		ret = i40e_vc_add_vlan_msg(vf, msg);
4108		break;
4109	case VIRTCHNL_OP_DEL_VLAN:
4110		ret = i40e_vc_remove_vlan_msg(vf, msg);
4111		break;
4112	case VIRTCHNL_OP_GET_STATS:
4113		ret = i40e_vc_get_stats_msg(vf, msg);
4114		break;
4115	case VIRTCHNL_OP_IWARP:
4116		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
4117		break;
4118	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
4119		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
4120		break;
4121	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
4122		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
4123		break;
4124	case VIRTCHNL_OP_CONFIG_RSS_KEY:
4125		ret = i40e_vc_config_rss_key(vf, msg);
4126		break;
4127	case VIRTCHNL_OP_CONFIG_RSS_LUT:
4128		ret = i40e_vc_config_rss_lut(vf, msg);
4129		break;
4130	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4131		ret = i40e_vc_get_rss_hena(vf, msg);
4132		break;
4133	case VIRTCHNL_OP_SET_RSS_HENA:
4134		ret = i40e_vc_set_rss_hena(vf, msg);
4135		break;
4136	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4137		ret = i40e_vc_enable_vlan_stripping(vf, msg);
4138		break;
4139	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4140		ret = i40e_vc_disable_vlan_stripping(vf, msg);
4141		break;
4142	case VIRTCHNL_OP_REQUEST_QUEUES:
4143		ret = i40e_vc_request_queues_msg(vf, msg);
4144		break;
4145	case VIRTCHNL_OP_ENABLE_CHANNELS:
4146		ret = i40e_vc_add_qch_msg(vf, msg);
4147		break;
4148	case VIRTCHNL_OP_DISABLE_CHANNELS:
4149		ret = i40e_vc_del_qch_msg(vf, msg);
4150		break;
4151	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4152		ret = i40e_vc_add_cloud_filter(vf, msg);
4153		break;
4154	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4155		ret = i40e_vc_del_cloud_filter(vf, msg);
4156		break;
4157	case VIRTCHNL_OP_UNKNOWN:
4158	default:
4159		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4160			v_opcode, local_vf_id);
4161		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4162					      I40E_ERR_NOT_IMPLEMENTED);
4163		break;
4164	}
4165
4166	return ret;
4167}
4168
4169/**
4170 * i40e_vc_process_vflr_event
4171 * @pf: pointer to the PF structure
4172 *
4173 * called from the VFLR irq handler to
4174 * free up VF resources and state variables
4175 **/
4176int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4177{
4178	struct i40e_hw *hw = &pf->hw;
4179	u32 reg, reg_idx, bit_idx;
4180	struct i40e_vf *vf;
4181	int vf_id;
4182
4183	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4184		return 0;
4185
4186	/* Re-enable the VFLR interrupt cause here, before looking for which
4187	 * VF got reset. Otherwise, if another VF gets a reset while the
4188	 * first one is being processed, that interrupt will be lost, and
4189	 * that VF will be stuck in reset forever.
4190	 */
4191	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4192	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4193	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4194	i40e_flush(hw);
4195
4196	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4197	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4198		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4199		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
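		/* e.g. with a (hypothetical) vf_base_id of 64, VF 5 is
		 * absolute VF 69: GLGEN_VFLRSTAT register 69 / 32 = 2,
		 * bit 69 % 32 = 5
		 */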
4200		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
4201		vf = &pf->vf[vf_id];
4202		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4203		if (reg & BIT(bit_idx))
4204			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4205			i40e_reset_vf(vf, true);
4206	}
4207
4208	return 0;
4209}
4210
4211/**
4212 * i40e_validate_vf
4213 * @pf: the physical function
4214 * @vf_id: VF identifier
4215 *
4216 * Check that the VF is enabled and the VSI exists.
4217 *
4218 * Returns 0 on success, negative on failure
4219 **/
4220static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4221{
4222	struct i40e_vsi *vsi;
4223	struct i40e_vf *vf;
4224	int ret = 0;
4225
4226	if (vf_id >= pf->num_alloc_vfs) {
4227		dev_err(&pf->pdev->dev,
4228			"Invalid VF Identifier %d\n", vf_id);
4229		ret = -EINVAL;
4230		goto err_out;
4231	}
4232	vf = &pf->vf[vf_id];
4233	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4234	if (!vsi)
4235		ret = -EINVAL;
4236err_out:
4237	return ret;
4238}
4239
4240/**
4241 * i40e_ndo_set_vf_mac
4242 * @netdev: network interface device structure
4243 * @vf_id: VF identifier
4244 * @mac: mac address
4245 *
4246 * program VF mac address
4247 **/
4248int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4249{
4250	struct i40e_netdev_priv *np = netdev_priv(netdev);
4251	struct i40e_vsi *vsi = np->vsi;
4252	struct i40e_pf *pf = vsi->back;
4253	struct i40e_mac_filter *f;
4254	struct i40e_vf *vf;
4255	int ret = 0;
4256	struct hlist_node *h;
4257	int bkt;
4258	u8 i;
4259
4260	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4261		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4262		return -EAGAIN;
4263	}
4264
4265	/* validate the request */
4266	ret = i40e_validate_vf(pf, vf_id);
4267	if (ret)
4268		goto error_param;
4269
4270	vf = &pf->vf[vf_id];
4271
4272	/* When the VF is resetting, wait until it is done.
4273	 * It can take up to 200 milliseconds,
4274	 * but wait for up to 300 milliseconds to be safe.
4275	 * Acquire the VSI pointer only after the VF has been
4276	 * properly initialized.
4277	 */
4278	for (i = 0; i < 15; i++) {
4279		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4280			break;
4281		msleep(20);
4282	}
4283	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4284		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4285			vf_id);
4286		ret = -EAGAIN;
4287		goto error_param;
4288	}
4289	vsi = pf->vsi[vf->lan_vsi_idx];
4290
4291	if (is_multicast_ether_addr(mac)) {
4292		dev_err(&pf->pdev->dev,
4293			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4294		ret = -EINVAL;
4295		goto error_param;
4296	}
4297
4298	/* Lock once because below invoked function add/del_filter requires
4299	 * mac_filter_hash_lock to be held
4300	 */
4301	spin_lock_bh(&vsi->mac_filter_hash_lock);
4302
4303	/* delete the temporary mac address */
4304	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4305		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4306
4307	/* Delete all the filters for this VSI - we're going to kill it
4308	 * anyway.
4309	 */
4310	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4311		__i40e_del_filter(vsi, f);
4312
4313	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4314
4315	/* program mac filter */
4316	if (i40e_sync_vsi_filters(vsi)) {
4317		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4318		ret = -EIO;
4319		goto error_param;
4320	}
4321	ether_addr_copy(vf->default_lan_addr.addr, mac);
4322
4323	if (is_zero_ether_addr(mac)) {
4324		vf->pf_set_mac = false;
4325		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4326	} else {
4327		vf->pf_set_mac = true;
4328		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4329			 mac, vf_id);
4330	}
4331
4332	/* Force the VF interface down so it has to come back up with the
4333	 * new MAC address
4334	 */
4335	i40e_vc_reset_vf(vf, true);
4336	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4337
4338error_param:
4339	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4340	return ret;
4341}
4342
4343/**
4344 * i40e_ndo_set_vf_port_vlan
4345 * @netdev: network interface device structure
4346 * @vf_id: VF identifier
4347 * @vlan_id: VLAN ID to set
4348 * @qos: priority setting
4349 * @vlan_proto: vlan protocol
4350 *
4351 * program VF vlan id and/or qos
4352 **/
4353int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4354			      u16 vlan_id, u8 qos, __be16 vlan_proto)
4355{
4356	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4357	struct i40e_netdev_priv *np = netdev_priv(netdev);
4358	bool allmulti = false, alluni = false;
4359	struct i40e_pf *pf = np->vsi->back;
4360	struct i40e_vsi *vsi;
4361	struct i40e_vf *vf;
4362	int ret = 0;
4363
4364	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4365		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4366		return -EAGAIN;
4367	}
4368
4369	/* validate the request */
4370	ret = i40e_validate_vf(pf, vf_id);
4371	if (ret)
4372		goto error_pvid;
4373
4374	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4375		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4376		ret = -EINVAL;
4377		goto error_pvid;
4378	}
4379
4380	if (vlan_proto != htons(ETH_P_8021Q)) {
4381		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4382		ret = -EPROTONOSUPPORT;
4383		goto error_pvid;
4384	}
4385
4386	vf = &pf->vf[vf_id];
4387	vsi = pf->vsi[vf->lan_vsi_idx];
4388	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4389		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4390			vf_id);
4391		ret = -EAGAIN;
4392		goto error_pvid;
4393	}
4394
4395	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4396		/* duplicate request, so just return success */
4397		goto error_pvid;
4398
4399	i40e_vlan_stripping_enable(vsi);
4400	i40e_vc_reset_vf(vf, true);
4401	/* During reset the VF got a new VSI, so refresh the pointer. */
4402	vsi = pf->vsi[vf->lan_vsi_idx];
	/* Locked once because multiple functions below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for the condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}
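
	/* Illustrative walk-through (assumed values): when moving from PVID
	 * 100 to PVID 200, the block above first re-adds every MAC filter on
	 * VLAN -1 (I40E_VLAN_ANY); only then does the block below remove the
	 * VLAN 100 copies, so no MAC address is ever left without a filter.
	 */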

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* disable promisc modes in case they were enabled */
	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
					      allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
		alluni = true;

	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
		allmulti = true;

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	ret = 0;

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
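
/* Usage sketch (not part of the driver): the port VLAN ndo above is normally
 * driven from iproute2; "eth0" is a hypothetical PF netdev name:
 *
 *   ip link set eth0 vf 0 vlan 100 qos 3
 *   ip link set eth0 vf 0 vlan 0            (delete the port VLAN)
 *
 * Worked arithmetic, assuming I40E_VLAN_PRIORITY_SHIFT is 13 (the usual
 * 802.1Q TCI layout with PCP in bits 15:13): vlan_id = 100 and qos = 3 give
 * vlanprio = 100 | (3 << 13) = 0x6064, i.e. VLAN ID in bits 11:0 and the
 * priority in the top three bits.
 */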

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error;

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) specified for VF %d; min tx rate is not supported.\n",
			min_tx_rate, vf_id);
		ret = -EINVAL;
		goto error;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
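
/* Usage sketch (not part of the driver): rates are in Mbps, and only a
 * maximum rate is supported, matching the min_tx_rate rejection above.
 * "eth0" is a hypothetical PF netdev name:
 *
 *   ip link set eth0 vf 0 max_tx_rate 1000
 */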

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		ret = -ENOENT;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
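
/* Usage sketch (not part of the driver): the fields filled in above surface
 * in the per-VF lines of "ip link show"; output below is abridged and the
 * names are hypothetical:
 *
 *   ip link show eth0
 *   ... vf 0 MAC 00:11:22:33:44:55, vlan 100, qos 3, spoof checking on,
 *       link-state auto, trust off
 */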

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
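
/* Usage sketch (not part of the driver): the three IFLA_VF_LINK_STATE_*
 * cases above map to iproute2's "state" keyword; "eth0" is a hypothetical
 * PF netdev name:
 *
 *   ip link set eth0 vf 0 state auto      (follow physical link)
 *   ip link set eth0 vf 0 state enable    (force link up)
 *   ip link set eth0 vf 0 state disable   (force link down)
 */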

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
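
/* Usage sketch (not part of the driver): spoof checking toggles both the MAC
 * and VLAN anti-spoof flags in the VSI context above; "eth0" is a
 * hypothetical PF netdev name:
 *
 *   ip link set eth0 vf 0 spoofchk on
 *   ip link set eth0 vf 0 spoofchk off
 */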

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the PF
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;

	/* request PF to sync mac/vlan filters for the VF */
	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
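
/* Usage sketch (not part of the driver): trusting a VF relaxes restrictions
 * such as promiscuous mode; untrusting one also drops its cloud filters, as
 * handled above. "eth0" is a hypothetical PF netdev name:
 *
 *   ip link set eth0 vf 0 trust on
 *   ip link set eth0 vf 0 trust off
 */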

/**
 * i40e_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-127)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
		      struct ifla_vf_stats *vf_stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_eth_stats *stats;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;

	/* validate the request */
	if (i40e_validate_vf(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
		return -EBUSY;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	i40e_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes   = stats->rx_bytes;
	vf_stats->tx_bytes   = stats->tx_bytes;
	vf_stats->broadcast  = stats->rx_broadcast;
	vf_stats->multicast  = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}
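
/* Usage sketch (not part of the driver): the counters assembled above are
 * reported to user space through the IFLA_VF_STATS netlink attributes;
 * recent iproute2 versions print them per VF when statistics are requested
 * ("eth0" is a hypothetical PF netdev name):
 *
 *   ip -s link show eth0
 */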