// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
#include "i40e_lan_hmc.h"
#include "i40e_virtchnl_pf.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 int v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_link_speed2mbps - convert i40e_aq_link_speed to Mbps
 * @link_speed: the speed to convert
 *
 * Return the speed as a direct value in Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single vf
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

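	/* VFs that negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED expect the link
	 * speed as a plain Mbps value; legacy VFs expect the virtchnl
	 * link-speed enum instead.
	 */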
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

#ifdef CONFIG_PCI_IOV
void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	u16 pos;

	/* Continue only if this is a PF */
	if (!pdev->is_physfn)
		return;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vf_dev = NULL;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
				pci_restore_msi_state(vf_dev);
		}
	}
}
#endif /* CONFIG_PCI_IOV */

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: notify vf about reset or not
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If the PF is in the VF-releasing state, resetting the VF
		 * is impossible, so bail out.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as its
		 * own, they may actually belong to different VSIs (up to 4).
		 * We need to find out which queues belong to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
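	/* Build one combined bitmap from the Rx and Tx queue maps: for VSI
	 * queue pair q, bit (2 * q) marks the Rx queue and bit (2 * q + 1)
	 * the Tx queue (I40E_VIRTCHNL_SUPPORTED_QTYPES covers the two queue
	 * types). Walking the set bits in order then yields the interrupt
	 * link list the hardware expects.
	 */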
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and uses interrupt zero,
	 * we need to disable auto-masking when enabling interrupt zero
	 * for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_rdma_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_rdma_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
						 reg);
			next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
						reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_rdma_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int
i40e_config_rdma_qvlist(struct i40e_vf *vf,
			struct virtchnl_rdma_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_rdma_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	size_t size;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
				    qvlist_info->num_vectors);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
				       reg);
		next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
					reg);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
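	/* The HMC queue context stores the ring base address in 128-byte
	 * units, hence the division below.
	 */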
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
			      vf->vf_id + hw->func_caps.vf_base_id);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
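	/* As for Tx, the ring base is stored in 128-byte units; the header
	 * and data buffer sizes below are likewise stored in hardware units,
	 * with the I40E_RXQ_CTX_*_SHIFT values converting from bytes.
	 */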
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; don't apply the MAC filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

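		/* Each VSILAN_QTABLE register packs two 11-bit PF queue
		 * indexes (low and high halves of the 32-bit word); 0x7FF is
		 * the end-of-list marker, so 0x07FF07FF terminates both
		 * entries at once.
		 */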
		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes
 * the VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
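/* VF_DEVICE_STATUS appears to be the config-space offset of the PCIe Device
 * Status register within the VF's PCIe capability structure on this device,
 * and VF_TRANS_PENDING_MASK its Transactions Pending bit. Together with the
 * CIAA/CIAD indirect config-access registers they let the PF poll for
 * outstanding VF PCIe transactions (see i40e_quiesce_vf_pci() below).
 */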
1168/**
1169 * i40e_quiesce_vf_pci
1170 * @vf: pointer to the VF structure
1171 *
1172 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1173 * if the transactions never clear.
1174 **/
1175static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1176{
1177	struct i40e_pf *pf = vf->pf;
1178	struct i40e_hw *hw = &pf->hw;
1179	int vf_abs_id, i;
1180	u32 reg;
1181
1182	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1183
1184	wr32(hw, I40E_PF_PCI_CIAA,
1185	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1186	for (i = 0; i < 100; i++) {
1187		reg = rd32(hw, I40E_PF_PCI_CIAD);
1188		if ((reg & VF_TRANS_PENDING_MASK) == 0)
1189			return 0;
1190		udelay(1);
1191	}
1192	return -EIO;
1193}
1194
1195/**
1196 * __i40e_getnum_vf_vsi_vlan_filters
1197 * @vsi: pointer to the vsi
1198 *
1199 * called to get the number of VLANs offloaded on this VF
1200 **/
1201static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1202{
1203	struct i40e_mac_filter *f;
1204	u16 num_vlans = 0, bkt;
1205
1206	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1207		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1208			num_vlans++;
1209	}
1210
1211	return num_vlans;
1212}
1213
1214/**
1215 * i40e_getnum_vf_vsi_vlan_filters
1216 * @vsi: pointer to the vsi
1217 *
1218 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
1219 **/
1220static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1221{
1222	int num_vlans;
1223
1224	spin_lock_bh(&vsi->mac_filter_hash_lock);
1225	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1226	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1227
1228	return num_vlans;
1229}
1230
1231/**
1232 * i40e_get_vlan_list_sync
1233 * @vsi: pointer to the VSI
1234 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
1235 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
1236 *             This array is allocated here, but has to be freed in caller.
1237 *
1238 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
1239 **/
1240static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
1241				    s16 **vlan_list)
1242{
1243	struct i40e_mac_filter *f;
1244	int i = 0;
1245	int bkt;
1246
1247	spin_lock_bh(&vsi->mac_filter_hash_lock);
1248	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1249	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
1250	if (!(*vlan_list))
1251		goto err;
1252
1253	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1254		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1255			continue;
1256		(*vlan_list)[i++] = f->vlan;
1257	}
1258err:
1259	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1260}
1261
1262/**
1263 * i40e_set_vsi_promisc
1264 * @vf: pointer to the VF struct
1265 * @seid: VSI number
1266 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
1267 *                for a given VLAN
1268 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
1269 *                  for a given VLAN
1270 * @vl: List of VLANs - apply filter for given VLANs
1271 * @num_vlans: Number of elements in @vl
1272 **/
1273static int
1274i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1275		     bool unicast_enable, s16 *vl, u16 num_vlans)
1276{
1277	struct i40e_pf *pf = vf->pf;
1278	struct i40e_hw *hw = &pf->hw;
1279	int aq_ret, aq_tmp = 0;
1280	int i;
1281
1282	/* No VLAN to set promisc on, set on VSI */
1283	if (!num_vlans || !vl) {
1284		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
1285							       multi_enable,
1286							       NULL);
1287		if (aq_ret) {
1288			int aq_err = pf->hw.aq.asq_last_status;
1289
1290			dev_err(&pf->pdev->dev,
1291				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1292				vf->vf_id,
1293				ERR_PTR(aq_ret),
1294				i40e_aq_str(&pf->hw, aq_err));
1295
1296			return aq_ret;
1297		}
1298
1299		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
1300							     unicast_enable,
1301							     NULL, true);
1302
1303		if (aq_ret) {
1304			int aq_err = pf->hw.aq.asq_last_status;
1305
1306			dev_err(&pf->pdev->dev,
1307				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1308				vf->vf_id,
1309				ERR_PTR(aq_ret),
1310				i40e_aq_str(&pf->hw, aq_err));
1311		}
1312
1313		return aq_ret;
1314	}
1315
1316	for (i = 0; i < num_vlans; i++) {
1317		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
1318							    multi_enable,
1319							    vl[i], NULL);
1320		if (aq_ret) {
1321			int aq_err = pf->hw.aq.asq_last_status;
1322
1323			dev_err(&pf->pdev->dev,
1324				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1325				vf->vf_id,
1326				ERR_PTR(aq_ret),
1327				i40e_aq_str(&pf->hw, aq_err));
1328
1329			if (!aq_tmp)
1330				aq_tmp = aq_ret;
1331		}
1332
1333		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
1334							    unicast_enable,
1335							    vl[i], NULL);
1336		if (aq_ret) {
1337			int aq_err = pf->hw.aq.asq_last_status;
1338
1339			dev_err(&pf->pdev->dev,
1340				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1341				vf->vf_id,
1342				ERR_PTR(aq_ret),
1343				i40e_aq_str(&pf->hw, aq_err));
1344
1345			if (!aq_tmp)
1346				aq_tmp = aq_ret;
1347		}
1348	}
1349
1350	if (aq_tmp)
1351		aq_ret = aq_tmp;
1352
1353	return aq_ret;
1354}
1355
1356/**
1357 * i40e_config_vf_promiscuous_mode
1358 * @vf: pointer to the VF info
1359 * @vsi_id: VSI id
1360 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1361 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1362 *
1363 * Called from the VF to configure the promiscuous mode of
1364 * VF vsis and from the VF reset path to reset promiscuous mode.
1365 **/
1366static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1367					   u16 vsi_id,
1368					   bool allmulti,
1369					   bool alluni)
1370{
1371	struct i40e_pf *pf = vf->pf;
1372	struct i40e_vsi *vsi;
1373	int aq_ret = 0;
1374	u16 num_vlans;
1375	s16 *vl;
1376
1377	vsi = i40e_find_vsi_from_id(pf, vsi_id);
1378	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1379		return -EINVAL;
1380
1381	if (vf->port_vlan_id) {
1382		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
1383					      alluni, &vf->port_vlan_id, 1);
1384		return aq_ret;
1385	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1386		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
1387
1388		if (!vl)
1389			return -ENOMEM;
1390
1391		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1392					      vl, num_vlans);
1393		kfree(vl);
1394		return aq_ret;
1395	}
1396
1397	/* no VLANs to set on, set on VSI */
1398	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1399				      NULL, 0);
1400	return aq_ret;
1401}
1402
1403/**
1404 * i40e_sync_vfr_reset
1405 * @hw: pointer to hw struct
1406 * @vf_id: VF identifier
1407 *
1408 * Before trigger hardware reset, we need to know if no other process has
1409 * reserved the hardware for any reset operations. This check is done by
1410 * examining the status of the RSTAT1 register used to signal the reset.
1411 **/
1412static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
1413{
1414	u32 reg;
1415	int i;
1416
1417	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
1418		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
1419			   I40E_VFINT_ICR0_ADMINQ_MASK;
1420		if (reg)
1421			return 0;
1422
1423		usleep_range(100, 200);
1424	}
1425
1426	return -EAGAIN;
1427}
1428
1429/**
1430 * i40e_trigger_vf_reset
1431 * @vf: pointer to the VF structure
1432 * @flr: VFLR was issued or not
1433 *
1434 * Trigger hardware to start a reset for a particular VF. Expects the caller
1435 * to wait the proper amount of time to allow hardware to reset the VF before
1436 * it cleans up and restores VF functionality.
1437 **/
1438static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1439{
1440	struct i40e_pf *pf = vf->pf;
1441	struct i40e_hw *hw = &pf->hw;
1442	u32 reg, reg_idx, bit_idx;
1443	bool vf_active;
1444	u32 radq;
1445
1446	/* warn the VF */
1447	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1448
1449	/* Disable VF's configuration API during reset. The flag is re-enabled
1450	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1451	 * It's normally disabled in i40e_free_vf_res(), but it's safer
1452	 * to do it earlier to give some time to finish to any VF config
1453	 * functions that may still be running at this point.
1454	 */
1455	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1456
1457	/* In the case of a VFLR, the HW has already reset the VF and we
1458	 * just need to clean up, so don't hit the VFRTRIG register.
1459	 */
1460	if (!flr) {
1461		/* Sync VFR reset before trigger next one */
1462		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
1463			    I40E_VFINT_ICR0_ADMINQ_MASK;
1464		if (vf_active && !radq)
1465			/* waiting for finish reset by virtual driver */
1466			if (i40e_sync_vfr_reset(hw, vf->vf_id))
1467				dev_info(&pf->pdev->dev,
1468					 "Reset VF %d never finished\n",
1469				vf->vf_id);
1470
1471		/* Reset VF using VPGEN_VFRTRIG reg. It is also setting
1472		 * in progress state in rstat1 register.
1473		 */
1474		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1475		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1476		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1477		i40e_flush(hw);
1478	}
1479	/* clear the VFLR bit in GLGEN_VFLRSTAT */
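	/* Each GLGEN_VFLRSTAT register covers 32 VFs, so index by the
	 * absolute VF id divided by 32 and select the bit with the remainder.
	 */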
1480	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1481	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1482	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1483	i40e_flush(hw);
1484
1485	if (i40e_quiesce_vf_pci(vf))
1486		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1487			vf->vf_id);
1488}
1489
1490/**
1491 * i40e_cleanup_reset_vf
1492 * @vf: pointer to the VF structure
1493 *
1494 * Cleanup a VF after the hardware reset is finished. Expects the caller to
1495 * have verified whether the reset is finished properly, and ensure the
1496 * minimum amount of wait time has passed.
1497 **/
1498static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1499{
1500	struct i40e_pf *pf = vf->pf;
1501	struct i40e_hw *hw = &pf->hw;
1502	u32 reg;
1503
1504	/* disable promisc modes in case they were enabled */
1505	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1506
1507	/* free VF resources to begin resetting the VSI state */
1508	i40e_free_vf_res(vf);
1509
1510	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1511	 * By doing this we allow HW to access VF memory at any point. If we
1512	 * did it any sooner, HW could access memory while it was being freed
1513	 * in i40e_free_vf_res(), causing an IOMMU fault.
1514	 *
1515	 * On the other hand, this needs to be done ASAP, because the VF driver
1516	 * is waiting for this to happen and may report a timeout. It's
1517	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1518	 * it.
1519	 */
1520	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1521	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1522	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1523
1524	/* reallocate VF resources to finish resetting the VSI state */
1525	if (!i40e_alloc_vf_res(vf)) {
1526		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1527		i40e_enable_vf_mappings(vf);
1528		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1529		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1530		/* Do not notify the client during VF init */
1531		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1532					&vf->vf_states))
1533			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1534		vf->num_vlan = 0;
1535	}
1536
1537	/* Tell the VF driver the reset is done. This needs to be done only
1538	 * after VF has been fully initialized, because the VF driver may
1539	 * request resources immediately after setting this flag.
1540	 */
1541	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1542}
1543
1544/**
1545 * i40e_reset_vf
1546 * @vf: pointer to the VF structure
1547 * @flr: VFLR was issued or not
1548 *
1549 * Returns true if the VF is in reset, resets successfully, or resets
1550 * are disabled and false otherwise.
1551 **/
1552bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1553{
1554	struct i40e_pf *pf = vf->pf;
1555	struct i40e_hw *hw = &pf->hw;
1556	bool rsd = false;
1557	u32 reg;
1558	int i;
1559
1560	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1561		return true;
1562
1563	/* Bail out if VFs are disabled. */
1564	if (test_bit(__I40E_VF_DISABLE, pf->state))
1565		return true;
1566
1567	/* If VF is being reset already we don't need to continue. */
1568	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1569		return true;
1570
1571	i40e_trigger_vf_reset(vf, flr);
1572
1573	/* poll VPGEN_VFRSTAT reg to make sure
1574	 * that reset is complete
1575	 */
1576	for (i = 0; i < 10; i++) {
1577		/* VF reset requires driver to first reset the VF and then
1578		 * poll the status register to make sure that the reset
1579		 * completed successfully. Due to internal HW FIFO flushes,
1580		 * we must wait 10ms before the register will be valid.
1581		 */
1582		usleep_range(10000, 20000);
1583		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1584		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1585			rsd = true;
1586			break;
1587		}
1588	}
1589
1590	if (flr)
1591		usleep_range(10000, 20000);
1592
1593	if (!rsd)
1594		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1595			vf->vf_id);
1596	usleep_range(10000, 20000);
1597
1598	/* On initial reset, we don't have any queues to disable */
1599	if (vf->lan_vsi_idx != 0)
1600		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1601
1602	i40e_cleanup_reset_vf(vf);
1603
1604	i40e_flush(hw);
1605	usleep_range(20000, 40000);
1606	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
1607
1608	return true;
1609}
1610
1611/**
1612 * i40e_reset_all_vfs
1613 * @pf: pointer to the PF structure
1614 * @flr: VFLR was issued or not
1615 *
1616 * Reset all allocated VFs in one go. First, tell the hardware to reset each
1617 * VF, then do all the waiting in one chunk, and finally finish restoring each
1618 * VF after the wait. This is useful during PF routines which need to reset
1619 * all VFs, as otherwise it must perform these resets in a serialized fashion.
1620 *
1621 * Returns true if any VFs were reset, and false otherwise.
1622 **/
1623bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1624{
1625	struct i40e_hw *hw = &pf->hw;
1626	struct i40e_vf *vf;
 
1627	u32 reg;
1628	int i;
1629
1630	/* If we don't have any VFs, then there is nothing to reset */
1631	if (!pf->num_alloc_vfs)
1632		return false;
1633
1634	/* If VFs have been disabled, there is no need to reset */
1635	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1636		return false;
1637
1638	/* Begin reset on all VFs at once */
1639	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1640		/* If VF is being reset no need to trigger reset again */
1641		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1642			i40e_trigger_vf_reset(vf, flr);
1643	}
1644
1645	/* HW requires some time to make sure it can flush the FIFO for a VF
1646	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1647	 * sequence to make sure that it has completed. We'll keep track of
1648	 * the VFs using a simple iterator that increments once that VF has
1649	 * finished resetting.
1650	 */
1651	for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
1652		usleep_range(10000, 20000);
1653
1654		/* Check each VF in sequence, beginning with the VF to fail
1655		 * the previous check.
1656		 */
1657		while (vf < &pf->vf[pf->num_alloc_vfs]) {
1658			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
1659				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1660				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1661					break;
1662			}
1663
1664			/* If the current VF has finished resetting, move on
1665			 * to the next VF in sequence.
1666			 */
1667			++vf;
1668		}
1669	}
1670
1671	if (flr)
1672		usleep_range(10000, 20000);
1673
1674	/* Display a warning if at least one VF didn't manage to reset in
1675	 * time, but continue on with the operation.
1676	 */
1677	if (vf < &pf->vf[pf->num_alloc_vfs])
1678		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1679			vf->vf_id);
1680	usleep_range(10000, 20000);
1681
1682	/* Begin disabling all the rings associated with VFs, but do not wait
1683	 * between each VF.
1684	 */
1685	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1686		/* On initial reset, we don't have any queues to disable */
1687		if (vf->lan_vsi_idx == 0)
1688			continue;
1689
1690		/* If VF is reset in another thread just continue */
1691		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1692			continue;
1693
1694		i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
1695	}
1696
1697	/* Now that we've notified HW to disable all of the VF rings, wait
1698	 * until they finish.
1699	 */
1700	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1701		/* On initial reset, we don't have any queues to disable */
1702		if (vf->lan_vsi_idx == 0)
1703			continue;
1704
1705		/* If VF is reset in another thread just continue */
1706		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1707			continue;
1708
1709		i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
1710	}
1711
1712	/* Hw may need up to 50ms to finish disabling the RX queues. We
1713	 * minimize the wait by delaying only once for all VFs.
1714	 */
1715	mdelay(50);
1716
1717	/* Finish the reset on each VF */
1718	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1719		/* If VF is reset in another thread just continue */
1720		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1721			continue;
1722
1723		i40e_cleanup_reset_vf(vf);
1724	}
1725
1726	i40e_flush(hw);
1727	usleep_range(20000, 40000);
1728	clear_bit(__I40E_VF_DISABLE, pf->state);
1729
1730	return true;
1731}
1732
1733/**
1734 * i40e_free_vfs
1735 * @pf: pointer to the PF structure
1736 *
1737 * free VF resources
1738 **/
1739void i40e_free_vfs(struct i40e_pf *pf)
1740{
1741	struct i40e_hw *hw = &pf->hw;
1742	u32 reg_idx, bit_idx;
1743	int i, tmp, vf_id;
1744
1745	if (!pf->vf)
1746		return;
1747
1748	set_bit(__I40E_VFS_RELEASING, pf->state);
1749	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1750		usleep_range(1000, 2000);
1751
1752	i40e_notify_client_of_vf_enable(pf, 0);
1753
1754	/* Disable IOV before freeing resources. This lets any VF drivers
1755	 * running in the host get themselves cleaned up before we yank
1756	 * the carpet out from underneath their feet.
1757	 */
1758	if (!pci_vfs_assigned(pf->pdev))
1759		pci_disable_sriov(pf->pdev);
1760	else
1761		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1762
1763	/* Amortize wait time by stopping all VFs at the same time */
1764	for (i = 0; i < pf->num_alloc_vfs; i++) {
1765		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1766			continue;
1767
1768		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1769	}
1770
1771	for (i = 0; i < pf->num_alloc_vfs; i++) {
1772		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1773			continue;
1774
1775		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1776	}
1777
 
 
 
 
 
 
 
 
 
1778	/* free up VF resources */
1779	tmp = pf->num_alloc_vfs;
1780	pf->num_alloc_vfs = 0;
1781	for (i = 0; i < tmp; i++) {
1782		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1783			i40e_free_vf_res(&pf->vf[i]);
1784		/* disable qp mappings */
1785		i40e_disable_vf_mappings(&pf->vf[i]);
1786	}
1787
1788	kfree(pf->vf);
1789	pf->vf = NULL;
1790
1791	/* This check is for when the driver is unloaded while VFs are
1792	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1793	 * before this function ever gets called.
1794	 */
1795	if (!pci_vfs_assigned(pf->pdev)) {
1796		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1797		 * work correctly when SR-IOV gets re-enabled.
1798		 */
1799		for (vf_id = 0; vf_id < tmp; vf_id++) {
1800			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1801			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1802			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1803		}
1804	}
1805	clear_bit(__I40E_VF_DISABLE, pf->state);
1806	clear_bit(__I40E_VFS_RELEASING, pf->state);
1807}
1808
1809#ifdef CONFIG_PCI_IOV
1810/**
1811 * i40e_alloc_vfs
1812 * @pf: pointer to the PF structure
1813 * @num_alloc_vfs: number of VFs to allocate
1814 *
1815 * allocate VF resources
1816 **/
1817int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1818{
1819	struct i40e_vf *vfs;
1820	int i, ret = 0;
1821
1822	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1823	i40e_irq_dynamic_disable_icr0(pf);
1824
1825	/* Check to see if we're just allocating resources for extant VFs */
1826	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1827		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1828		if (ret) {
1829			clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1830			pf->num_alloc_vfs = 0;
1831			goto err_iov;
1832		}
1833	}
1834	/* allocate memory */
1835	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1836	if (!vfs) {
1837		ret = -ENOMEM;
1838		goto err_alloc;
1839	}
1840	pf->vf = vfs;
1841
1842	/* apply default profile */
1843	for (i = 0; i < num_alloc_vfs; i++) {
1844		vfs[i].pf = pf;
1845		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1846		vfs[i].vf_id = i;
1847
1848		/* assign default capabilities */
1849		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1850		vfs[i].spoofchk = true;
1851
1852		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1853
1854	}
1855	pf->num_alloc_vfs = num_alloc_vfs;
1856
1857	/* VF resources get allocated during reset */
1858	i40e_reset_all_vfs(pf, false);
1859
1860	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1861
1862err_alloc:
1863	if (ret)
1864		i40e_free_vfs(pf);
1865err_iov:
1866	/* Re-enable interrupt 0. */
1867	i40e_irq_dynamic_enable_icr0(pf);
1868	return ret;
1869}
1870
1871#endif
1872/**
1873 * i40e_pci_sriov_enable
1874 * @pdev: pointer to a pci_dev structure
1875 * @num_vfs: number of VFs to allocate
1876 *
1877 * Enable or change the number of VFs
1878 **/
1879static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1880{
1881#ifdef CONFIG_PCI_IOV
1882	struct i40e_pf *pf = pci_get_drvdata(pdev);
1883	int pre_existing_vfs = pci_num_vf(pdev);
1884	int err = 0;
1885
1886	if (test_bit(__I40E_TESTING, pf->state)) {
1887		dev_warn(&pdev->dev,
1888			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1889		err = -EPERM;
1890		goto err_out;
1891	}
1892
1893	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1894		i40e_free_vfs(pf);
1895	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1896		goto out;
1897
1898	if (num_vfs > pf->num_req_vfs) {
1899		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1900			 num_vfs, pf->num_req_vfs);
1901		err = -EPERM;
1902		goto err_out;
1903	}
1904
1905	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1906	err = i40e_alloc_vfs(pf, num_vfs);
1907	if (err) {
1908		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1909		goto err_out;
1910	}
1911
1912out:
1913	return num_vfs;
1914
1915err_out:
1916	return err;
1917#endif
1918	return 0;
1919}
1920
1921/**
1922 * i40e_pci_sriov_configure
1923 * @pdev: pointer to a pci_dev structure
1924 * @num_vfs: number of VFs to allocate
1925 *
1926 * Enable or change the number of VFs. Called when the user updates the number
1927 * of VFs in sysfs.
1928 **/
1929int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1930{
1931	struct i40e_pf *pf = pci_get_drvdata(pdev);
1932	int ret = 0;
1933
1934	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1935		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1936		return -EAGAIN;
1937	}
1938
1939	if (num_vfs) {
1940		if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
1941			set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1942			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1943		}
1944		ret = i40e_pci_sriov_enable(pdev, num_vfs);
1945		goto sriov_configure_out;
1946	}
1947
1948	if (!pci_vfs_assigned(pf->pdev)) {
1949		i40e_free_vfs(pf);
1950		clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1951		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1952	} else {
1953		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1954		ret = -EINVAL;
1955		goto sriov_configure_out;
1956	}
1957sriov_configure_out:
1958	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1959	return ret;
1960}
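/* How this entry point is reached: the driver wires it into its struct
 * pci_driver, and the PCI core calls it when the sriov_numvfs sysfs
 * attribute is written. A minimal sketch of the wiring (struct contents
 * abbreviated):
 *
 *	static struct pci_driver i40e_driver = {
 *		.name            = "i40e",
 *		.sriov_configure = i40e_pci_sriov_configure,
 *	};
 *
 * so "echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs" ends up here with
 * num_vfs == 4, and writing 0 frees the VFs again.
 */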
1961
1962/***********************virtual channel routines******************/
1963
1964/**
1965 * i40e_vc_send_msg_to_vf
1966 * @vf: pointer to the VF info
1967 * @v_opcode: virtual channel opcode
1968 * @v_retval: virtual channel return value
1969 * @msg: pointer to the msg buffer
1970 * @msglen: msg length
1971 *
1972 * send msg to VF
1973 **/
1974static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1975				  u32 v_retval, u8 *msg, u16 msglen)
1976{
1977	struct i40e_pf *pf;
1978	struct i40e_hw *hw;
1979	int abs_vf_id;
1980	int aq_ret;
1981
1982	/* validate the request */
1983	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1984		return -EINVAL;
1985
1986	pf = vf->pf;
1987	hw = &pf->hw;
1988	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1989
1990	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,	v_opcode, v_retval,
1991					msg, msglen, NULL);
1992	if (aq_ret) {
1993		dev_info(&pf->pdev->dev,
1994			 "Unable to send the message to VF %d aq_err %d\n",
1995			 vf->vf_id, pf->hw.aq.asq_last_status);
1996		return -EIO;
1997	}
1998
1999	return 0;
2000}
2001
2002/**
2003 * i40e_vc_send_resp_to_vf
2004 * @vf: pointer to the VF info
2005 * @opcode: operation code
2006 * @retval: return value
2007 *
2008 * send resp msg to VF
2009 **/
2010static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
2011				   enum virtchnl_ops opcode,
2012				   int retval)
2013{
2014	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
2015}
2016
2017/**
2018 * i40e_sync_vf_state
2019 * @vf: pointer to the VF info
2020 * @state: VF state
2021 *
2022 * Called while handling a VF message to synchronize with a potential
2023 * VF reset state
2024 **/
2025static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
2026{
2027	int i;
2028
2029	/* Some message handlers require the VF state bit to be set.
2030	 * That bit may be cleared while a VF reset is in progress,
2031	 * so wait until the end of the reset before handling the
2032	 * request message.
2033	 */
2034	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
2035		if (test_bit(state, &vf->vf_states))
2036			return true;
2037		usleep_range(10000, 20000);
2038	}
2039
2040	return test_bit(state, &vf->vf_states);
2041}
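/* The loop above is a simple bounded poll: each pass sleeps 10-20 ms, so
 * with a wait count of, say, 20 iterations a message handler tolerates
 * roughly 200-400 ms of VF reset time before re-testing the state bit one
 * last time and giving up.
 */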
2042
2043/**
2044 * i40e_vc_get_version_msg
2045 * @vf: pointer to the VF info
2046 * @msg: pointer to the msg buffer
2047 *
2048 * called from the VF to request the API version used by the PF
2049 **/
2050static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
2051{
2052	struct virtchnl_version_info info = {
2053		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2054	};
2055
2056	vf->vf_ver = *(struct virtchnl_version_info *)msg;
2057	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2058	if (VF_IS_V10(&vf->vf_ver))
2059		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2060	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2061				      0, (u8 *)&info,
2062				      sizeof(struct virtchnl_version_info));
2063}
2064
2065/**
2066 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2067 * @vf: pointer to VF structure
2068 **/
2069static void i40e_del_qch(struct i40e_vf *vf)
2070{
2071	struct i40e_pf *pf = vf->pf;
2072	int i;
2073
2074	/* The first element in the array belongs to the primary VF VSI, which
2075	 * we shouldn't delete; the rest of the VSIs created should be deleted.
2076	 */
2077	for (i = 1; i < vf->num_tc; i++) {
2078		if (vf->ch[i].vsi_idx) {
2079			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2080			vf->ch[i].vsi_idx = 0;
2081			vf->ch[i].vsi_id = 0;
2082		}
2083	}
2084}
2085
2086/**
2087 * i40e_vc_get_max_frame_size
2088 * @vf: pointer to the VF
2089 *
2090 * Max frame size is determined based on the current port's max frame size
2091 * and whether a port VLAN is configured on this VF. The VF is not aware
2092 * whether it's in a port VLAN, so the PF must account for this both in max
2093 * frame size checks and when sending the max frame size to the VF.
2094 **/
2095static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
2096{
2097	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
2098
2099	if (vf->port_vlan_id)
2100		max_frame_size -= VLAN_HLEN;
2101
2102	return max_frame_size;
2103}
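/* Worked example (port value illustrative): if the port reports a max frame
 * size of 9728 bytes and this VF sits in a port VLAN, the 4-byte VLAN_HLEN
 * tag is invisible to the VF, so it is told 9728 - 4 = 9724 bytes.
 */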
2104
2105/**
2106 * i40e_vc_get_vf_resources_msg
2107 * @vf: pointer to the VF info
2108 * @msg: pointer to the msg buffer
2109 *
2110 * called from the VF to request its resources
2111 **/
2112static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2113{
2114	struct virtchnl_vf_resource *vfres = NULL;
2115	struct i40e_pf *pf = vf->pf;
2116	struct i40e_vsi *vsi;
2117	int num_vsis = 1;
2118	int aq_ret = 0;
2119	size_t len = 0;
2120	int ret;
2121
2122	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2123		aq_ret = -EINVAL;
2124		goto err;
2125	}
2126
2127	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
2128	vfres = kzalloc(len, GFP_KERNEL);
2129	if (!vfres) {
2130		aq_ret = -ENOMEM;
2131		len = 0;
2132		goto err;
2133	}
2134	if (VF_IS_V11(&vf->vf_ver))
2135		vf->driver_caps = *(u32 *)msg;
2136	else
2137		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2138				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2139				  VIRTCHNL_VF_OFFLOAD_VLAN;
2140
2141	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2142	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2143	vsi = pf->vsi[vf->lan_vsi_idx];
2144	if (!vsi->info.pvid)
2145		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2146
2147	if (i40e_vf_client_capable(pf, vf->vf_id) &&
2148	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
2149		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
2150		set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2151	} else {
2152		clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2153	}
2154
2155	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2156		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2157	} else {
2158		if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
2159		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2160			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2161		else
2162			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2163	}
2164
2165	if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) {
2166		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2167			vfres->vf_cap_flags |=
2168				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2169	}
2170
2171	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2172		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2173
2174	if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) &&
2175	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2176		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2177
2178	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2179		if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
2180			dev_err(&pf->pdev->dev,
2181				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2182				 vf->vf_id);
2183			aq_ret = -EINVAL;
2184			goto err;
2185		}
2186		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2187	}
2188
2189	if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) {
2190		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2191			vfres->vf_cap_flags |=
2192					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2193	}
2194
2195	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2196		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2197
2198	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2199		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2200
2201	vfres->num_vsis = num_vsis;
2202	vfres->num_queue_pairs = vf->num_queue_pairs;
2203	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2204	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2205	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2206	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
2207
2208	if (vf->lan_vsi_idx) {
2209		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2210		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2211		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2212		/* VFs only use TC 0 */
2213		vfres->vsi_res[0].qset_handle
2214					  = le16_to_cpu(vsi->info.qs_handle[0]);
2215		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
2216			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2217			eth_zero_addr(vf->default_lan_addr.addr);
2218		}
2219		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2220				vf->default_lan_addr.addr);
2221	}
2222	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2223
2224err:
2225	/* send the response back to the VF */
2226	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2227				     aq_ret, (u8 *)vfres, len);
2228
2229	kfree(vfres);
2230	return ret;
2231}
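/* The negotiation above follows one pattern throughout: a capability is
 * advertised back to the VF only if the VF requested it and the PF/HW side
 * supports it. A minimal sketch of one such clause (flag and capability
 * names hypothetical):
 *
 *	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FOO) &&
 *	    test_bit(I40E_HW_CAP_FOO, pf->hw.caps))
 *		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FOO;
 */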
2232
2233/**
2234 * i40e_vc_config_promiscuous_mode_msg
2235 * @vf: pointer to the VF info
2236 * @msg: pointer to the msg buffer
2237 *
2238 * called from the VF to configure the promiscuous mode of
2239 * VF vsis
2240 **/
2241static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2242{
2243	struct virtchnl_promisc_info *info =
2244	    (struct virtchnl_promisc_info *)msg;
2245	struct i40e_pf *pf = vf->pf;
2246	bool allmulti = false;
2247	bool alluni = false;
2248	int aq_ret = 0;
2249
2250	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2251		aq_ret = -EINVAL;
2252		goto err_out;
2253	}
2254	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2255		dev_err(&pf->pdev->dev,
2256			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
2257			vf->vf_id);
2258
2259		/* Lie to the VF on purpose, because this is an error we can
2260		 * ignore. An unprivileged VF is not a virtual channel error.
2261		 */
2262		aq_ret = 0;
2263		goto err_out;
2264	}
2265
2266	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2267		aq_ret = -EINVAL;
2268		goto err_out;
2269	}
2270
2271	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2272		aq_ret = -EINVAL;
2273		goto err_out;
2274	}
2275
2276	/* Multicast promiscuous handling */
2277	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2278		allmulti = true;
2279
2280	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2281		alluni = true;
2282	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2283						 alluni);
2284	if (aq_ret)
2285		goto err_out;
2286
2287	if (allmulti) {
2288		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2289				      &vf->vf_states))
2290			dev_info(&pf->pdev->dev,
2291				 "VF %d successfully set multicast promiscuous mode\n",
2292				 vf->vf_id);
2293	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2294				      &vf->vf_states))
2295		dev_info(&pf->pdev->dev,
2296			 "VF %d successfully unset multicast promiscuous mode\n",
2297			 vf->vf_id);
2298
2299	if (alluni) {
2300		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2301				      &vf->vf_states))
2302			dev_info(&pf->pdev->dev,
2303				 "VF %d successfully set unicast promiscuous mode\n",
2304				 vf->vf_id);
2305	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2306				      &vf->vf_states))
2307		dev_info(&pf->pdev->dev,
2308			 "VF %d successfully unset unicast promiscuous mode\n",
2309			 vf->vf_id);
2310
2311err_out:
2312	/* send the response to the VF */
2313	return i40e_vc_send_resp_to_vf(vf,
2314				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2315				       aq_ret);
2316}
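/* The VF drives this handler with a virtchnl_promisc_info whose flags are a
 * bitwise OR of the two promiscuous modes; e.g. a VF-side request of
 *
 *	info.vsi_id = vsi_id;
 *	info.flags  = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
 *
 * makes both "alluni" and "allmulti" true above, while flags == 0 clears
 * both modes.
 */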
2317
2318/**
2319 * i40e_vc_config_queues_msg
2320 * @vf: pointer to the VF info
2321 * @msg: pointer to the msg buffer
2322 *
2323 * called from the VF to configure the rx/tx
2324 * queues
2325 **/
2326static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2327{
2328	struct virtchnl_vsi_queue_config_info *qci =
2329	    (struct virtchnl_vsi_queue_config_info *)msg;
2330	struct virtchnl_queue_pair_info *qpi;
2331	u16 vsi_id, vsi_queue_id = 0;
2332	struct i40e_pf *pf = vf->pf;
2333	int i, j = 0, idx = 0;
2334	struct i40e_vsi *vsi;
2335	u16 num_qps_all = 0;
2336	int aq_ret = 0;
2337
2338	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2339		aq_ret = -EINVAL;
2340		goto error_param;
2341	}
2342
2343	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2344		aq_ret = -EINVAL;
2345		goto error_param;
2346	}
2347
2348	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2349		aq_ret = -EINVAL;
2350		goto error_param;
2351	}
2352
2353	if (vf->adq_enabled) {
2354		for (i = 0; i < vf->num_tc; i++)
2355			num_qps_all += vf->ch[i].num_qps;
2356		if (num_qps_all != qci->num_queue_pairs) {
2357			aq_ret = -EINVAL;
2358			goto error_param;
2359		}
2360	}
2361
2362	vsi_id = qci->vsi_id;
2363
2364	for (i = 0; i < qci->num_queue_pairs; i++) {
2365		qpi = &qci->qpair[i];
2366
2367		if (!vf->adq_enabled) {
2368			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2369						      qpi->txq.queue_id)) {
2370				aq_ret = -EINVAL;
2371				goto error_param;
2372			}
2373
2374			vsi_queue_id = qpi->txq.queue_id;
2375
2376			if (qpi->txq.vsi_id != qci->vsi_id ||
2377			    qpi->rxq.vsi_id != qci->vsi_id ||
2378			    qpi->rxq.queue_id != vsi_queue_id) {
2379				aq_ret = -EINVAL;
2380				goto error_param;
2381			}
2382		}
2383
2384		if (vf->adq_enabled) {
2385			if (idx >= ARRAY_SIZE(vf->ch)) {
2386				aq_ret = -ENODEV;
2387				goto error_param;
2388			}
2389			vsi_id = vf->ch[idx].vsi_id;
2390		}
2391
2392		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2393					     &qpi->rxq) ||
2394		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2395					     &qpi->txq)) {
2396			aq_ret = -EINVAL;
2397			goto error_param;
2398		}
2399
2400		/* For ADq there can be up to 4 VSIs with max 4 queues each.
2401		 * The VF does not know about these additional VSIs; all it
2402		 * cares about is its own queues. The PF configures these
2403		 * queues on the appropriate VSIs based on the TC mapping.
2404		 */
2405		if (vf->adq_enabled) {
2406			if (idx >= ARRAY_SIZE(vf->ch)) {
2407				aq_ret = -ENODEV;
2408				goto error_param;
2409			}
2410			if (j == (vf->ch[idx].num_qps - 1)) {
2411				idx++;
2412				j = 0; /* resetting the queue count */
2413				vsi_queue_id = 0;
2414			} else {
2415				j++;
2416				vsi_queue_id++;
2417			}
2418		}
2419	}
2420	/* set vsi num_queue_pairs in use to num configured by VF */
2421	if (!vf->adq_enabled) {
2422		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2423			qci->num_queue_pairs;
2424	} else {
2425		for (i = 0; i < vf->num_tc; i++) {
2426			vsi = pf->vsi[vf->ch[i].vsi_idx];
2427			vsi->num_queue_pairs = vf->ch[i].num_qps;
2428
2429			if (i40e_update_adq_vsi_queues(vsi, i)) {
2430				aq_ret = -EIO;
2431				goto error_param;
2432			}
2433		}
2434	}
2435
2436error_param:
2437	/* send the response to the VF */
2438	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2439				       aq_ret);
2440}
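/* Worked ADq example (queue counts illustrative): with two TCs of 4 queue
 * pairs each, qci->num_queue_pairs is 8. Pairs 0-3 are programmed into
 * ch[0]'s VSI as its queues 0-3; once j reaches num_qps - 1 the code above
 * advances idx and resets vsi_queue_id, so pairs 4-7 land in ch[1]'s VSI
 * as its queues 0-3.
 */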
2441
2442/**
2443 * i40e_validate_queue_map - check queue map is valid
2444 * @vf: the VF structure pointer
2445 * @vsi_id: vsi id
2446 * @queuemap: Tx or Rx queue map
2447 *
2448 * check if Tx or Rx queue map is valid
2449 **/
2450static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2451				   unsigned long queuemap)
2452{
2453	u16 vsi_queue_id, queue_id;
2454
2455	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2456		if (vf->adq_enabled) {
2457			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2458			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2459		} else {
2460			queue_id = vsi_queue_id;
2461		}
2462
2463		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2464			return -EINVAL;
2465	}
2466
2467	return 0;
2468}
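/* Sketch of the ADq translation above, assuming I40E_MAX_VF_VSI and
 * I40E_DEFAULT_QUEUES_PER_VF are both 4: a set bit at vsi_queue_id 6
 * selects channel 6 / 4 = 1 and queue 6 % 4 = 2 of that channel's VSI,
 * i.e. the flat bitmap is interpreted as the channels laid out back to
 * back.
 */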
2469
2470/**
2471 * i40e_vc_config_irq_map_msg
2472 * @vf: pointer to the VF info
2473 * @msg: pointer to the msg buffer
2474 *
2475 * called from the VF to configure the irq to
2476 * queue map
2477 **/
2478static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2479{
2480	struct virtchnl_irq_map_info *irqmap_info =
2481	    (struct virtchnl_irq_map_info *)msg;
2482	struct virtchnl_vector_map *map;
2483	int aq_ret = 0;
2484	u16 vsi_id;
2485	int i;
2486
2487	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2488		aq_ret = -EINVAL;
2489		goto error_param;
2490	}
2491
2492	if (irqmap_info->num_vectors >
2493	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
2494		aq_ret = -EINVAL;
2495		goto error_param;
2496	}
2497
2498	for (i = 0; i < irqmap_info->num_vectors; i++) {
2499		map = &irqmap_info->vecmap[i];
2500		/* validate msg params */
2501		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2502		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2503			aq_ret = -EINVAL;
2504			goto error_param;
2505		}
2506		vsi_id = map->vsi_id;
2507
2508		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2509			aq_ret = -EINVAL;
2510			goto error_param;
2511		}
2512
2513		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2514			aq_ret = -EINVAL;
2515			goto error_param;
2516		}
2517
2518		i40e_config_irq_link_list(vf, vsi_id, map);
2519	}
2520error_param:
2521	/* send the response to the VF */
2522	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2523				       aq_ret);
2524}
2525
2526/**
2527 * i40e_ctrl_vf_tx_rings
2528 * @vsi: the SRIOV VSI being configured
2529 * @q_map: bit map of the queues to be enabled
2530 * @enable: start or stop the queue
2531 **/
2532static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2533				 bool enable)
2534{
2535	struct i40e_pf *pf = vsi->back;
2536	int ret = 0;
2537	u16 q_id;
2538
2539	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2540		ret = i40e_control_wait_tx_q(vsi->seid, pf,
2541					     vsi->base_queue + q_id,
2542					     false /*is xdp*/, enable);
2543		if (ret)
2544			break;
2545	}
2546	return ret;
2547}
2548
2549/**
2550 * i40e_ctrl_vf_rx_rings
2551 * @vsi: the SRIOV VSI being configured
2552 * @q_map: bit map of the queues to be enabled
2553 * @enable: start or stop the queue
2554 **/
2555static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2556				 bool enable)
2557{
2558	struct i40e_pf *pf = vsi->back;
2559	int ret = 0;
2560	u16 q_id;
2561
2562	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2563		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2564					     enable);
2565		if (ret)
2566			break;
2567	}
2568	return ret;
2569}
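/* Both ring helpers walk the VF-relative bitmap with for_each_set_bit and
 * offset by the VSI's base queue: e.g. a q_map of 0x5 (binary 0101) on a
 * VSI with base_queue 16 touches absolute queues 16 and 18 only, stopping
 * at the first queue that fails to start or stop.
 */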
2570
2571/**
2572 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
2573 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2574 *
2575 * Returns true if validation was successful, else false.
2576 **/
2577static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2578{
2579	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2580	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2581	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2582		return false;
2583
2584	return true;
2585}
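/* An illustrative (never called) use of the check above, assuming the
 * 16-queue VF maximum: selecting all Rx queues and no Tx queues is valid,
 * whereas rx_queues == tx_queues == 0, or any bit at position 16 or
 * higher, is rejected.
 */
static inline bool i40e_example_vqs_ok(void)
{
	struct virtchnl_queue_select vqs = {
		.rx_queues = 0xffff,	/* VF queues 0-15 */
		.tx_queues = 0,
	};

	return i40e_vc_validate_vqs_bitmaps(&vqs);	/* -> true */
}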
2586
2587/**
2588 * i40e_vc_enable_queues_msg
2589 * @vf: pointer to the VF info
2590 * @msg: pointer to the msg buffer
2591 *
2592 * called from the VF to enable all or specific queue(s)
2593 **/
2594static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2595{
2596	struct virtchnl_queue_select *vqs =
2597	    (struct virtchnl_queue_select *)msg;
2598	struct i40e_pf *pf = vf->pf;
2599	int aq_ret = 0;
2600	int i;
2601
2602	if (vf->is_disabled_from_host) {
2603		aq_ret = -EPERM;
2604		dev_info(&pf->pdev->dev,
2605			 "Admin has disabled VF %d, will not enable queues\n",
2606			 vf->vf_id);
2607		goto error_param;
2608	}
2609
2610	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2611		aq_ret = -EINVAL;
2612		goto error_param;
2613	}
2614
2615	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2616		aq_ret = -EINVAL;
2617		goto error_param;
2618	}
2619
2620	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2621		aq_ret = -EINVAL;
2622		goto error_param;
2623	}
2624
2625	/* Use the queue bit map sent by the VF */
2626	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2627				  true)) {
2628		aq_ret = -EIO;
2629		goto error_param;
2630	}
2631	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2632				  true)) {
2633		aq_ret = -EIO;
2634		goto error_param;
2635	}
2636
2637	/* need to start the rings for additional ADq VSIs as well */
2638	if (vf->adq_enabled) {
2639		/* zero belongs to LAN VSI */
2640		for (i = 1; i < vf->num_tc; i++) {
2641			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2642				aq_ret = -EIO;
2643		}
2644	}
2645
2646error_param:
2647	/* send the response to the VF */
2648	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2649				       aq_ret);
2650}
2651
2652/**
2653 * i40e_vc_disable_queues_msg
2654 * @vf: pointer to the VF info
2655 * @msg: pointer to the msg buffer
2656 *
2657 * called from the VF to disable all or specific
2658 * queue(s)
2659 **/
2660static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2661{
2662	struct virtchnl_queue_select *vqs =
2663	    (struct virtchnl_queue_select *)msg;
2664	struct i40e_pf *pf = vf->pf;
2665	int aq_ret = 0;
2666
2667	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2668		aq_ret = -EINVAL;
2669		goto error_param;
2670	}
2671
2672	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2673		aq_ret = -EINVAL;
2674		goto error_param;
2675	}
2676
2677	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2678		aq_ret = -EINVAL;
2679		goto error_param;
2680	}
2681
2682	/* Use the queue bit map sent by the VF */
2683	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2684				  false)) {
2685		aq_ret = -EIO;
2686		goto error_param;
2687	}
2688	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2689				  false)) {
2690		aq_ret = -EIO;
2691		goto error_param;
2692	}
2693error_param:
2694	/* send the response to the VF */
2695	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2696				       aq_ret);
2697}
2698
2699/**
2700 * i40e_check_enough_queue - check for a large enough run of queues
2701 * @vf: pointer to the VF info
2702 * @needed: the number of items needed
2703 *
2704 * Returns the base item index of the queue, or negative for error
2705 **/
2706static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2707{
2708	unsigned int  i, cur_queues, more, pool_size;
2709	struct i40e_lump_tracking *pile;
2710	struct i40e_pf *pf = vf->pf;
2711	struct i40e_vsi *vsi;
2712
2713	vsi = pf->vsi[vf->lan_vsi_idx];
2714	cur_queues = vsi->alloc_queue_pairs;
2715
2716	/* if the currently allocated queues are already enough */
2717	if (cur_queues >= needed)
2718		return vsi->base_queue;
2719
2720	pile = pf->qp_pile;
2721	if (cur_queues > 0) {
2722		/* if some queues are already allocated, just check
2723		 * whether there are enough free queues available
2724		 * directly behind the allocated block.
2725		 */
2726		more = needed - cur_queues;
2727		for (i = vsi->base_queue + cur_queues;
2728			i < pile->num_entries; i++) {
2729			if (pile->list[i] & I40E_PILE_VALID_BIT)
2730				break;
2731
2732			if (more-- == 1)
2733				/* there is enough */
2734				return vsi->base_queue;
2735		}
2736	}
2737
2738	pool_size = 0;
2739	for (i = 0; i < pile->num_entries; i++) {
2740		if (pile->list[i] & I40E_PILE_VALID_BIT) {
2741			pool_size = 0;
2742			continue;
2743		}
2744		if (needed <= ++pool_size)
2745			/* there is enough */
2746			return i;
2747	}
2748
2749	return -ENOMEM;
2750}
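/* Example of the two scans above: with a pile laid out as
 * [used used free free free used ...] and needed = 3, the fallback scan's
 * run counter reaches 3 at index 4 and reports success; the first scan is
 * just the cheaper special case of growing the VSI's existing block in
 * place.
 */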
2751
2752/**
2753 * i40e_vc_request_queues_msg
2754 * @vf: pointer to the VF info
2755 * @msg: pointer to the msg buffer
2756 *
2757 * VFs get a default number of queues but can use this message to request a
2758 * different number.  If the request is successful, PF will reset the VF and
2759 * return 0.  If unsuccessful, PF will send message informing VF of number of
2760 * available queues and return result of sending VF a message.
2761 **/
2762static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2763{
2764	struct virtchnl_vf_res_request *vfres =
2765		(struct virtchnl_vf_res_request *)msg;
2766	u16 req_pairs = vfres->num_queue_pairs;
2767	u8 cur_pairs = vf->num_queue_pairs;
2768	struct i40e_pf *pf = vf->pf;
2769
2770	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2771		return -EINVAL;
2772
2773	if (req_pairs > I40E_MAX_VF_QUEUES) {
2774		dev_err(&pf->pdev->dev,
2775			"VF %d tried to request more than %d queues.\n",
2776			vf->vf_id,
2777			I40E_MAX_VF_QUEUES);
2778		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2779	} else if (req_pairs - cur_pairs > pf->queues_left) {
2780		dev_warn(&pf->pdev->dev,
2781			 "VF %d requested %d more queues, but only %d left.\n",
2782			 vf->vf_id,
2783			 req_pairs - cur_pairs,
2784			 pf->queues_left);
2785		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2786	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2787		dev_warn(&pf->pdev->dev,
2788			 "VF %d requested %d more queues, but there is not enough for it.\n",
2789			 vf->vf_id,
2790			 req_pairs - cur_pairs);
2791		vfres->num_queue_pairs = cur_pairs;
2792	} else {
2793		/* successful request */
2794		vf->num_req_queues = req_pairs;
2795		i40e_vc_reset_vf(vf, true);
2796		return 0;
2797	}
2798
2799	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2800				      (u8 *)vfres, sizeof(*vfres));
2801}
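/* Worked example (numbers illustrative): a VF that owns 4 queue pairs and
 * asks for 16 when only 8 are left in the PF pool takes the second branch
 * and is offered 8 + 4 = 12 pairs in the reply. Asking for a count that
 * fits takes the success path: num_req_queues is stored and the VF is
 * reset, after which it renegotiates resources and sees the new count.
 */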
2802
2803/**
2804 * i40e_vc_get_stats_msg
2805 * @vf: pointer to the VF info
2806 * @msg: pointer to the msg buffer
2807 *
2808 * called from the VF to get vsi stats
2809 **/
2810static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2811{
2812	struct virtchnl_queue_select *vqs =
2813	    (struct virtchnl_queue_select *)msg;
2814	struct i40e_pf *pf = vf->pf;
2815	struct i40e_eth_stats stats;
2816	int aq_ret = 0;
2817	struct i40e_vsi *vsi;
2818
2819	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2820
2821	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2822		aq_ret = -EINVAL;
2823		goto error_param;
2824	}
2825
2826	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2827		aq_ret = -EINVAL;
2828		goto error_param;
2829	}
2830
2831	vsi = pf->vsi[vf->lan_vsi_idx];
2832	if (!vsi) {
2833		aq_ret = -EINVAL;
2834		goto error_param;
2835	}
2836	i40e_update_eth_stats(vsi);
2837	stats = vsi->eth_stats;
2838
2839error_param:
2840	/* send the response back to the VF */
2841	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2842				      (u8 *)&stats, sizeof(stats));
2843}
2844
2845/**
2846 * i40e_can_vf_change_mac
2847 * @vf: pointer to the VF info
2848 *
2849 * Return true if the VF is allowed to change its MAC filters, false otherwise
2850 **/
2851static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
2852{
2853	/* If the VF MAC address has been set administratively (via the
2854	 * ndo_set_vf_mac command), then deny permission to the VF to
2855	 * add/delete unicast MAC addresses, unless the VF is trusted
2856	 */
2857	if (vf->pf_set_mac && !vf->trusted)
2858		return false;
2859
2860	return true;
2861}
2862
2863#define I40E_MAX_MACVLAN_PER_HW 3072
2864#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
2865	(num_ports))
2866/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2867 * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast.
2868 */
2869#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2870#define I40E_VC_MAX_VLAN_PER_VF 16
2871
2872#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
2873({	typeof(vf_num) vf_num_ = (vf_num);				\
2874	typeof(num_ports) num_ports_ = (num_ports);			\
2875	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
2876	I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
2877	I40E_VC_MAX_MAC_ADDR_PER_VF; })
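/* Worked example (port and VF counts illustrative): on a 4-port device the
 * per-PF pool is 3072 / 4 = 768 filters. With 8 VFs, each untrusted VF is
 * capped at 16 + 1 + 1 = 18 filters, while a trusted VF may use up to
 * ((768 - 8 * 18) / 8) + 18 = 96 MAC/VLAN filters.
 */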
2878/**
2879 * i40e_check_vf_permission
2880 * @vf: pointer to the VF info
2881 * @al: MAC address list from virtchnl
2882 *
2883 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2884 * if any address in the list is not valid. Checks the following conditions:
2885 *
2886 * 1) broadcast and zero addresses are never valid
2887 * 2) unicast addresses are not allowed if the VMM has administratively set
2888 *    the VF MAC address, unless the VF is marked as privileged.
2889 * 3) There is enough space to add all the addresses.
2890 *
2891 * Note that to guarantee consistency, it is expected this function be called
2892 * while holding the mac_filter_hash_lock, as otherwise the current number of
2893 * addresses might not be accurate.
2894 **/
2895static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2896					   struct virtchnl_ether_addr_list *al)
2897{
2898	struct i40e_pf *pf = vf->pf;
2899	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2900	struct i40e_hw *hw = &pf->hw;
2901	int mac2add_cnt = 0;
2902	int i;
2903
2904	for (i = 0; i < al->num_elements; i++) {
2905		struct i40e_mac_filter *f;
2906		u8 *addr = al->list[i].addr;
2907
2908		if (is_broadcast_ether_addr(addr) ||
2909		    is_zero_ether_addr(addr)) {
2910			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2911				addr);
2912			return -EINVAL;
2913		}
2914
2915		/* If the host VMM administrator has set the VF MAC address
2916		 * administratively via the ndo_set_vf_mac command then deny
2917		 * permission to the VF to add or delete unicast MAC addresses.
2918		 * Unless the VF is privileged, in which case it can do anything.
2919		 * The VF may request to set the MAC address filter already
2920		 * assigned to it, so do not return an error in that case.
2921		 */
2922		if (!i40e_can_vf_change_mac(vf) &&
2923		    !is_multicast_ether_addr(addr) &&
2924		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2925			dev_err(&pf->pdev->dev,
2926				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2927			return -EPERM;
2928		}
2929
2930		/* count filters that will actually be added */
2931		f = i40e_find_mac(vsi, addr);
2932		if (!f)
2933			++mac2add_cnt;
2934	}
2935
2936	/* If this VF is not privileged, then we can't add more than a limited
2937	 * number of addresses. Check to make sure that the additions do not
2938	 * push us over the limit.
2939	 */
2940	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2941		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2942		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
2943			dev_err(&pf->pdev->dev,
2944				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2945			return -EPERM;
2946		}
2947	/* If this VF is trusted, it can use more resources than an untrusted
2948	 * one. However, to ensure that every trusted VF gets an appropriate
2949	 * number of resources, divide the whole pool of resources per port
2950	 * and then across all VFs.
2951	 */
2952	} else {
2953		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2954		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2955						       hw->num_ports)) {
2956			dev_err(&pf->pdev->dev,
2957				"Cannot add more MAC addresses, trusted VF exhausted its resources\n");
2958			return -EPERM;
2959		}
2960	}
2961	return 0;
2962}
2963
2964/**
2965 * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
2966 * @vc_ether_addr: used to extract the type
2967 **/
2968static u8
2969i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
2970{
2971	return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
2972}
2973
2974/**
2975 * i40e_is_vc_addr_legacy
2976 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2977 *
2978 * check if the MAC address is from an older VF
2979 **/
2980static bool
2981i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
2982{
2983	return i40e_vc_ether_addr_type(vc_ether_addr) ==
2984		VIRTCHNL_ETHER_ADDR_LEGACY;
2985}
2986
2987/**
2988 * i40e_is_vc_addr_primary
2989 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2990 *
2991 * check if the MAC address is the VF's primary MAC
2992 * This function should only be called when the MAC address in
2993 * virtchnl_ether_addr is a valid unicast MAC
2994 **/
2995static bool
2996i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
2997{
2998	return i40e_vc_ether_addr_type(vc_ether_addr) ==
2999		VIRTCHNL_ETHER_ADDR_PRIMARY;
3000}
3001
3002/**
3003 * i40e_update_vf_mac_addr
3004 * @vf: VF to update
3005 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
3006 *
3007 * update the VF's cached hardware MAC if allowed
3008 **/
3009static void
3010i40e_update_vf_mac_addr(struct i40e_vf *vf,
3011			struct virtchnl_ether_addr *vc_ether_addr)
3012{
3013	u8 *mac_addr = vc_ether_addr->addr;
3014
3015	if (!is_valid_ether_addr(mac_addr))
3016		return;
3017
3018	/* If the request to add a MAC filter is a primary request, update the
3019	 * default MAC address with the requested one. If it is a legacy
3020	 * request, update the default MAC only if the current one is empty.
3021	 */
3022	if (i40e_is_vc_addr_primary(vc_ether_addr)) {
3023		ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3024	} else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
3025		if (is_zero_ether_addr(vf->default_lan_addr.addr))
3026			ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3027	}
3028}
3029
3030/**
3031 * i40e_vc_add_mac_addr_msg
3032 * @vf: pointer to the VF info
3033 * @msg: pointer to the msg buffer
3034 *
3035 * add guest mac address filter
3036 **/
3037static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3038{
3039	struct virtchnl_ether_addr_list *al =
3040	    (struct virtchnl_ether_addr_list *)msg;
3041	struct i40e_pf *pf = vf->pf;
3042	struct i40e_vsi *vsi = NULL;
3043	int ret = 0;
3044	int i;
3045
3046	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3047	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3048		ret = -EINVAL;
3049		goto error_param;
3050	}
3051
3052	vsi = pf->vsi[vf->lan_vsi_idx];
3053
3054	/* Lock once, because every function inside the for loop accesses the
3055	 * VSI's MAC filter list, which must be protected by the same lock.
3056	 */
3057	spin_lock_bh(&vsi->mac_filter_hash_lock);
3058
3059	ret = i40e_check_vf_permission(vf, al);
3060	if (ret) {
3061		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3062		goto error_param;
3063	}
3064
3065	/* add new addresses to the list */
3066	for (i = 0; i < al->num_elements; i++) {
3067		struct i40e_mac_filter *f;
3068
3069		f = i40e_find_mac(vsi, al->list[i].addr);
3070		if (!f) {
3071			f = i40e_add_mac_filter(vsi, al->list[i].addr);
3072
3073			if (!f) {
3074				dev_err(&pf->pdev->dev,
3075					"Unable to add MAC filter %pM for VF %d\n",
3076					al->list[i].addr, vf->vf_id);
3077				ret = -EINVAL;
3078				spin_unlock_bh(&vsi->mac_filter_hash_lock);
3079				goto error_param;
3080			}
3081		}
3082		i40e_update_vf_mac_addr(vf, &al->list[i]);
3083	}
3084	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3085
3086	/* program the updated filter list */
3087	ret = i40e_sync_vsi_filters(vsi);
3088	if (ret)
3089		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3090			vf->vf_id, ret);
3091
3092error_param:
3093	/* send the response to the VF */
3094	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3095				      ret, NULL, 0);
3096}
3097
3098/**
3099 * i40e_vc_del_mac_addr_msg
3100 * @vf: pointer to the VF info
3101 * @msg: pointer to the msg buffer
3102 *
3103 * remove guest mac address filter
3104 **/
3105static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3106{
3107	struct virtchnl_ether_addr_list *al =
3108	    (struct virtchnl_ether_addr_list *)msg;
3109	bool was_unimac_deleted = false;
3110	struct i40e_pf *pf = vf->pf;
3111	struct i40e_vsi *vsi = NULL;
3112	int ret = 0;
3113	int i;
3114
3115	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3116	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3117		ret = -EINVAL;
3118		goto error_param;
3119	}
3120
3121	for (i = 0; i < al->num_elements; i++) {
3122		if (is_broadcast_ether_addr(al->list[i].addr) ||
3123		    is_zero_ether_addr(al->list[i].addr)) {
3124			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
3125				al->list[i].addr, vf->vf_id);
3126			ret = -EINVAL;
3127			goto error_param;
3128		}
3129	}
3130	vsi = pf->vsi[vf->lan_vsi_idx];
3131
3132	spin_lock_bh(&vsi->mac_filter_hash_lock);
3133	/* delete addresses from the list */
3134	for (i = 0; i < al->num_elements; i++) {
3135		const u8 *addr = al->list[i].addr;
3136
3137		/* Allow deleting the VF's primary MAC only if it was not set
3138		 * administratively by the PF, or if the VF is trusted.
3139		 */
3140		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
3141			if (i40e_can_vf_change_mac(vf))
3142				was_unimac_deleted = true;
3143			else
3144				continue;
3145		}
3146
3147		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3148			ret = -EINVAL;
3149			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3150			goto error_param;
3151		}
3152	}
3153
3154	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3155
3156	if (was_unimac_deleted)
3157		eth_zero_addr(vf->default_lan_addr.addr);
3158
3159	/* program the updated filter list */
3160	ret = i40e_sync_vsi_filters(vsi);
3161	if (ret)
3162		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3163			vf->vf_id, ret);
3164
3165	if (vf->trusted && was_unimac_deleted) {
3166		struct i40e_mac_filter *f;
3167		struct hlist_node *h;
3168		u8 *macaddr = NULL;
3169		int bkt;
3170
3171		/* set the last unicast MAC address as the default */
3172		spin_lock_bh(&vsi->mac_filter_hash_lock);
3173		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3174			if (is_valid_ether_addr(f->macaddr))
3175				macaddr = f->macaddr;
3176		}
3177		if (macaddr)
3178			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3179		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3180	}
3181error_param:
3182	/* send the response to the VF */
3183	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3184}
3185
3186/**
3187 * i40e_vc_add_vlan_msg
3188 * @vf: pointer to the VF info
3189 * @msg: pointer to the msg buffer
3190 *
3191 * program guest vlan id
3192 **/
3193static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3194{
3195	struct virtchnl_vlan_filter_list *vfl =
3196	    (struct virtchnl_vlan_filter_list *)msg;
3197	struct i40e_pf *pf = vf->pf;
3198	struct i40e_vsi *vsi = NULL;
3199	int aq_ret = 0;
3200	int i;
3201
3202	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3203	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3204		dev_err(&pf->pdev->dev,
3205			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
3206		goto error_param;
3207	}
3208	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3209	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3210		aq_ret = -EINVAL;
3211		goto error_param;
3212	}
3213
3214	for (i = 0; i < vfl->num_elements; i++) {
3215		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3216			aq_ret = -EINVAL;
3217			dev_err(&pf->pdev->dev,
3218				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3219			goto error_param;
3220		}
3221	}
3222	vsi = pf->vsi[vf->lan_vsi_idx];
3223	if (vsi->info.pvid) {
3224		aq_ret = -EINVAL;
3225		goto error_param;
3226	}
3227
3228	i40e_vlan_stripping_enable(vsi);
3229	for (i = 0; i < vfl->num_elements; i++) {
3230		/* add new VLAN filter */
3231		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3232		if (!ret)
3233			vf->num_vlan++;
3234
3235		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3236			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3237							   true,
3238							   vfl->vlan_id[i],
3239							   NULL);
3240		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3241			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3242							   true,
3243							   vfl->vlan_id[i],
3244							   NULL);
3245
3246		if (ret)
3247			dev_err(&pf->pdev->dev,
3248				"Unable to add VLAN filter %d for VF %d, error %d\n",
3249				vfl->vlan_id[i], vf->vf_id, ret);
3250	}
3251
3252error_param:
3253	/* send the response to the VF */
3254	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3255}
3256
3257/**
3258 * i40e_vc_remove_vlan_msg
3259 * @vf: pointer to the VF info
3260 * @msg: pointer to the msg buffer
3261 *
3262 * remove programmed guest vlan id
3263 **/
3264static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3265{
3266	struct virtchnl_vlan_filter_list *vfl =
3267	    (struct virtchnl_vlan_filter_list *)msg;
3268	struct i40e_pf *pf = vf->pf;
3269	struct i40e_vsi *vsi = NULL;
3270	int aq_ret = 0;
3271	int i;
3272
3273	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3274	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3275		aq_ret = -EINVAL;
3276		goto error_param;
3277	}
3278
3279	for (i = 0; i < vfl->num_elements; i++) {
3280		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3281			aq_ret = -EINVAL;
3282			goto error_param;
3283		}
3284	}
3285
3286	vsi = pf->vsi[vf->lan_vsi_idx];
3287	if (vsi->info.pvid) {
3288		if (vfl->num_elements > 1 || vfl->vlan_id[0])
3289			aq_ret = -EINVAL;
3290		goto error_param;
3291	}
3292
3293	for (i = 0; i < vfl->num_elements; i++) {
3294		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3295		vf->num_vlan--;
3296
3297		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3298			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3299							   false,
3300							   vfl->vlan_id[i],
3301							   NULL);
3302		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3303			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3304							   false,
3305							   vfl->vlan_id[i],
3306							   NULL);
3307	}
3308
3309error_param:
3310	/* send the response to the VF */
3311	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3312}
3313
3314/**
3315 * i40e_vc_rdma_msg
3316 * @vf: pointer to the VF info
3317 * @msg: pointer to the msg buffer
3318 * @msglen: msg length
3319 *
3320 * called from the VF for RDMA (iWARP) messages
3321 **/
3322static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3323{
3324	struct i40e_pf *pf = vf->pf;
3325	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3326	int aq_ret = 0;
3327
3328	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3329	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3330		aq_ret = -EINVAL;
3331		goto error_param;
3332	}
3333
3334	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3335				     msg, msglen);
3336
3337error_param:
3338	/* send the response to the VF */
3339	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
3340				       aq_ret);
3341}
3342
3343/**
3344 * i40e_vc_rdma_qvmap_msg
3345 * @vf: pointer to the VF info
3346 * @msg: pointer to the msg buffer
3347 * @config: configure the queue-vector map if true, release it if false
3348 *
3349 * called from the VF for RDMA (iWARP) messages
3350 **/
3351static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3352{
3353	struct virtchnl_rdma_qvlist_info *qvlist_info =
3354				(struct virtchnl_rdma_qvlist_info *)msg;
3355	int aq_ret = 0;
3356
3357	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3358	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3359		aq_ret = -EINVAL;
3360		goto error_param;
3361	}
3362
3363	if (config) {
3364		if (i40e_config_rdma_qvlist(vf, qvlist_info))
3365			aq_ret = -EINVAL;
3366	} else {
3367		i40e_release_rdma_qvlist(vf);
3368	}
3369
3370error_param:
3371	/* send the response to the VF */
3372	return i40e_vc_send_resp_to_vf(vf,
3373			       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
3374			       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
3375			       aq_ret);
3376}
3377
3378/**
3379 * i40e_vc_config_rss_key
3380 * @vf: pointer to the VF info
3381 * @msg: pointer to the msg buffer
3382 *
3383 * Configure the VF's RSS key
3384 **/
3385static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3386{
3387	struct virtchnl_rss_key *vrk =
3388		(struct virtchnl_rss_key *)msg;
3389	struct i40e_pf *pf = vf->pf;
3390	struct i40e_vsi *vsi = NULL;
3391	int aq_ret = 0;
3392
3393	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3394	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3395	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3396		aq_ret = -EINVAL;
3397		goto err;
3398	}
3399
3400	vsi = pf->vsi[vf->lan_vsi_idx];
3401	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3402err:
3403	/* send the response to the VF */
3404	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3405				       aq_ret);
3406}
3407
3408/**
3409 * i40e_vc_config_rss_lut
3410 * @vf: pointer to the VF info
3411 * @msg: pointer to the msg buffer
3412 *
3413 * Configure the VF's RSS LUT
3414 **/
3415static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3416{
3417	struct virtchnl_rss_lut *vrl =
3418		(struct virtchnl_rss_lut *)msg;
3419	struct i40e_pf *pf = vf->pf;
3420	struct i40e_vsi *vsi = NULL;
3421	int aq_ret = 0;
3422	u16 i;
3423
3424	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3425	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3426	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3427		aq_ret = -EINVAL;
3428		goto err;
3429	}
3430
3431	for (i = 0; i < vrl->lut_entries; i++)
3432		if (vrl->lut[i] >= vf->num_queue_pairs) {
3433			aq_ret = -EINVAL;
3434			goto err;
3435		}
3436
3437	vsi = pf->vsi[vf->lan_vsi_idx];
3438	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3439	/* send the response to the VF */
3440err:
3441	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3442				       aq_ret);
3443}
3444
3445/**
3446 * i40e_vc_get_rss_hena
3447 * @vf: pointer to the VF info
3448 * @msg: pointer to the msg buffer
3449 *
3450 * Return the RSS HENA bits allowed by the hardware
3451 **/
3452static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3453{
3454	struct virtchnl_rss_hena *vrh = NULL;
3455	struct i40e_pf *pf = vf->pf;
3456	int aq_ret = 0;
3457	int len = 0;
3458
3459	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3460		aq_ret = -EINVAL;
3461		goto err;
3462	}
3463	len = sizeof(struct virtchnl_rss_hena);
3464
3465	vrh = kzalloc(len, GFP_KERNEL);
3466	if (!vrh) {
3467		aq_ret = -ENOMEM;
3468		len = 0;
3469		goto err;
3470	}
3471	vrh->hena = i40e_pf_get_default_rss_hena(pf);
3472err:
3473	/* send the response back to the VF */
3474	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3475					aq_ret, (u8 *)vrh, len);
3476	kfree(vrh);
3477	return aq_ret;
3478}
3479
3480/**
3481 * i40e_vc_set_rss_hena
3482 * @vf: pointer to the VF info
3483 * @msg: pointer to the msg buffer
3484 *
3485 * Set the RSS HENA bits for the VF
3486 **/
3487static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3488{
3489	struct virtchnl_rss_hena *vrh =
3490		(struct virtchnl_rss_hena *)msg;
3491	struct i40e_pf *pf = vf->pf;
3492	struct i40e_hw *hw = &pf->hw;
3493	int aq_ret = 0;
3494
3495	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3496		aq_ret = -EINVAL;
3497		goto err;
3498	}
3499	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3500	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3501			  (u32)(vrh->hena >> 32));
3502
3503	/* send the response to the VF */
3504err:
3505	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3506}
3507
3508/**
3509 * i40e_vc_enable_vlan_stripping
3510 * @vf: pointer to the VF info
3511 * @msg: pointer to the msg buffer
3512 *
3513 * Enable vlan header stripping for the VF
3514 **/
3515static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3516{
3517	struct i40e_vsi *vsi;
3518	int aq_ret = 0;
3519
3520	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3521		aq_ret = -EINVAL;
3522		goto err;
3523	}
3524
3525	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3526	i40e_vlan_stripping_enable(vsi);
3527
3528	/* send the response to the VF */
3529err:
3530	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3531				       aq_ret);
3532}
3533
3534/**
3535 * i40e_vc_disable_vlan_stripping
3536 * @vf: pointer to the VF info
3537 * @msg: pointer to the msg buffer
3538 *
3539 * Disable vlan header stripping for the VF
3540 **/
3541static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3542{
3543	struct i40e_vsi *vsi;
3544	int aq_ret = 0;
3545
3546	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3547		aq_ret = -EINVAL;
3548		goto err;
3549	}
3550
3551	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3552	i40e_vlan_stripping_disable(vsi);
3553
3554	/* send the response to the VF */
3555err:
3556	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3557				       aq_ret);
3558}
3559
3560/**
3561 * i40e_validate_cloud_filter
3562 * @vf: pointer to VF structure
3563 * @tc_filter: pointer to filter requested
3564 *
3565 * This function validates cloud filter programmed as TC filter for ADq
3566 **/
3567static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3568				      struct virtchnl_filter *tc_filter)
3569{
3570	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3571	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3572	struct i40e_pf *pf = vf->pf;
3573	struct i40e_vsi *vsi = NULL;
3574	struct i40e_mac_filter *f;
3575	struct hlist_node *h;
3576	bool found = false;
3577	int bkt;
3578
3579	if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
3580		dev_info(&pf->pdev->dev,
3581			 "VF %d: ADQ doesn't support this action (%d)\n",
3582			 vf->vf_id, tc_filter->action);
3583		goto err;
3584	}
3585
3586	/* action_meta is the TC number to which the filter is applied */
3587	if (!tc_filter->action_meta ||
3588	    tc_filter->action_meta > vf->num_tc) {
3589		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3590			 vf->vf_id, tc_filter->action_meta);
3591		goto err;
3592	}
3593
3594	/* Check whether the filter is programmed for advanced or basic mode.
3595	 * There are two ADq modes (for VFs only):
3596	 * 1. Basic mode: intended to allow as many filter options as possible
3597	 *		  to be added to a VF in non-trusted mode. The main goal
3598	 *		  is to add filters to the VF's own MAC and VLAN id.
3599	 * 2. Advanced mode: allows filters to be applied to traffic other
3600	 *		  than the VF's own MAC or VLAN. This mode requires the
3601	 *		  VF to be trusted.
3602	 */
3603	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3604		vsi = pf->vsi[vf->lan_vsi_idx];
3605		f = i40e_find_mac(vsi, data.dst_mac);
3606
3607		if (!f) {
3608			dev_info(&pf->pdev->dev,
3609				 "Destination MAC %pM doesn't belong to VF %d\n",
3610				 data.dst_mac, vf->vf_id);
3611			goto err;
3612		}
3613
3614		if (mask.vlan_id) {
3615			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3616					   hlist) {
3617				if (f->vlan == ntohs(data.vlan_id)) {
3618					found = true;
3619					break;
3620				}
3621			}
3622			if (!found) {
3623				dev_info(&pf->pdev->dev,
3624					 "VF %d doesn't have any VLAN id %u\n",
3625					 vf->vf_id, ntohs(data.vlan_id));
3626				goto err;
3627			}
3628		}
3629	} else {
3630		/* Check if VF is trusted */
3631		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3632			dev_err(&pf->pdev->dev,
3633				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3634				vf->vf_id);
3635			return -EIO;
3636		}
3637	}
3638
3639	if (mask.dst_mac[0] & data.dst_mac[0]) {
3640		if (is_broadcast_ether_addr(data.dst_mac) ||
3641		    is_zero_ether_addr(data.dst_mac)) {
3642			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3643				 vf->vf_id, data.dst_mac);
3644			goto err;
3645		}
3646	}
3647
3648	if (mask.src_mac[0] & data.src_mac[0]) {
3649		if (is_broadcast_ether_addr(data.src_mac) ||
3650		    is_zero_ether_addr(data.src_mac)) {
3651			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3652				 vf->vf_id, data.src_mac);
3653			goto err;
3654		}
3655	}
3656
3657	if (mask.dst_port & data.dst_port) {
3658		if (!data.dst_port) {
3659			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3660				 vf->vf_id);
3661			goto err;
3662		}
3663	}
3664
3665	if (mask.src_port & data.src_port) {
3666		if (!data.src_port) {
3667			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3668				 vf->vf_id);
3669			goto err;
3670		}
3671	}
3672
3673	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3674	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3675		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3676			 vf->vf_id);
3677		goto err;
3678	}
3679
3680	if (mask.vlan_id & data.vlan_id) {
3681		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3682			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3683				 vf->vf_id);
3684			goto err;
3685		}
3686	}
3687
3688	return 0;
3689err:
3690	return -EIO;
3691}
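/* Example of the two modes validated above: an untrusted VF may steer
 * "TCP to my own MAC (and my own VLAN) -> TC 2" (basic mode; dst_mac must
 * already be one of its filters), while matching on anything else, such as
 * a destination IP it does not own, requires the VF to be trusted
 * (advanced mode).
 */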
3692
3693/**
3694 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3695 * @vf: pointer to the VF info
3696 * @seid: seid of the vsi it is searching for
3697 **/
3698static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3699{
3700	struct i40e_pf *pf = vf->pf;
3701	struct i40e_vsi *vsi = NULL;
3702	int i;
3703
3704	for (i = 0; i < vf->num_tc ; i++) {
3705		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3706		if (vsi && vsi->seid == seid)
3707			return vsi;
3708	}
3709	return NULL;
3710}
3711
3712/**
3713 * i40e_del_all_cloud_filters
3714 * @vf: pointer to the VF info
3715 *
3716 * This function deletes all cloud filters
3717 **/
3718static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3719{
3720	struct i40e_cloud_filter *cfilter = NULL;
3721	struct i40e_pf *pf = vf->pf;
3722	struct i40e_vsi *vsi = NULL;
3723	struct hlist_node *node;
3724	int ret;
3725
3726	hlist_for_each_entry_safe(cfilter, node,
3727				  &vf->cloud_filter_list, cloud_node) {
3728		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3729
3730		if (!vsi) {
3731			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3732				vf->vf_id, cfilter->seid);
3733			continue;
3734		}
3735
3736		if (cfilter->dst_port)
3737			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3738								false);
3739		else
3740			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3741		if (ret)
3742			dev_err(&pf->pdev->dev,
3743				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3744				vf->vf_id, ERR_PTR(ret),
3745				i40e_aq_str(&pf->hw,
3746					    pf->hw.aq.asq_last_status));
3747
3748		hlist_del(&cfilter->cloud_node);
3749		kfree(cfilter);
3750		vf->num_cloud_filters--;
3751	}
3752}
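
/* Editor's sketch (not part of the driver): why the walk above must use
 * hlist_for_each_entry_safe(). The current node is freed mid-iteration,
 * so its successor has to be cached first - the "_safe" variant does
 * exactly that through its extra cursor argument. Plain C equivalent:
 */
#include <stdlib.h>

struct node { struct node *next; };

static void free_all(struct node *head)
{
	struct node *n = head, *next;

	while (n) {
		next = n->next;	/* cache the successor first... */
		free(n);	/* ...because n is invalid after this */
		n = next;
	}
}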
3753
3754/**
3755 * i40e_vc_del_cloud_filter
3756 * @vf: pointer to the VF info
3757 * @msg: pointer to the msg buffer
3758 *
3759 * This function deletes a cloud filter programmed as a TC filter for ADq
3760 **/
3761static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3762{
3763	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3764	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3765	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3766	struct i40e_cloud_filter cfilter, *cf = NULL;
3767	struct i40e_pf *pf = vf->pf;
3768	struct i40e_vsi *vsi = NULL;
3769	struct hlist_node *node;
3770	int aq_ret = 0;
3771	int i, ret;
3772
3773	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3774		aq_ret = -EINVAL;
3775		goto err;
3776	}
3777
3778	if (!vf->adq_enabled) {
3779		dev_info(&pf->pdev->dev,
3780			 "VF %d: ADq not enabled, can't apply cloud filter\n",
3781			 vf->vf_id);
3782		aq_ret = -EINVAL;
3783		goto err;
3784	}
3785
3786	if (i40e_validate_cloud_filter(vf, vcf)) {
3787		dev_info(&pf->pdev->dev,
3788			 "VF %d: Invalid input, can't apply cloud filter\n",
3789			 vf->vf_id);
3790		aq_ret = -EINVAL;
3791		goto err;
3792	}
3793
3794	memset(&cfilter, 0, sizeof(cfilter));
3795	/* parse destination mac address */
3796	for (i = 0; i < ETH_ALEN; i++)
3797		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3798
3799	/* parse source mac address */
3800	for (i = 0; i < ETH_ALEN; i++)
3801		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3802
3803	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3804	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3805	cfilter.src_port = mask.src_port & tcf.src_port;
3806
3807	switch (vcf->flow_type) {
3808	case VIRTCHNL_TCP_V4_FLOW:
3809		cfilter.n_proto = ETH_P_IP;
3810		if (mask.dst_ip[0] & tcf.dst_ip[0])
3811			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3812			       ARRAY_SIZE(tcf.dst_ip));
3813		else if (mask.src_ip[0] & tcf.src_ip[0])
3814			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3815			       ARRAY_SIZE(tcf.dst_ip));
3816		break;
3817	case VIRTCHNL_TCP_V6_FLOW:
3818		cfilter.n_proto = ETH_P_IPV6;
3819		if (mask.dst_ip[3] & tcf.dst_ip[3])
3820			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3821			       sizeof(cfilter.ip.v6.dst_ip6));
3822		if (mask.src_ip[3] & tcf.src_ip[3])
3823			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3824			       sizeof(cfilter.ip.v6.src_ip6));
3825		break;
3826	default:
3827		/* A TC filter can be configured from different field
3828		 * combinations; in this case IP is not part of the config
3829		 */
3830		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3831			 vf->vf_id);
3832	}
3833
3834	/* get the VSI to which the TC belongs */
3835	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3836	cfilter.seid = vsi->seid;
3837	cfilter.flags = vcf->field_flags;
3838
3839	/* Deleting TC filter */
3840	if (tcf.dst_port)
3841		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3842	else
3843		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3844	if (ret) {
3845		dev_err(&pf->pdev->dev,
3846			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3847			vf->vf_id, ERR_PTR(ret),
3848			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3849		goto err;
3850	}
3851
3852	hlist_for_each_entry_safe(cf, node,
3853				  &vf->cloud_filter_list, cloud_node) {
3854		if (cf->seid != cfilter.seid)
3855			continue;
3856		if (mask.dst_port)
3857			if (cfilter.dst_port != cf->dst_port)
3858				continue;
3859		if (mask.dst_mac[0])
3860			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3861				continue;
3862		/* for IPv4 data to be valid, only the first word of the mask is set */
3863		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3864			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3865				   ARRAY_SIZE(tcf.dst_ip)))
3866				continue;
3867		/* for IPv6, the mask is set for all sixteen bytes (4 words) */
3868		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3869			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3870				   sizeof(cfilter.ip.v6.dst_ip6)))
3871				continue;
3872		if (mask.vlan_id)
3873			if (cfilter.vlan_id != cf->vlan_id)
3874				continue;
3875
3876		hlist_del(&cf->cloud_node);
3877		kfree(cf);
3878		vf->num_cloud_filters--;
3879	}
3880
3881err:
3882	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3883				       aq_ret);
3884}
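
/* Editor's note (illustrative, not driver code): the IPv4 memcpy()
 * above passes ARRAY_SIZE(tcf.dst_ip) as a *byte* count. dst_ip is
 * u32[4], so ARRAY_SIZE() evaluates to 4, which only coincidentally
 * equals the 4 bytes of an IPv4 address; sizeof() would be 16. A
 * standalone sketch of the distinction:
 */
#include <stdint.h>
#include <string.h>

static uint32_t first_word(const uint32_t words[4])
{
	uint32_t dst = 0;

	/* 4 elements vs 16 bytes: the element count (4) happens to match
	 * sizeof(uint32_t), so copying "4" bytes grabs exactly words[0].
	 */
	memcpy(&dst, words, 4);
	return dst;
}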
3885
3886/**
3887 * i40e_vc_add_cloud_filter
3888 * @vf: pointer to the VF info
3889 * @msg: pointer to the msg buffer
3890 *
3891 * This function adds a cloud filter programmed as a TC filter for ADq
3892 **/
3893static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3894{
3895	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3896	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3897	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3898	struct i40e_cloud_filter *cfilter = NULL;
3899	struct i40e_pf *pf = vf->pf;
3900	struct i40e_vsi *vsi = NULL;
3901	int aq_ret = 0;
3902	int i;
3903
3904	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3905		aq_ret = -EINVAL;
3906		goto err_out;
3907	}
3908
3909	if (!vf->adq_enabled) {
3910		dev_info(&pf->pdev->dev,
3911			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3912			 vf->vf_id);
3913		aq_ret = -EINVAL;
3914		goto err_out;
3915	}
3916
3917	if (i40e_validate_cloud_filter(vf, vcf)) {
3918		dev_info(&pf->pdev->dev,
3919			 "VF %d: Invalid input/s, can't apply cloud filter\n",
3920			 vf->vf_id);
3921		aq_ret = -EINVAL;
3922		goto err_out;
3923	}
3924
3925	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3926	if (!cfilter) {
3927		aq_ret = -ENOMEM;
3928		goto err_out;
3929	}
3930
3931	/* parse destination mac address */
3932	for (i = 0; i < ETH_ALEN; i++)
3933		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3934
3935	/* parse source mac address */
3936	for (i = 0; i < ETH_ALEN; i++)
3937		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3938
3939	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3940	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3941	cfilter->src_port = mask.src_port & tcf.src_port;
3942
3943	switch (vcf->flow_type) {
3944	case VIRTCHNL_TCP_V4_FLOW:
3945		cfilter->n_proto = ETH_P_IP;
3946		if (mask.dst_ip[0] & tcf.dst_ip[0])
3947			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3948			       ARRAY_SIZE(tcf.dst_ip));
3949		else if (mask.src_ip[0] & tcf.src_ip[0])
3950			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3951			       ARRAY_SIZE(tcf.dst_ip));
3952		break;
3953	case VIRTCHNL_TCP_V6_FLOW:
3954		cfilter->n_proto = ETH_P_IPV6;
3955		if (mask.dst_ip[3] & tcf.dst_ip[3])
3956			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3957			       sizeof(cfilter->ip.v6.dst_ip6));
3958		if (mask.src_ip[3] & tcf.src_ip[3])
3959			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3960			       sizeof(cfilter->ip.v6.src_ip6));
3961		break;
3962	default:
3963		/* A TC filter can be configured from different field
3964		 * combinations; in this case IP is not part of the config
3965		 */
3966		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3967			 vf->vf_id);
3968	}
3969
3970	/* get the VSI to which the TC belongs */
3971	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3972	cfilter->seid = vsi->seid;
3973	cfilter->flags = vcf->field_flags;
3974
3975	/* Adding cloud filter programmed as TC filter */
3976	if (tcf.dst_port)
3977		aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3978	else
3979		aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3980	if (aq_ret) {
3981		dev_err(&pf->pdev->dev,
3982			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
3983			vf->vf_id, ERR_PTR(aq_ret),
3984			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3985		goto err_free;
3986	}
3987
3988	INIT_HLIST_NODE(&cfilter->cloud_node);
3989	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3990	/* the list owns the filter now; NULL it so the kfree() below is a no-op */
3991	cfilter = NULL;
3992	vf->num_cloud_filters++;
3993err_free:
3994	kfree(cfilter);
3995err_out:
3996	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3997				       aq_ret);
3998}
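
/* Editor's sketch (not part of the driver): the ownership hand-off used
 * above. Once the object is linked into the list, the local pointer is
 * cleared so the shared cleanup label frees nothing - free(NULL) and
 * kfree(NULL) are both defined no-ops. program_hw() is a stand-in for
 * the admin-queue call:
 */
#include <stdlib.h>

struct item { struct item *next; };

static int add_item(struct item **list, int (*program_hw)(struct item *))
{
	struct item *it = calloc(1, sizeof(*it));
	int err;

	if (!it)
		return -1;
	err = program_hw(it);
	if (err)
		goto out_free;
	it->next = *list;	/* the list now owns the object... */
	*list = it;
	it = NULL;		/* ...so the shared free below is a no-op */
out_free:
	free(it);
	return err;
}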
3999
4000/**
4001 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
4002 * @vf: pointer to the VF info
4003 * @msg: pointer to the msg buffer
4004 **/
4005static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
4006{
4007	struct virtchnl_tc_info *tci =
4008		(struct virtchnl_tc_info *)msg;
4009	struct i40e_pf *pf = vf->pf;
4010	struct i40e_link_status *ls = &pf->hw.phy.link_info;
4011	int i, adq_request_qps = 0;
4012	int aq_ret = 0;
4013	u64 speed = 0;
4014
4015	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4016		aq_ret = -EINVAL;
4017		goto err;
4018	}
4019
4020	/* ADq cannot be applied if spoof check is ON */
4021	if (vf->spoofchk) {
4022		dev_err(&pf->pdev->dev,
4023			"Spoof check is ON, turn it OFF to enable ADq\n");
4024		aq_ret = -EINVAL;
4025		goto err;
4026	}
4027
4028	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
4029		dev_err(&pf->pdev->dev,
4030			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
4031			vf->vf_id);
4032		aq_ret = -EINVAL;
4033		goto err;
4034	}
4035
4036	/* max number of traffic classes for VF currently capped at 4 */
4037	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
4038		dev_err(&pf->pdev->dev,
4039			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
4040			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
4041		aq_ret = -EINVAL;
4042		goto err;
4043	}
4044
4045	/* validate queues for each TC */
4046	for (i = 0; i < tci->num_tc; i++)
4047		if (!tci->list[i].count ||
4048		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
4049			dev_err(&pf->pdev->dev,
4050				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
4051				vf->vf_id, i, tci->list[i].count,
4052				I40E_DEFAULT_QUEUES_PER_VF);
4053			aq_ret = -EINVAL;
4054			goto err;
4055		}
4056
4057	/* need the max VF queues, beyond the default number already allocated */
4058	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
4059
4060	if (pf->queues_left < adq_request_qps) {
4061		dev_err(&pf->pdev->dev,
4062			"No queues left to allocate to VF %d\n",
4063			vf->vf_id);
4064		aq_ret = -EINVAL;
4065		goto err;
4066	} else {
4067		/* allocate the max VF queues to enable ADq, so that an
4068		 * ADq-enabled VF always gets its queues back when it
4069		 * goes through a reset.
4070		 */
4071		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
4072	}
4073
4074	/* get link speed in Mbps to validate the rate limit */
4075	speed = i40e_vc_link_speed2mbps(ls->link_speed);
4076	if (speed == SPEED_UNKNOWN) {
4077		dev_err(&pf->pdev->dev,
4078			"Cannot detect link speed\n");
4079		aq_ret = -EINVAL;
4080		goto err;
4081	}
4082
4083	/* parse data from the queue channel info */
4084	vf->num_tc = tci->num_tc;
4085	for (i = 0; i < vf->num_tc; i++) {
4086		if (tci->list[i].max_tx_rate) {
4087			if (tci->list[i].max_tx_rate > speed) {
4088				dev_err(&pf->pdev->dev,
4089					"Invalid max tx rate %llu specified for VF %d.",
4090					tci->list[i].max_tx_rate,
4091					vf->vf_id);
4092				aq_ret = -EINVAL;
4093				goto err;
4094			} else {
4095				vf->ch[i].max_tx_rate =
4096					tci->list[i].max_tx_rate;
4097			}
4098		}
4099		vf->ch[i].num_qps = tci->list[i].count;
4100	}
4101
4102	/* set this flag only after making sure all inputs are sane */
4103	vf->adq_enabled = true;
4104
4105	/* reset the VF in order to allocate resources */
4106	i40e_vc_reset_vf(vf, true);
4107
4108	return 0;
4109
4110	/* send the response to the VF */
4111err:
4112	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
4113				       aq_ret);
4114}
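
/* Editor's sketch (not part of the driver): the shape of the ADq input
 * validation above, with the driver limits pulled out as constants
 * (up to 4 TCs per VF, 1-4 queues per TC, rate capped at link speed).
 * MAX_TC and MAX_QPS_PER_TC stand in for I40E_MAX_VF_VSI and
 * I40E_DEFAULT_QUEUES_PER_VF:
 */
#include <stdint.h>

#define MAX_TC		4
#define MAX_QPS_PER_TC	4

struct tc_req { uint16_t count; uint64_t max_tx_rate; };

static int validate_adq(const struct tc_req *tc, unsigned int num_tc,
			uint64_t link_mbps)
{
	unsigned int i;

	if (!num_tc || num_tc > MAX_TC)
		return -1;		/* bad TC count */
	for (i = 0; i < num_tc; i++) {
		if (!tc[i].count || tc[i].count > MAX_QPS_PER_TC)
			return -1;	/* bad queue count for this TC */
		if (tc[i].max_tx_rate > link_mbps)
			return -1;	/* rate above current link speed */
	}
	return 0;
}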
4115
4116/**
4117 * i40e_vc_del_qch_msg
4118 * @vf: pointer to the VF info
4119 * @msg: pointer to the msg buffer
4120 **/
4121static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
4122{
4123	struct i40e_pf *pf = vf->pf;
4124	int aq_ret = 0;
4125
4126	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4127		aq_ret = -EINVAL;
4128		goto err;
4129	}
4130
4131	if (vf->adq_enabled) {
4132		i40e_del_all_cloud_filters(vf);
4133		i40e_del_qch(vf);
4134		vf->adq_enabled = false;
4135		vf->num_tc = 0;
4136		dev_info(&pf->pdev->dev,
4137			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4138			 vf->vf_id);
4139	} else {
4140		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4141			 vf->vf_id);
4142		aq_ret = -EINVAL;
4143	}
4144
4145	/* reset the VF in order to allocate resources */
4146	i40e_vc_reset_vf(vf, true);
4147
4148	return 0;
4149
4150err:
4151	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4152				       aq_ret);
4153}
4154
4155/**
4156 * i40e_vc_process_vf_msg
4157 * @pf: pointer to the PF structure
4158 * @vf_id: source VF id
4159 * @v_opcode: operation code
4160 * @v_retval: unused return value code
4161 * @msg: pointer to the msg buffer
4162 * @msglen: msg length
4163 *
4164 * called from the common aeq/arq handler to
4165 * process a request from a VF
4166 **/
4167int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4168			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
4169{
4170	struct i40e_hw *hw = &pf->hw;
4171	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4172	struct i40e_vf *vf;
4173	int ret;
4174
4175	pf->vf_aq_requests++;
4176	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4177		return -EINVAL;
4178	vf = &pf->vf[local_vf_id];
4179
4180	/* Check if VF is disabled. */
4181	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4182		return -EINVAL;
4183
4184	/* perform basic checks on the msg */
4185	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4186
4187	if (ret) {
4188		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
4189		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4190			local_vf_id, v_opcode, msglen);
4191		return ret;
4192	}
4193
4194	switch (v_opcode) {
4195	case VIRTCHNL_OP_VERSION:
4196		ret = i40e_vc_get_version_msg(vf, msg);
4197		break;
4198	case VIRTCHNL_OP_GET_VF_RESOURCES:
4199		ret = i40e_vc_get_vf_resources_msg(vf, msg);
4200		i40e_vc_notify_vf_link_state(vf);
4201		break;
4202	case VIRTCHNL_OP_RESET_VF:
4203		i40e_vc_reset_vf(vf, false);
4204		ret = 0;
4205		break;
4206	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4207		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4208		break;
4209	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4210		ret = i40e_vc_config_queues_msg(vf, msg);
4211		break;
4212	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4213		ret = i40e_vc_config_irq_map_msg(vf, msg);
4214		break;
4215	case VIRTCHNL_OP_ENABLE_QUEUES:
4216		ret = i40e_vc_enable_queues_msg(vf, msg);
4217		i40e_vc_notify_vf_link_state(vf);
4218		break;
4219	case VIRTCHNL_OP_DISABLE_QUEUES:
4220		ret = i40e_vc_disable_queues_msg(vf, msg);
4221		break;
4222	case VIRTCHNL_OP_ADD_ETH_ADDR:
4223		ret = i40e_vc_add_mac_addr_msg(vf, msg);
4224		break;
4225	case VIRTCHNL_OP_DEL_ETH_ADDR:
4226		ret = i40e_vc_del_mac_addr_msg(vf, msg);
4227		break;
4228	case VIRTCHNL_OP_ADD_VLAN:
4229		ret = i40e_vc_add_vlan_msg(vf, msg);
4230		break;
4231	case VIRTCHNL_OP_DEL_VLAN:
4232		ret = i40e_vc_remove_vlan_msg(vf, msg);
4233		break;
4234	case VIRTCHNL_OP_GET_STATS:
4235		ret = i40e_vc_get_stats_msg(vf, msg);
4236		break;
4237	case VIRTCHNL_OP_RDMA:
4238		ret = i40e_vc_rdma_msg(vf, msg, msglen);
4239		break;
4240	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
4241		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4242		break;
4243	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
4244		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4245		break;
4246	case VIRTCHNL_OP_CONFIG_RSS_KEY:
4247		ret = i40e_vc_config_rss_key(vf, msg);
4248		break;
4249	case VIRTCHNL_OP_CONFIG_RSS_LUT:
4250		ret = i40e_vc_config_rss_lut(vf, msg);
4251		break;
4252	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4253		ret = i40e_vc_get_rss_hena(vf, msg);
4254		break;
4255	case VIRTCHNL_OP_SET_RSS_HENA:
4256		ret = i40e_vc_set_rss_hena(vf, msg);
4257		break;
4258	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4259		ret = i40e_vc_enable_vlan_stripping(vf, msg);
4260		break;
4261	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4262		ret = i40e_vc_disable_vlan_stripping(vf, msg);
4263		break;
4264	case VIRTCHNL_OP_REQUEST_QUEUES:
4265		ret = i40e_vc_request_queues_msg(vf, msg);
4266		break;
4267	case VIRTCHNL_OP_ENABLE_CHANNELS:
4268		ret = i40e_vc_add_qch_msg(vf, msg);
4269		break;
4270	case VIRTCHNL_OP_DISABLE_CHANNELS:
4271		ret = i40e_vc_del_qch_msg(vf, msg);
4272		break;
4273	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4274		ret = i40e_vc_add_cloud_filter(vf, msg);
4275		break;
4276	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4277		ret = i40e_vc_del_cloud_filter(vf, msg);
4278		break;
4279	case VIRTCHNL_OP_UNKNOWN:
4280	default:
4281		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4282			v_opcode, local_vf_id);
4283		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4284					      -EOPNOTSUPP);
4285		break;
4286	}
4287
4288	return ret;
4289}
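
/* Editor's sketch (not part of the driver): the opcode switch above is
 * effectively a dispatch table. An equivalent table-driven form is
 * shown below; the uniform handler signature is an assumption made for
 * the sketch (the real handlers differ slightly, e.g. some take msglen
 * or trigger extra link-state notifications):
 */
typedef int (*vc_handler)(void *vf, unsigned char *msg);

struct vc_dispatch { unsigned int opcode; vc_handler fn; };

static int vc_dispatch_msg(const struct vc_dispatch *tbl, unsigned int n,
			   unsigned int opcode, void *vf, unsigned char *msg)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i].opcode == opcode)
			return tbl[i].fn(vf, msg);
	return -95;	/* -EOPNOTSUPP, mirroring the default: case */
}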
4290
4291/**
4292 * i40e_vc_process_vflr_event
4293 * @pf: pointer to the PF structure
4294 *
4295 * called from the VFLR irq handler to
4296 * free up VF resources and state variables
4297 **/
4298int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4299{
4300	struct i40e_hw *hw = &pf->hw;
4301	u32 reg, reg_idx, bit_idx;
4302	struct i40e_vf *vf;
4303	int vf_id;
4304
4305	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4306		return 0;
4307
4308	/* Re-enable the VFLR interrupt cause here, before looking for which
4309	 * VF got reset. Otherwise, if another VF gets a reset while the
4310	 * first one is being processed, that interrupt will be lost, and
4311	 * that VF will be stuck in reset forever.
4312	 */
4313	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4314	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4315	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4316	i40e_flush(hw);
4317
4318	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4319	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4320		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4321		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
4322		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
4323		vf = &pf->vf[vf_id];
4324		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4325		if (reg & BIT(bit_idx))
4326			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4327			i40e_reset_vf(vf, true);
4328	}
4329
4330	return 0;
4331}
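
/* Editor's sketch (not part of the driver): the index math used above.
 * GLGEN_VFLRSTAT is a bitmap spread across 32-bit registers, so an
 * absolute VF id splits into a register index and a bit index with
 * / 32 and % 32:
 */
#include <stdint.h>

static int vf_had_flr(const uint32_t *vflrstat, unsigned int abs_vf_id)
{
	unsigned int reg_idx = abs_vf_id / 32;	/* which 32-bit register */
	unsigned int bit_idx = abs_vf_id % 32;	/* which bit within it */

	return !!(vflrstat[reg_idx] & (1u << bit_idx));
}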
4332
4333/**
4334 * i40e_validate_vf
4335 * @pf: the physical function
4336 * @vf_id: VF identifier
4337 *
4338 * Check that the VF is enabled and the VSI exists.
4339 *
4340 * Returns 0 on success, negative on failure
4341 **/
4342static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4343{
4344	struct i40e_vsi *vsi;
4345	struct i40e_vf *vf;
4346	int ret = 0;
4347
4348	if (vf_id >= pf->num_alloc_vfs) {
4349		dev_err(&pf->pdev->dev,
4350			"Invalid VF Identifier %d\n", vf_id);
4351		ret = -EINVAL;
4352		goto err_out;
4353	}
4354	vf = &pf->vf[vf_id];
4355	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4356	if (!vsi)
4357		ret = -EINVAL;
4358err_out:
4359	return ret;
4360}
4361
4362/**
4363 * i40e_check_vf_init_timeout
4364 * @vf: the virtual function
4365 *
4366 * Check that the VF's initialization has completed and, if not,
4367 * wait up to 300 ms for it to finish.
4368 *
4369 * Returns true when VF is initialized, false on timeout
4370 **/
4371static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
4372{
4373	int i;
4374
4375	/* When the VF is resetting, wait until it is done.
4376	 * It can take up to 200 milliseconds, but wait for
4377	 * up to 300 milliseconds to be safe.
4378	 */
4379	for (i = 0; i < 15; i++) {
4380		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4381			return true;
4382		msleep(20);
4383	}
4384
4385	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4386		dev_err(&vf->pf->pdev->dev,
4387			"VF %d still in reset. Try again.\n", vf->vf_id);
4388		return false;
4389	}
4390
4391	return true;
4392}
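
/* Editor's sketch (not part of the driver): the bounded-poll pattern
 * above - 15 iterations of 20 ms gives the 300 ms budget. ready() and
 * sleep_20ms() stand in for the INIT state test and msleep(20):
 */
static int wait_until_ready(int (*ready)(void), void (*sleep_20ms)(void))
{
	int i;

	for (i = 0; i < 15; i++) {
		if (ready())
			return 1;
		sleep_20ms();	/* 15 * 20 ms = 300 ms worst case */
	}
	return ready();		/* final check, mirroring the code above */
}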
4393
4394/**
4395 * i40e_ndo_set_vf_mac
4396 * @netdev: network interface device structure
4397 * @vf_id: VF identifier
4398 * @mac: mac address
4399 *
4400 * program VF mac address
4401 **/
4402int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4403{
4404	struct i40e_netdev_priv *np = netdev_priv(netdev);
4405	struct i40e_vsi *vsi = np->vsi;
4406	struct i40e_pf *pf = vsi->back;
4407	struct i40e_mac_filter *f;
4408	struct i40e_vf *vf;
4409	int ret = 0;
4410	struct hlist_node *h;
4411	int bkt;
4412
4413	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4414		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4415		return -EAGAIN;
4416	}
4417
4418	/* validate the request */
4419	ret = i40e_validate_vf(pf, vf_id);
4420	if (ret)
4421		goto error_param;
4422
4423	vf = &pf->vf[vf_id];
4424	if (!i40e_check_vf_init_timeout(vf)) {
4425		ret = -EAGAIN;
4426		goto error_param;
4427	}
4428	vsi = pf->vsi[vf->lan_vsi_idx];
4429
4430	if (is_multicast_ether_addr(mac)) {
4431		dev_err(&pf->pdev->dev,
4432			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4433		ret = -EINVAL;
4434		goto error_param;
4435	}
4436
4437	/* Lock once here because the add/del_filter functions invoked
4438	 * below require mac_filter_hash_lock to be held
4439	 */
4440	spin_lock_bh(&vsi->mac_filter_hash_lock);
4441
4442	/* delete the temporary mac address */
4443	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4444		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4445
4446	/* Delete all the filters for this VSI - we're going to kill it
4447	 * anyway.
4448	 */
4449	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4450		__i40e_del_filter(vsi, f);
4451
4452	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4453
4454	/* program mac filter */
4455	if (i40e_sync_vsi_filters(vsi)) {
4456		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4457		ret = -EIO;
4458		goto error_param;
4459	}
4460	ether_addr_copy(vf->default_lan_addr.addr, mac);
4461
4462	if (is_zero_ether_addr(mac)) {
4463		vf->pf_set_mac = false;
4464		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4465	} else {
4466		vf->pf_set_mac = true;
4467		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4468			 mac, vf_id);
4469	}
4470
4471	/* Force the VF interface down so it has to come back up with the
4472	 * new MAC address
4473	 */
4474	i40e_vc_reset_vf(vf, true);
4475	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4476
4477error_param:
4478	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4479	return ret;
4480}
4481
4482/**
4483 * i40e_ndo_set_vf_port_vlan
4484 * @netdev: network interface device structure
4485 * @vf_id: VF identifier
4486 * @vlan_id: VLAN identifier
4487 * @qos: priority setting
4488 * @vlan_proto: vlan protocol
4489 *
4490 * program VF VLAN id and/or QoS
4491 **/
4492int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4493			      u16 vlan_id, u8 qos, __be16 vlan_proto)
4494{
4495	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4496	struct i40e_netdev_priv *np = netdev_priv(netdev);
4497	bool allmulti = false, alluni = false;
4498	struct i40e_pf *pf = np->vsi->back;
4499	struct i40e_vsi *vsi;
4500	struct i40e_vf *vf;
4501	int ret = 0;
4502
4503	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4504		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4505		return -EAGAIN;
4506	}
4507
4508	/* validate the request */
4509	ret = i40e_validate_vf(pf, vf_id);
4510	if (ret)
4511		goto error_pvid;
4512
4513	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4514		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4515		ret = -EINVAL;
4516		goto error_pvid;
4517	}
4518
4519	if (vlan_proto != htons(ETH_P_8021Q)) {
4520		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4521		ret = -EPROTONOSUPPORT;
4522		goto error_pvid;
4523	}
4524
4525	vf = &pf->vf[vf_id];
4526	if (!i40e_check_vf_init_timeout(vf)) {
4527		ret = -EAGAIN;
4528		goto error_pvid;
4529	}
4530	vsi = pf->vsi[vf->lan_vsi_idx];
4531
4532	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4533		/* duplicate request, so just return success */
4534		goto error_pvid;
4535
4536	i40e_vlan_stripping_enable(vsi);
4537
4538	/* Locked once because multiple functions below iterate list */
4539	spin_lock_bh(&vsi->mac_filter_hash_lock);
4540
4541	/* Check for condition where there was already a port VLAN ID
4542	 * filter set and now it is being deleted by setting it to zero.
4543	 * Additionally check for the condition where there was a port
4544	 * VLAN but now there is a new and different port VLAN being set.
4545	 * Before deleting all the old VLAN filters we must add new ones
4546	 * with -1 (I40E_VLAN_ANY); otherwise we're left with all our
4547	 * MAC addresses deleted.
4548	 */
4549	if ((!(vlan_id || qos) ||
4550	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4551	    vsi->info.pvid) {
4552		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4553		if (ret) {
4554			dev_info(&vsi->back->pdev->dev,
4555				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4556				 vsi->back->hw.aq.asq_last_status);
4557			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4558			goto error_pvid;
4559		}
4560	}
4561
4562	if (vsi->info.pvid) {
4563		/* remove all filters on the old VLAN */
4564		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4565					   VLAN_VID_MASK));
4566	}
4567
4568	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4569
4570	/* disable promisc modes in case they were enabled */
4571	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4572					      allmulti, alluni);
4573	if (ret) {
4574		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4575		goto error_pvid;
4576	}
4577
4578	if (vlan_id || qos)
4579		ret = i40e_vsi_add_pvid(vsi, vlanprio);
4580	else
4581		i40e_vsi_remove_pvid(vsi);
4582	spin_lock_bh(&vsi->mac_filter_hash_lock);
4583
4584	if (vlan_id) {
4585		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4586			 vlan_id, qos, vf_id);
4587
4588		/* add new VLAN filter for each MAC */
4589		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4590		if (ret) {
4591			dev_info(&vsi->back->pdev->dev,
4592				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4593				 vsi->back->hw.aq.asq_last_status);
4594			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4595			goto error_pvid;
4596		}
4597
4598		/* remove the previously added non-VLAN MAC filters */
4599		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4600	}
4601
4602	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4603
4604	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4605		alluni = true;
4606
4607	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4608		allmulti = true;
4609
4610	/* Schedule the worker thread to take care of applying changes */
4611	i40e_service_event_schedule(vsi->back);
4612
4613	if (ret) {
4614		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4615		goto error_pvid;
4616	}
4617
4618	/* The Port VLAN needs to be saved across resets the same as the
4619	 * default LAN MAC address.
4620	 */
4621	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4622
4623	i40e_vc_reset_vf(vf, true);
4624	/* During reset the VF got a new VSI, so refresh the pointer. */
4625	vsi = pf->vsi[vf->lan_vsi_idx];
4626
4627	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4628	if (ret) {
4629		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4630		goto error_pvid;
4631	}
4632
4633	ret = 0;
4634
4635error_pvid:
4636	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4637	return ret;
4638}
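
/* Editor's sketch (not part of the driver): the "vlanprio" packing used
 * above follows the 802.1Q tag layout - VID in bits 11:0 and priority
 * (PCP) in bits 15:13, matching the I40E_VLAN_MASK/I40E_PRIORITY_MASK
 * fields read back in i40e_ndo_get_vf_config() below. The shift value
 * of 13 is assumed from that layout:
 */
#include <stdint.h>

static uint16_t pack_vlanprio(uint16_t vlan_id, uint8_t qos)
{
	return (uint16_t)(vlan_id | ((uint16_t)qos << 13));
}

static uint16_t vlanprio_vid(uint16_t tci) { return tci & 0x0fff; }
static uint8_t  vlanprio_pcp(uint16_t tci) { return (uint8_t)(tci >> 13); }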
4639
4640/**
4641 * i40e_ndo_set_vf_bw
4642 * @netdev: network interface device structure
4643 * @vf_id: VF identifier
4644 * @min_tx_rate: Minimum Tx rate
4645 * @max_tx_rate: Maximum Tx rate
4646 *
4647 * configure VF Tx rate
4648 **/
4649int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4650		       int max_tx_rate)
4651{
4652	struct i40e_netdev_priv *np = netdev_priv(netdev);
4653	struct i40e_pf *pf = np->vsi->back;
4654	struct i40e_vsi *vsi;
4655	struct i40e_vf *vf;
4656	int ret = 0;
4657
4658	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4659		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4660		return -EAGAIN;
4661	}
4662
4663	/* validate the request */
4664	ret = i40e_validate_vf(pf, vf_id);
4665	if (ret)
4666		goto error;
4667
4668	if (min_tx_rate) {
4669		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4670			min_tx_rate, vf_id);
4671		ret = -EINVAL;
4672		goto error;
4673	}
4674
4675	vf = &pf->vf[vf_id];
4676	if (!i40e_check_vf_init_timeout(vf)) {
4677		ret = -EAGAIN;
4678		goto error;
4679	}
4680	vsi = pf->vsi[vf->lan_vsi_idx];
4681
4682	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4683	if (ret)
4684		goto error;
4685
4686	vf->tx_rate = max_tx_rate;
4687error:
4688	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4689	return ret;
4690}
4691
4692/**
4693 * i40e_ndo_get_vf_config
4694 * @netdev: network interface device structure
4695 * @vf_id: VF identifier
4696 * @ivi: VF configuration structure
4697 *
4698 * return VF configuration
4699 **/
4700int i40e_ndo_get_vf_config(struct net_device *netdev,
4701			   int vf_id, struct ifla_vf_info *ivi)
4702{
4703	struct i40e_netdev_priv *np = netdev_priv(netdev);
4704	struct i40e_vsi *vsi = np->vsi;
4705	struct i40e_pf *pf = vsi->back;
4706	struct i40e_vf *vf;
4707	int ret = 0;
4708
4709	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4710		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4711		return -EAGAIN;
4712	}
4713
4714	/* validate the request */
4715	ret = i40e_validate_vf(pf, vf_id);
4716	if (ret)
4717		goto error_param;
4718
4719	vf = &pf->vf[vf_id];
4720	/* the first VSI is always the LAN VSI */
4721	vsi = pf->vsi[vf->lan_vsi_idx];
4722	if (!vsi) {
4723		ret = -ENOENT;
4724		goto error_param;
4725	}
4726
4727	ivi->vf = vf_id;
4728
4729	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4730
4731	ivi->max_tx_rate = vf->tx_rate;
4732	ivi->min_tx_rate = 0;
4733	ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
4734	ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
4735	if (!vf->link_forced)
4736		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4737	else if (vf->link_up)
4738		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4739	else
4740		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4741	ivi->spoofchk = vf->spoofchk;
4742	ivi->trusted = vf->trusted;
4743	ret = 0;
4744
4745error_param:
4746	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4747	return ret;
4748}
4749
4750/**
4751 * i40e_ndo_set_vf_link_state
4752 * @netdev: network interface device structure
4753 * @vf_id: VF identifier
4754 * @link: required link state
4755 *
4756 * Set the link state of a specified VF, regardless of physical link state
4757 **/
4758int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4759{
4760	struct i40e_netdev_priv *np = netdev_priv(netdev);
4761	struct i40e_pf *pf = np->vsi->back;
4762	struct i40e_link_status *ls = &pf->hw.phy.link_info;
4763	struct virtchnl_pf_event pfe;
4764	struct i40e_hw *hw = &pf->hw;
4765	struct i40e_vsi *vsi;
4766	unsigned long q_map;
4767	struct i40e_vf *vf;
4768	int abs_vf_id;
4769	int ret = 0;
4770	int tmp;
4771
4772	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4773		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4774		return -EAGAIN;
4775	}
4776
4777	/* validate the request */
4778	if (vf_id >= pf->num_alloc_vfs) {
4779		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4780		ret = -EINVAL;
4781		goto error_out;
4782	}
4783
4784	vf = &pf->vf[vf_id];
4785	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4786
4787	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4788	pfe.severity = PF_EVENT_SEVERITY_INFO;
4789
4790	switch (link) {
4791	case IFLA_VF_LINK_STATE_AUTO:
4792		vf->link_forced = false;
4793		vf->is_disabled_from_host = false;
4794		/* reset needed to reinit VF resources */
4795		i40e_vc_reset_vf(vf, true);
4796		i40e_set_vf_link_state(vf, &pfe, ls);
4797		break;
4798	case IFLA_VF_LINK_STATE_ENABLE:
4799		vf->link_forced = true;
4800		vf->link_up = true;
4801		vf->is_disabled_from_host = false;
4802		/* reset needed to reinit VF resources */
4803		i40e_vc_reset_vf(vf, true);
4804		i40e_set_vf_link_state(vf, &pfe, ls);
4805		break;
4806	case IFLA_VF_LINK_STATE_DISABLE:
4807		vf->link_forced = true;
4808		vf->link_up = false;
4809		i40e_set_vf_link_state(vf, &pfe, ls);
4810
4811		vsi = pf->vsi[vf->lan_vsi_idx];
4812		q_map = BIT(vsi->num_queue_pairs) - 1;
4813
4814		vf->is_disabled_from_host = true;
4815
4816		/* Try to stop both the Tx and Rx rings even if one of the
4817		 * calls fails, to ensure the rings are stopped even in
4818		 * case of errors. If any of them returns an error, the
4819		 * first error that occurred will be returned.
4820		 */
4821		tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
4822		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
4823
4824		ret = tmp ? tmp : ret;
4825		break;
4826	default:
4827		ret = -EINVAL;
4828		goto error_out;
4829	}
4830	/* Notify the VF of its new link state */
4831	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4832			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4833
4834error_out:
4835	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4836	return ret;
4837}
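
/* Editor's sketch (not part of the driver): "BIT(n) - 1" as used for
 * q_map above builds a mask with the n lowest bits set, one per queue
 * pair to stop. Note the idiom relies on n being smaller than the
 * word width:
 */
static unsigned long queue_mask(unsigned int num_queue_pairs)
{
	return (1UL << num_queue_pairs) - 1;	/* e.g. 4 queues -> 0xf */
}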
4838
4839/**
4840 * i40e_ndo_set_vf_spoofchk
4841 * @netdev: network interface device structure
4842 * @vf_id: VF identifier
4843 * @enable: flag to enable or disable feature
4844 *
4845 * Enable or disable VF spoof checking
4846 **/
4847int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4848{
4849	struct i40e_netdev_priv *np = netdev_priv(netdev);
4850	struct i40e_vsi *vsi = np->vsi;
4851	struct i40e_pf *pf = vsi->back;
4852	struct i40e_vsi_context ctxt;
4853	struct i40e_hw *hw = &pf->hw;
4854	struct i40e_vf *vf;
4855	int ret = 0;
4856
4857	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4858		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4859		return -EAGAIN;
4860	}
4861
4862	/* validate the request */
4863	if (vf_id >= pf->num_alloc_vfs) {
4864		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4865		ret = -EINVAL;
4866		goto out;
4867	}
4868
4869	vf = &pf->vf[vf_id];
4870	if (!i40e_check_vf_init_timeout(vf)) {
4871		ret = -EAGAIN;
4872		goto out;
4873	}
4874
4875	if (enable == vf->spoofchk)
4876		goto out;
4877
4878	vf->spoofchk = enable;
4879	memset(&ctxt, 0, sizeof(ctxt));
4880	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4881	ctxt.pf_num = pf->hw.pf_id;
4882	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4883	if (enable)
4884		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4885					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4886	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4887	if (ret) {
4888		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4889			ret);
4890		ret = -EIO;
4891	}
4892out:
4893	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4894	return ret;
4895}
4896
4897/**
4898 * i40e_ndo_set_vf_trust
4899 * @netdev: network interface device structure of the pf
4900 * @vf_id: VF identifier
4901 * @setting: trust setting
4902 *
4903 * Enable or disable VF trust setting
4904 **/
4905int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4906{
4907	struct i40e_netdev_priv *np = netdev_priv(netdev);
4908	struct i40e_pf *pf = np->vsi->back;
4909	struct i40e_vf *vf;
4910	int ret = 0;
4911
4912	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4913		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4914		return -EAGAIN;
4915	}
4916
4917	/* validate the request */
4918	if (vf_id >= pf->num_alloc_vfs) {
4919		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4920		ret = -EINVAL;
4921		goto out;
4922	}
4923
4924	if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
4925		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4926		ret = -EINVAL;
4927		goto out;
4928	}
4929
4930	vf = &pf->vf[vf_id];
4931
4932	if (setting == vf->trusted)
4933		goto out;
4934
4935	vf->trusted = setting;
4936
4937	/* request the PF to sync MAC/VLAN filters for the VF */
4938	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4939	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
4940
4941	i40e_vc_reset_vf(vf, true);
4942	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4943		 vf_id, setting ? "" : "un");
4944
4945	if (vf->adq_enabled) {
4946		if (!vf->trusted) {
4947			dev_info(&pf->pdev->dev,
4948				 "VF %u no longer Trusted, deleting all cloud filters\n",
4949				 vf_id);
4950			i40e_del_all_cloud_filters(vf);
4951		}
4952	}
4953
4954out:
4955	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4956	return ret;
4957}
4958
4959/**
4960 * i40e_get_vf_stats - populate some stats for the VF
4961 * @netdev: the netdev of the PF
4962 * @vf_id: the host OS identifier (0-127)
4963 * @vf_stats: pointer to the OS memory to be initialized
4964 */
4965int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4966		      struct ifla_vf_stats *vf_stats)
4967{
4968	struct i40e_netdev_priv *np = netdev_priv(netdev);
4969	struct i40e_pf *pf = np->vsi->back;
4970	struct i40e_eth_stats *stats;
4971	struct i40e_vsi *vsi;
4972	struct i40e_vf *vf;
4973
4974	/* validate the request */
4975	if (i40e_validate_vf(pf, vf_id))
4976		return -EINVAL;
4977
4978	vf = &pf->vf[vf_id];
4979	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4980		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4981		return -EBUSY;
4982	}
4983
4984	vsi = pf->vsi[vf->lan_vsi_idx];
4985	if (!vsi)
4986		return -EINVAL;
4987
4988	i40e_update_eth_stats(vsi);
4989	stats = &vsi->eth_stats;
4990
4991	memset(vf_stats, 0, sizeof(*vf_stats));
4992
4993	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4994		stats->rx_multicast;
4995	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4996		stats->tx_multicast;
4997	vf_stats->rx_bytes   = stats->rx_bytes;
4998	vf_stats->tx_bytes   = stats->tx_bytes;
4999	vf_stats->broadcast  = stats->rx_broadcast;
5000	vf_stats->multicast  = stats->rx_multicast;
5001	vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
5002	vf_stats->tx_dropped = stats->tx_discards;
5003
5004	return 0;
5005}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*******************************************************************************
   3 *
   4 * Intel Ethernet Controller XL710 Family Linux Driver
   5 * Copyright(c) 2013 - 2016 Intel Corporation.
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 *
  16 * You should have received a copy of the GNU General Public License along
  17 * with this program.  If not, see <http://www.gnu.org/licenses/>.
  18 *
  19 * The full GNU General Public License is included in this distribution in
  20 * the file called "COPYING".
  21 *
  22 * Contact Information:
  23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  25 *
  26 ******************************************************************************/
  27
  28#include "i40e.h"
 
 
  29
  30/*********************notification routines***********************/
  31
  32/**
  33 * i40e_vc_vf_broadcast
  34 * @pf: pointer to the PF structure
  35 * @opcode: operation code
  36 * @retval: return value
  37 * @msg: pointer to the msg buffer
  38 * @msglen: msg length
  39 *
  40 * send a message to all VFs on a given PF
  41 **/
  42static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
  43				 enum virtchnl_ops v_opcode,
  44				 i40e_status v_retval, u8 *msg,
  45				 u16 msglen)
  46{
  47	struct i40e_hw *hw = &pf->hw;
  48	struct i40e_vf *vf = pf->vf;
  49	int i;
  50
  51	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
  52		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
  53		/* Not all vfs are enabled so skip the ones that are not */
  54		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
  55		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
  56			continue;
  57
  58		/* Ignore return value on purpose - a given VF may fail, but
  59		 * we need to keep going and send to all of them
  60		 */
  61		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
  62				       msg, msglen, NULL);
  63	}
  64}
  65
  66/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  67 * i40e_vc_notify_vf_link_state
  68 * @vf: pointer to the VF structure
  69 *
  70 * send a link status message to a single VF
  71 **/
  72static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
  73{
  74	struct virtchnl_pf_event pfe;
  75	struct i40e_pf *pf = vf->pf;
  76	struct i40e_hw *hw = &pf->hw;
  77	struct i40e_link_status *ls = &pf->hw.phy.link_info;
  78	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
  79
  80	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
  81	pfe.severity = PF_EVENT_SEVERITY_INFO;
  82	if (vf->link_forced) {
  83		pfe.event_data.link_event.link_status = vf->link_up;
  84		pfe.event_data.link_event.link_speed =
  85			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
  86	} else {
  87		pfe.event_data.link_event.link_status =
  88			ls->link_info & I40E_AQ_LINK_UP;
  89		pfe.event_data.link_event.link_speed =
  90			i40e_virtchnl_link_speed(ls->link_speed);
  91	}
  92	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
  93			       0, (u8 *)&pfe, sizeof(pfe), NULL);
  94}
  95
  96/**
  97 * i40e_vc_notify_link_state
  98 * @pf: pointer to the PF structure
  99 *
 100 * send a link status message to all VFs on a given PF
 101 **/
 102void i40e_vc_notify_link_state(struct i40e_pf *pf)
 103{
 104	int i;
 105
 106	for (i = 0; i < pf->num_alloc_vfs; i++)
 107		i40e_vc_notify_vf_link_state(&pf->vf[i]);
 108}
 109
 110/**
 111 * i40e_vc_notify_reset
 112 * @pf: pointer to the PF structure
 113 *
 114 * indicate a pending reset to all VFs on a given PF
 115 **/
 116void i40e_vc_notify_reset(struct i40e_pf *pf)
 117{
 118	struct virtchnl_pf_event pfe;
 119
 120	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 121	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 122	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
 123			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
 124}
 125
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 126/**
 127 * i40e_vc_notify_vf_reset
 128 * @vf: pointer to the VF structure
 129 *
 130 * indicate a pending reset to the given VF
 131 **/
 132void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 133{
 134	struct virtchnl_pf_event pfe;
 135	int abs_vf_id;
 136
 137	/* validate the request */
 138	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
 139		return;
 140
 141	/* verify if the VF is in either init or active before proceeding */
 142	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
 143	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
 144		return;
 145
 146	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
 147
 148	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 149	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 150	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 151			       0, (u8 *)&pfe,
 152			       sizeof(struct virtchnl_pf_event), NULL);
 153}
 154/***********************misc routines*****************************/
 155
 156/**
 157 * i40e_vc_disable_vf
 158 * @vf: pointer to the VF info
 159 *
 160 * Disable the VF through a SW reset.
 161 **/
 162static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 163{
 
 164	int i;
 165
 166	i40e_vc_notify_vf_reset(vf);
 
 167
 168	/* We want to ensure that an actual reset occurs initiated after this
 169	 * function was called. However, we do not want to wait forever, so
 170	 * we'll give a reasonable time and print a message if we failed to
 171	 * ensure a reset.
 172	 */
 173	for (i = 0; i < 20; i++) {
 
 
 
 
 
 174		if (i40e_reset_vf(vf, false))
 175			return;
 176		usleep_range(10000, 20000);
 177	}
 178
 179	dev_warn(&vf->pf->pdev->dev,
 180		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
 181		 vf->vf_id);
 
 
 
 
 
 182}
 183
 184/**
 185 * i40e_vc_isvalid_vsi_id
 186 * @vf: pointer to the VF info
 187 * @vsi_id: VF relative VSI id
 188 *
 189 * check for the valid VSI id
 190 **/
 191static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 192{
 193	struct i40e_pf *pf = vf->pf;
 194	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 195
 196	return (vsi && (vsi->vf_id == vf->vf_id));
 197}
 198
 199/**
 200 * i40e_vc_isvalid_queue_id
 201 * @vf: pointer to the VF info
 202 * @vsi_id: vsi id
 203 * @qid: vsi relative queue id
 204 *
 205 * check for the valid queue id
 206 **/
 207static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
 208					    u8 qid)
 209{
 210	struct i40e_pf *pf = vf->pf;
 211	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 212
 213	return (vsi && (qid < vsi->alloc_queue_pairs));
 214}
 215
 216/**
 217 * i40e_vc_isvalid_vector_id
 218 * @vf: pointer to the VF info
 219 * @vector_id: VF relative vector id
 220 *
 221 * check for the valid vector id
 222 **/
 223static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 224{
 225	struct i40e_pf *pf = vf->pf;
 226
 227	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
 228}
 229
 230/***********************vf resource mgmt routines*****************/
 231
 232/**
 233 * i40e_vc_get_pf_queue_id
 234 * @vf: pointer to the VF info
 235 * @vsi_id: id of VSI as provided by the FW
 236 * @vsi_queue_id: vsi relative queue id
 237 *
 238 * return PF relative queue id
 239 **/
 240static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
 241				   u8 vsi_queue_id)
 242{
 243	struct i40e_pf *pf = vf->pf;
 244	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 245	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 246
 247	if (!vsi)
 248		return pf_queue_id;
 249
 250	if (le16_to_cpu(vsi->info.mapping_flags) &
 251	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
 252		pf_queue_id =
 253			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
 254	else
 255		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
 256			      vsi_queue_id;
 257
 258	return pf_queue_id;
 259}
 260
 261/**
 262 * i40e_get_real_pf_qid
 263 * @vf: pointer to the VF info
 264 * @vsi_id: vsi id
 265 * @queue_id: queue number
 266 *
 267 * wrapper function to get pf_queue_id handling ADq code as well
 268 **/
 269static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
 270{
 271	int i;
 272
 273	if (vf->adq_enabled) {
 274		/* Although VF considers all the queues(can be 1 to 16) as its
 275		 * own but they may actually belong to different VSIs(up to 4).
 276		 * We need to find which queues belongs to which VSI.
 277		 */
 278		for (i = 0; i < vf->num_tc; i++) {
 279			if (queue_id < vf->ch[i].num_qps) {
 280				vsi_id = vf->ch[i].vsi_id;
 281				break;
 282			}
 283			/* find right queue id which is relative to a
 284			 * given VSI.
 285			 */
 286			queue_id -= vf->ch[i].num_qps;
 287			}
 288		}
 289
 290	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
 291}
 292
 293/**
 294 * i40e_config_irq_link_list
 295 * @vf: pointer to the VF info
 296 * @vsi_id: id of VSI as given by the FW
 297 * @vecmap: irq map info
 298 *
 299 * configure irq link list from the map
 300 **/
 301static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 302				      struct virtchnl_vector_map *vecmap)
 303{
 304	unsigned long linklistmap = 0, tempmap;
 305	struct i40e_pf *pf = vf->pf;
 306	struct i40e_hw *hw = &pf->hw;
 307	u16 vsi_queue_id, pf_queue_id;
 308	enum i40e_queue_type qtype;
 309	u16 next_q, vector_id, size;
 310	u32 reg, reg_idx;
 311	u16 itr_idx = 0;
 312
 313	vector_id = vecmap->vector_id;
 314	/* setup the head */
 315	if (0 == vector_id)
 316		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 317	else
 318		reg_idx = I40E_VPINT_LNKLSTN(
 319		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
 320		     (vector_id - 1));
 321
 322	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
 323		/* Special case - No queues mapped on this vector */
 324		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
 325		goto irq_list_done;
 326	}
 327	tempmap = vecmap->rxq_map;
 328	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 329		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
 330				    vsi_queue_id));
 331	}
 332
 333	tempmap = vecmap->txq_map;
 334	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 335		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
 336				     vsi_queue_id + 1));
 337	}
 338
 339	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
 340	next_q = find_first_bit(&linklistmap, size);
 341	if (unlikely(next_q == size))
 342		goto irq_list_done;
 343
 344	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 345	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 346	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
 347	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 348
 349	wr32(hw, reg_idx, reg);
 350
 351	while (next_q < size) {
 352		switch (qtype) {
 353		case I40E_QUEUE_TYPE_RX:
 354			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
 355			itr_idx = vecmap->rxitr_idx;
 356			break;
 357		case I40E_QUEUE_TYPE_TX:
 358			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
 359			itr_idx = vecmap->txitr_idx;
 360			break;
 361		default:
 362			break;
 363		}
 364
 365		next_q = find_next_bit(&linklistmap, size, next_q + 1);
 366		if (next_q < size) {
 367			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 368			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 369			pf_queue_id = i40e_get_real_pf_qid(vf,
 370							   vsi_id,
 371							   vsi_queue_id);
 372		} else {
 373			pf_queue_id = I40E_QUEUE_END_OF_LIST;
 374			qtype = 0;
 375		}
 376
 377		/* format for the RQCTL & TQCTL regs is same */
 378		reg = (vector_id) |
 379		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 380		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 381		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 382		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 383		wr32(hw, reg_idx, reg);
 384	}
 385
 386	/* if the vf is running in polling mode and using interrupt zero,
 387	 * need to disable auto-mask on enabling zero interrupt for VFs.
 388	 */
 389	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
 390	    (vector_id == 0)) {
 391		reg = rd32(hw, I40E_GLINT_CTL);
 392		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
 393			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
 394			wr32(hw, I40E_GLINT_CTL, reg);
 395		}
 396	}
 397
 398irq_list_done:
 399	i40e_flush(hw);
 400}
 401
 402/**
 403 * i40e_release_iwarp_qvlist
 404 * @vf: pointer to the VF.
 405 *
 406 **/
 407static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
 408{
 409	struct i40e_pf *pf = vf->pf;
 410	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
 411	u32 msix_vf;
 412	u32 i;
 413
 414	if (!vf->qvlist_info)
 415		return;
 416
 417	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 418	for (i = 0; i < qvlist_info->num_vectors; i++) {
 419		struct virtchnl_iwarp_qv_info *qv_info;
 420		u32 next_q_index, next_q_type;
 421		struct i40e_hw *hw = &pf->hw;
 422		u32 v_idx, reg_idx, reg;
 423
 424		qv_info = &qvlist_info->qv_info[i];
 425		if (!qv_info)
 426			continue;
 427		v_idx = qv_info->v_idx;
 428		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 429			/* Figure out the queue after CEQ and make that the
 430			 * first queue.
 431			 */
 432			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
 433			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
 434			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
 435					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
 436			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
 437					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
 438
 439			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 440			reg = (next_q_index &
 441			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
 442			       (next_q_type <<
 443			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 444
 445			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
 446		}
 447	}
 448	kfree(vf->qvlist_info);
 449	vf->qvlist_info = NULL;
 450}
 451
 452/**
 453 * i40e_config_iwarp_qvlist
 454 * @vf: pointer to the VF info
 455 * @qvlist_info: queue and vector list
 456 *
 457 * Return 0 on success or < 0 on error
 458 **/
 459static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
 460				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
 
 461{
 462	struct i40e_pf *pf = vf->pf;
 463	struct i40e_hw *hw = &pf->hw;
 464	struct virtchnl_iwarp_qv_info *qv_info;
 465	u32 v_idx, i, reg_idx, reg;
 466	u32 next_q_idx, next_q_type;
 467	u32 msix_vf, size;
 
 
 
 
 468
 469	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
 470	       (sizeof(struct virtchnl_iwarp_qv_info) *
 471						(qvlist_info->num_vectors - 1));
 
 
 
 
 
 
 
 
 
 472	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
 473	if (!vf->qvlist_info)
 474		return -ENOMEM;
 475
 
 476	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
 477
 478	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 479	for (i = 0; i < qvlist_info->num_vectors; i++) {
 480		qv_info = &qvlist_info->qv_info[i];
 481		if (!qv_info)
 482			continue;
 483		v_idx = qv_info->v_idx;
 484
 485		/* Validate vector id belongs to this vf */
 486		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
 487			goto err;
 
 
 
 
 488
 489		vf->qvlist_info->qv_info[i] = *qv_info;
 490
 491		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 492		/* We might be sharing the interrupt, so get the first queue
 493		 * index and type, push it down the list by adding the new
 494		 * queue on top. Also link it with the new queue in CEQCTL.
 495		 */
 496		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
 497		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
 498				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
 499		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
 500				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 501
 502		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 503			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
 504			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
 505			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
 506			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
 507			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
 508			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
 509			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
 510
 511			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 512			reg = (qv_info->ceq_idx &
 513			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
 514			       (I40E_QUEUE_TYPE_PE_CEQ <<
 515			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 516			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
 517		}
 518
 519		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
 520			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
 521			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
 522			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
 523
 524			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
 525		}
 526	}
 527
 528	return 0;
 529err:
 530	kfree(vf->qvlist_info);
 531	vf->qvlist_info = NULL;
 532	return -EINVAL;
 533}
 534
 535/**
 536 * i40e_config_vsi_tx_queue
 537 * @vf: pointer to the VF info
 538 * @vsi_id: id of VSI as provided by the FW
 539 * @vsi_queue_id: vsi relative queue index
 540 * @info: config. info
 541 *
 542 * configure tx queue
 543 **/
 544static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 545				    u16 vsi_queue_id,
 546				    struct virtchnl_txq_info *info)
 547{
 548	struct i40e_pf *pf = vf->pf;
 549	struct i40e_hw *hw = &pf->hw;
 550	struct i40e_hmc_obj_txq tx_ctx;
 551	struct i40e_vsi *vsi;
 552	u16 pf_queue_id;
 553	u32 qtx_ctl;
 554	int ret = 0;
 555
 556	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
 557		ret = -ENOENT;
 558		goto error_context;
 559	}
 560	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 561	vsi = i40e_find_vsi_from_id(pf, vsi_id);
 562	if (!vsi) {
 563		ret = -ENOENT;
 564		goto error_context;
 565	}
 566
 567	/* clear the context structure first */
 568	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
 569
 570	/* only set the required fields */
 571	tx_ctx.base = info->dma_ring_addr / 128;
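	/* The HMC context stores the ring base in 128-byte units, e.g. a
	 * ring at DMA address 0x2000 (hypothetical) programs base = 0x40.
	 */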
 572	tx_ctx.qlen = info->ring_len;
 573	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
 574	tx_ctx.rdylist_act = 0;
 575	tx_ctx.head_wb_ena = info->headwb_enabled;
 576	tx_ctx.head_wb_addr = info->dma_headwb_addr;
 577
 578	/* clear the context in the HMC */
 579	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
 580	if (ret) {
 581		dev_err(&pf->pdev->dev,
 582			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
 583			pf_queue_id, ret);
 584		ret = -ENOENT;
 585		goto error_context;
 586	}
 587
 588	/* set the context in the HMC */
 589	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
 590	if (ret) {
 591		dev_err(&pf->pdev->dev,
 592			"Failed to set VF LAN Tx queue context %d error: %d\n",
 593			pf_queue_id, ret);
 594		ret = -ENOENT;
 595		goto error_context;
 596	}
 597
 598	/* associate this queue with the PCI VF function */
 599	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
 600	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
 601		    & I40E_QTX_CTL_PF_INDX_MASK);
 602	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
 603		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
 604		    & I40E_QTX_CTL_VFVM_INDX_MASK);
 605	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
 606	i40e_flush(hw);
 607
 608error_context:
 609	return ret;
 610}
 611
 612/**
 613 * i40e_config_vsi_rx_queue
 614 * @vf: pointer to the VF info
 615 * @vsi_id: id of VSI as provided by the FW
 616 * @vsi_queue_id: vsi relative queue index
 617 * @info: config. info
 618 *
 619 * configure rx queue
 620 **/
 621static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 622				    u16 vsi_queue_id,
 623				    struct virtchnl_rxq_info *info)
 624{
 625	struct i40e_pf *pf = vf->pf;
 626	struct i40e_hw *hw = &pf->hw;
 627	struct i40e_hmc_obj_rxq rx_ctx;
 628	u16 pf_queue_id;
 629	int ret = 0;
 630
 631	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 632
 633	/* clear the context structure first */
 634	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
 635
 636	/* only set the required fields */
 637	rx_ctx.base = info->dma_ring_addr / 128;
 638	rx_ctx.qlen = info->ring_len;
 639
 640	if (info->splithdr_enabled) {
 641		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
 642				  I40E_RX_SPLIT_IP      |
 643				  I40E_RX_SPLIT_TCP_UDP |
 644				  I40E_RX_SPLIT_SCTP;
 645		/* header length validation */
 646		if (info->hdr_size > ((2 * 1024) - 64)) {
 647			ret = -EINVAL;
 648			goto error_param;
 649		}
 650		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
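		/* Worked example, assuming I40E_RXQ_CTX_HBUFF_SHIFT is 6
		 * (64-byte units): hdr_size = 256 programs hbuff = 4.  The
		 * check above caps hdr_size at (2 * 1024) - 64 = 1984 bytes.
		 */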
 651
 652		/* set split mode 10b */
 653		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
 654	}
 655
 656	/* databuffer length validation */
 657	if (info->databuffer_size > ((16 * 1024) - 128)) {
 658		ret = -EINVAL;
 659		goto error_param;
 660	}
 661	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
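	/* Worked example, assuming I40E_RXQ_CTX_DBUFF_SHIFT is 7 (128-byte
	 * units): databuffer_size = 2048 programs dbuff = 16.  The check
	 * above caps the buffer at (16 * 1024) - 128 = 16256 bytes.
	 */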
 662
 663	/* max pkt. length validation */
 664	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
 665		ret = -EINVAL;
 666		goto error_param;
 667	}
 668	rx_ctx.rxmax = info->max_pkt_size;
 669
 670	/* always enable 32-byte descriptors */
 671	rx_ctx.dsize = 1;
 672
 673	/* default values */
 674	rx_ctx.lrxqthresh = 1;
 675	rx_ctx.crcstrip = 1;
 676	rx_ctx.prefena = 1;
 677	rx_ctx.l2tsel = 1;
 678
 679	/* clear the context in the HMC */
 680	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
 681	if (ret) {
 682		dev_err(&pf->pdev->dev,
 683			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
 684			pf_queue_id, ret);
 685		ret = -ENOENT;
 686		goto error_param;
 687	}
 688
 689	/* set the context in the HMC */
 690	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
 691	if (ret) {
 692		dev_err(&pf->pdev->dev,
 693			"Failed to set VF LAN Rx queue context %d error: %d\n",
 694			pf_queue_id, ret);
 695		ret = -ENOENT;
 696		goto error_param;
 697	}
 698
 699error_param:
 700	return ret;
 701}
 702
 703/**
 704 * i40e_alloc_vsi_res
 705 * @vf: pointer to the VF info
 706 * @idx: VSI index, applies only for ADq mode, zero otherwise
 707 *
 708 * alloc VF vsi context & resources
 709 **/
 710static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
 711{
 712	struct i40e_mac_filter *f = NULL;
 713	struct i40e_pf *pf = vf->pf;
 714	struct i40e_vsi *vsi;
 715	u64 max_tx_rate = 0;
 716	int ret = 0;
 717
 718	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
 719			     vf->vf_id);
 720
 721	if (!vsi) {
 722		dev_err(&pf->pdev->dev,
 723			"add vsi failed for VF %d, aq_err %d\n",
 724			vf->vf_id, pf->hw.aq.asq_last_status);
 725		ret = -ENOENT;
 726		goto error_alloc_vsi_res;
 727	}
 728
 729	if (!idx) {
 730		u64 hena = i40e_pf_get_default_rss_hena(pf);
 731		u8 broadcast[ETH_ALEN];
 732
 733		vf->lan_vsi_idx = vsi->idx;
 734		vf->lan_vsi_id = vsi->id;
 735		/* If the port VLAN has been configured and then the
 736		 * VF driver was removed then the VSI port VLAN
 737		 * configuration was destroyed.  Check if there is
 738		 * a port VLAN and restore the VSI configuration if
 739		 * needed.
 740		 */
 741		if (vf->port_vlan_id)
 742			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 743
 744		spin_lock_bh(&vsi->mac_filter_hash_lock);
 745		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
 746			f = i40e_add_mac_filter(vsi,
 747						vf->default_lan_addr.addr);
 748			if (!f)
 749				dev_info(&pf->pdev->dev,
 750					 "Could not add MAC filter %pM for VF %d\n",
 751					vf->default_lan_addr.addr, vf->vf_id);
 752		}
 753		eth_broadcast_addr(broadcast);
 754		f = i40e_add_mac_filter(vsi, broadcast);
 755		if (!f)
 756			dev_info(&pf->pdev->dev,
 757				 "Could not allocate VF broadcast filter\n");
 758		spin_unlock_bh(&vsi->mac_filter_hash_lock);
 759		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
 760		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
 761		/* program mac filter only for VF VSI */
 762		ret = i40e_sync_vsi_filters(vsi);
 763		if (ret)
 764			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 765	}
 766
 767	/* Store the VSI index and id for ADq; the MAC filter is not applied here */
 768	if (vf->adq_enabled) {
 769		vf->ch[idx].vsi_idx = vsi->idx;
 770		vf->ch[idx].vsi_id = vsi->id;
 771	}
 772
 773	/* Set VF bandwidth if specified */
 774	if (vf->tx_rate) {
 775		max_tx_rate = vf->tx_rate;
 776	} else if (vf->ch[idx].max_tx_rate) {
 777		max_tx_rate = vf->ch[idx].max_tx_rate;
 778	}
 779
 780	if (max_tx_rate) {
 781		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
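		/* Illustrative conversion, assuming I40E_BW_CREDIT_DIVISOR
		 * is 50 (one scheduler credit per 50 Mbps): a requested
		 * 300 Mbps becomes 300 / 50 = 6 credits.
		 */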
 782		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
 783						  max_tx_rate, 0, NULL);
 784		if (ret)
 785			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
 786				vf->vf_id, ret);
 787	}
 788
 789error_alloc_vsi_res:
 790	return ret;
 791}
 792
 793/**
 794 * i40e_map_pf_queues_to_vsi
 795 * @vf: pointer to the VF info
 796 *
 797 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 798 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 799 **/
 800static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
 801{
 802	struct i40e_pf *pf = vf->pf;
 803	struct i40e_hw *hw = &pf->hw;
 804	u32 reg, num_tc = 1; /* VF has at least one traffic class */
 805	u16 vsi_id, qps;
 806	int i, j;
 807
 808	if (vf->adq_enabled)
 809		num_tc = vf->num_tc;
 810
 811	for (i = 0; i < num_tc; i++) {
 812		if (vf->adq_enabled) {
 813			qps = vf->ch[i].num_qps;
 814			vsi_id = vf->ch[i].vsi_id;
 815		} else {
 816			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 817			vsi_id = vf->lan_vsi_id;
 818		}
 819
 820		for (j = 0; j < 7; j++) {
 821			if (j * 2 >= qps) {
 822				/* end of list */
 823				reg = 0x07FF07FF;
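				/* 0x7FF is the all-ones 11-bit queue index;
				 * it appears in both 16-bit halves here to
				 * mark end-of-list.
				 */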
 824			} else {
 825				u16 qid = i40e_vc_get_pf_queue_id(vf,
 826								  vsi_id,
 827								  j * 2);
 828				reg = qid;
 829				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
 830							      (j * 2) + 1);
 831				reg |= qid << 16;
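				/* Each VSILAN_QTABLE register packs two PF
				 * queue ids: the even queue in bits 15:0,
				 * the odd queue in bits 31:16.  E.g. PF
				 * queues 12 and 13 (hypothetical) yield
				 * reg = (13 << 16) | 12.
				 */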
 832			}
 833			i40e_write_rx_ctl(hw,
 834					  I40E_VSILAN_QTABLE(j, vsi_id),
 835					  reg);
 836		}
 837	}
 838}
 839
 840/**
 841 * i40e_map_pf_to_vf_queues
 842 * @vf: pointer to the VF info
 843 *
 844 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 845 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 846 **/
 847static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
 848{
 849	struct i40e_pf *pf = vf->pf;
 850	struct i40e_hw *hw = &pf->hw;
 851	u32 reg, total_qps = 0;
 852	u32 qps, num_tc = 1; /* VF has at least one traffic class */
 853	u16 vsi_id, qid;
 854	int i, j;
 855
 856	if (vf->adq_enabled)
 857		num_tc = vf->num_tc;
 858
 859	for (i = 0; i < num_tc; i++) {
 860		if (vf->adq_enabled) {
 861			qps = vf->ch[i].num_qps;
 862			vsi_id = vf->ch[i].vsi_id;
 863		} else {
 864			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 865			vsi_id = vf->lan_vsi_id;
 866		}
 867
 868		for (j = 0; j < qps; j++) {
 869			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
 870
 871			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
 872			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
 873			     reg);
 874			total_qps++;
 875		}
 876	}
 877}
 878
 879/**
 880 * i40e_enable_vf_mappings
 881 * @vf: pointer to the VF info
 882 *
 883 * enable VF mappings
 884 **/
 885static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 886{
 887	struct i40e_pf *pf = vf->pf;
 888	struct i40e_hw *hw = &pf->hw;
 889	u32 reg;
 890
 891	/* Tell the hardware we're using noncontiguous mapping. HW requires
 892	 * that VF queues be mapped using this method, even when they are
 893	 * contiguous in real life
 894	 */
 895	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
 896			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
 897
 898	/* enable VF vplan_qtable mappings */
 899	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
 900	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 901
 902	i40e_map_pf_to_vf_queues(vf);
 903	i40e_map_pf_queues_to_vsi(vf);
 904
 905	i40e_flush(hw);
 906}
 907
 908/**
 909 * i40e_disable_vf_mappings
 910 * @vf: pointer to the VF info
 911 *
 912 * disable VF mappings
 913 **/
 914static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 915{
 916	struct i40e_pf *pf = vf->pf;
 917	struct i40e_hw *hw = &pf->hw;
 918	int i;
 919
 920	/* disable qp mappings */
 921	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
 922	for (i = 0; i < I40E_MAX_VSI_QP; i++)
 923		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
 924		     I40E_QUEUE_END_OF_LIST);
 925	i40e_flush(hw);
 926}
 927
 928/**
 929 * i40e_free_vf_res
 930 * @vf: pointer to the VF info
 931 *
 932 * free VF resources
 933 **/
 934static void i40e_free_vf_res(struct i40e_vf *vf)
 935{
 936	struct i40e_pf *pf = vf->pf;
 937	struct i40e_hw *hw = &pf->hw;
 938	u32 reg_idx, reg;
 939	int i, j, msix_vf;
 940
 941	/* Start by disabling VF's configuration API to prevent the OS from
 942	 * accessing the VF's VSI after it's freed / invalidated.
 943	 */
 944	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
 945
 946	/* It's possible the VF had requested more queues than the default, so
 947	 * do the accounting here when we're about to free them.
 948	 */
 949	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
 950		pf->queues_left += vf->num_queue_pairs -
 951				   I40E_DEFAULT_QUEUES_PER_VF;
 952	}
 953
 954	/* free vsi & disconnect it from the parent uplink */
 955	if (vf->lan_vsi_idx) {
 956		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
 957		vf->lan_vsi_idx = 0;
 958		vf->lan_vsi_id = 0;
 959		vf->num_mac = 0;
 960	}
 961
 962	/* do the accounting and remove additional ADq VSI's */
 963	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
 964		for (j = 0; j < vf->num_tc; j++) {
 965			/* At this point VSI0 has already been released, so
 966			 * don't release it again; just clear its index and
 967			 * id in the channel bookkeeping
 968			 */
 969			if (j)
 970				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
 971			vf->ch[j].vsi_idx = 0;
 972			vf->ch[j].vsi_id = 0;
 973		}
 974	}
 975	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 976
 977	/* disable interrupts so the VF starts in a known state */
 978	for (i = 0; i < msix_vf; i++) {
 979		/* format is same for both registers */
 980		if (i == 0)
 981			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
 982		else
 983			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
 984						      (vf->vf_id))
 985						     + (i - 1));
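		/* Worked example (hypothetical values): with msix_vf = 5
		 * vectors per VF, VF id 2 and vector i = 3:
		 *
		 *	reg_idx = (5 - 1) * 2 + (3 - 1) = 10
		 *
		 * so this VF's vector 3 is I40E_VFINT_DYN_CTLN(10).
		 */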
 986		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
 987		i40e_flush(hw);
 988	}
 989
 990	/* clear the irq settings */
 991	for (i = 0; i < msix_vf; i++) {
 992		/* format is same for both registers */
 993		if (i == 0)
 994			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 995		else
 996			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
 997						      (vf->vf_id))
 998						     + (i - 1));
 999		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
1000		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1001		wr32(hw, reg_idx, reg);
1002		i40e_flush(hw);
1003	}
1004	/* reset some of the state variables keeping track of the resources */
1005	vf->num_queue_pairs = 0;
1006	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1007	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1008}
1009
1010/**
1011 * i40e_alloc_vf_res
1012 * @vf: pointer to the VF info
1013 *
1014 * allocate VF resources
1015 **/
1016static int i40e_alloc_vf_res(struct i40e_vf *vf)
1017{
1018	struct i40e_pf *pf = vf->pf;
1019	int total_queue_pairs = 0;
1020	int ret, idx;
1021
1022	if (vf->num_req_queues &&
1023	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1024		pf->num_vf_qps = vf->num_req_queues;
1025	else
1026		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1027
1028	/* allocate hw vsi context & associated resources */
1029	ret = i40e_alloc_vsi_res(vf, 0);
1030	if (ret)
1031		goto error_alloc;
1032	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1033
1034	/* allocate additional VSIs based on tc information for ADq */
1035	if (vf->adq_enabled) {
1036		if (pf->queues_left >=
1037		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1038			/* TC 0 always belongs to VF VSI */
1039			for (idx = 1; idx < vf->num_tc; idx++) {
1040				ret = i40e_alloc_vsi_res(vf, idx);
1041				if (ret)
1042					goto error_alloc;
1043			}
1044			/* send correct number of queues */
1045			total_queue_pairs = I40E_MAX_VF_QUEUES;
1046		} else {
1047			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1048				 vf->vf_id);
1049			vf->adq_enabled = false;
1050		}
1051	}
1052
1053	/* We account for each VF to get a default number of queue pairs.  If
1054	 * the VF has now requested more, we need to account for that to make
1055	 * certain we never request more queues than we actually have left in
1056	 * HW.
1057	 */
1058	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1059		pf->queues_left -=
1060			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1061
1062	if (vf->trusted)
1063		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1064	else
1065		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1066
1067	/* store the total qps number for the runtime
1068	 * VF req validation
1069	 */
1070	vf->num_queue_pairs = total_queue_pairs;
1071
1072	/* VF is now completely initialized */
1073	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1074
1075error_alloc:
1076	if (ret)
1077		i40e_free_vf_res(vf);
1078
1079	return ret;
1080}
1081
1082#define VF_DEVICE_STATUS 0xAA
1083#define VF_TRANS_PENDING_MASK 0x20
1084/**
1085 * i40e_quiesce_vf_pci
1086 * @vf: pointer to the VF structure
1087 *
1088 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1089 * if the transactions never clear.
1090 **/
1091static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1092{
1093	struct i40e_pf *pf = vf->pf;
1094	struct i40e_hw *hw = &pf->hw;
1095	int vf_abs_id, i;
1096	u32 reg;
1097
1098	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1099
1100	wr32(hw, I40E_PF_PCI_CIAA,
1101	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
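	/* CIAA/CIAD form an indirect window into the VF's PCI config space:
	 * CIAA selects the register (VF_DEVICE_STATUS appears to address the
	 * PCIe Device Status register) and the VF number; CIAD returns the
	 * data.  VF_TRANS_PENDING_MASK (0x20) matches the Transactions
	 * Pending bit, so the loop below spins for up to ~100us until the
	 * VF has no outstanding PCIe transactions.
	 */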
1102	for (i = 0; i < 100; i++) {
1103		reg = rd32(hw, I40E_PF_PCI_CIAD);
1104		if ((reg & VF_TRANS_PENDING_MASK) == 0)
1105			return 0;
1106		udelay(1);
1107	}
1108	return -EIO;
1109}
1110
1111/**
1112 * i40e_trigger_vf_reset
1113 * @vf: pointer to the VF structure
1114 * @flr: VFLR was issued or not
1115 *
1116 * Trigger hardware to start a reset for a particular VF. Expects the caller
1117 * to wait the proper amount of time to allow hardware to reset the VF before
1118 * it cleans up and restores VF functionality.
1119 **/
1120static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1121{
1122	struct i40e_pf *pf = vf->pf;
1123	struct i40e_hw *hw = &pf->hw;
1124	u32 reg, reg_idx, bit_idx;
1125
1126	/* warn the VF */
1127	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1128
1129	/* Disable VF's configuration API during reset. The flag is re-enabled
1130	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1131	 * It's normally disabled in i40e_free_vf_res(), but it's safer
1132	 * to do it earlier to give any VF config functions that may still
1133	 * be running at this point some time to finish.
1134	 */
1135	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1136
1137	/* In the case of a VFLR, the HW has already reset the VF and we
1138	 * just need to clean up, so don't hit the VFRTRIG register.
1139	 */
1140	if (!flr) {
1141		/* reset VF using VPGEN_VFRTRIG reg */
1142		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1143		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1144		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1145		i40e_flush(hw);
1146	}
1147	/* clear the VFLR bit in GLGEN_VFLRSTAT */
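	/* Each GLGEN_VFLRSTAT register covers 32 VFs, hence the /32 and %32
	 * below.  E.g. an absolute VF id of 37 (hypothetical) lands in
	 * register 1, bit 5.
	 */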
1148	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1149	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1150	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1151	i40e_flush(hw);
1152
1153	if (i40e_quiesce_vf_pci(vf))
1154		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1155			vf->vf_id);
1156}
1157
1158/**
1159 * i40e_cleanup_reset_vf
1160 * @vf: pointer to the VF structure
1161 *
1162 * Cleanup a VF after the hardware reset is finished. Expects the caller to
1163 * have verified whether the reset is finished properly, and ensure the
1164 * minimum amount of wait time has passed.
1165 **/
1166static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1167{
1168	struct i40e_pf *pf = vf->pf;
1169	struct i40e_hw *hw = &pf->hw;
1170	u32 reg;
1171
1172	/* free VF resources to begin resetting the VSI state */
1173	i40e_free_vf_res(vf);
1174
1175	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1176	 * By doing this we allow HW to access VF memory at any point. If we
1177	 * did it any sooner, HW could access memory while it was being freed
1178	 * in i40e_free_vf_res(), causing an IOMMU fault.
1179	 *
1180	 * On the other hand, this needs to be done ASAP, because the VF driver
1181	 * is waiting for this to happen and may report a timeout. It's
1182	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1183	 * it.
1184	 */
1185	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1186	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1187	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1188
1189	/* reallocate VF resources to finish resetting the VSI state */
1190	if (!i40e_alloc_vf_res(vf)) {
1191		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1192		i40e_enable_vf_mappings(vf);
1193		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1194		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1195		/* Do not notify the client during VF init */
1196		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1197					&vf->vf_states))
1198			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1199		vf->num_vlan = 0;
1200	}
1201
1202	/* Tell the VF driver the reset is done. This needs to be done only
1203	 * after VF has been fully initialized, because the VF driver may
1204	 * request resources immediately after setting this flag.
1205	 */
1206	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1207}
1208
1209/**
1210 * i40e_reset_vf
1211 * @vf: pointer to the VF structure
1212 * @flr: VFLR was issued or not
1213 *
1214 * Returns true if the VF is reset, false otherwise.
1215 **/
1216bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1217{
1218	struct i40e_pf *pf = vf->pf;
1219	struct i40e_hw *hw = &pf->hw;
1220	bool rsd = false;
1221	u32 reg;
1222	int i;
1223
1224	/* If the VFs have been disabled, this means something else is
1225	 * resetting the VF, so we shouldn't continue.
1226	 */
1227	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1228		return false;
1229
1230	i40e_trigger_vf_reset(vf, flr);
1231
1232	/* poll VPGEN_VFRSTAT reg to make sure
1233	 * that reset is complete
1234	 */
1235	for (i = 0; i < 10; i++) {
1236		/* VF reset requires driver to first reset the VF and then
1237		 * poll the status register to make sure that the reset
1238		 * completed successfully. Due to internal HW FIFO flushes,
1239		 * we must wait 10ms before the register will be valid.
1240		 */
1241		usleep_range(10000, 20000);
1242		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1243		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1244			rsd = true;
1245			break;
1246		}
1247	}
1248
1249	if (flr)
1250		usleep_range(10000, 20000);
1251
1252	if (!rsd)
1253		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1254			vf->vf_id);
1255	usleep_range(10000, 20000);
1256
1257	/* On initial reset, we don't have any queues to disable */
1258	if (vf->lan_vsi_idx != 0)
1259		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1260
1261	i40e_cleanup_reset_vf(vf);
1262
1263	i40e_flush(hw);
1264	clear_bit(__I40E_VF_DISABLE, pf->state);
1265
1266	return true;
1267}
1268
1269/**
1270 * i40e_reset_all_vfs
1271 * @pf: pointer to the PF structure
1272 * @flr: VFLR was issued or not
1273 *
1274 * Reset all allocated VFs in one go. First, tell the hardware to reset each
1275 * VF, then do all the waiting in one chunk, and finally finish restoring each
1276 * VF after the wait. This is useful during PF routines which need to reset
1277 * all VFs, as otherwise it must perform these resets in a serialized fashion.
1278 *
1279 * Returns true if any VFs were reset, and false otherwise.
1280 **/
1281bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1282{
1283	struct i40e_hw *hw = &pf->hw;
1284	struct i40e_vf *vf;
1285	int i, v;
1286	u32 reg;
1287
1288	/* If we don't have any VFs, then there is nothing to reset */
1289	if (!pf->num_alloc_vfs)
1290		return false;
1291
1292	/* If VFs have been disabled, there is no need to reset */
1293	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1294		return false;
1295
1296	/* Begin reset on all VFs at once */
1297	for (v = 0; v < pf->num_alloc_vfs; v++)
1298		i40e_trigger_vf_reset(&pf->vf[v], flr);
1299
1300	/* HW requires some time to make sure it can flush the FIFO for a VF
1301	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1302	 * sequence to make sure that it has completed. We'll keep track of
1303	 * the VFs using a simple iterator that increments once that VF has
1304	 * finished resetting.
1305	 */
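	/* Illustrative walk-through (hypothetical): with 4 VFs, if VFs 0 and
	 * 1 report VFRD on the first 10ms tick, v advances to 2 and later
	 * ticks only poll VFs 2 and 3, so each VF is polled only until it
	 * first reports done.
	 */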
1306	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1307		usleep_range(10000, 20000);
1308
1309		/* Check each VF in sequence, beginning with the VF that
1310		 * failed the previous check.
1311		 */
1312		while (v < pf->num_alloc_vfs) {
1313			vf = &pf->vf[v];
1314			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1315			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1316				break;
1317
1318			/* If the current VF has finished resetting, move on
1319			 * to the next VF in sequence.
1320			 */
1321			v++;
1322		}
1323	}
1324
1325	if (flr)
1326		usleep_range(10000, 20000);
1327
1328	/* Display a warning if at least one VF didn't manage to reset in
1329	 * time, but continue on with the operation.
1330	 */
1331	if (v < pf->num_alloc_vfs)
1332		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1333			pf->vf[v].vf_id);
1334	usleep_range(10000, 20000);
1335
1336	/* Begin disabling all the rings associated with VFs, but do not wait
1337	 * between each VF.
1338	 */
1339	for (v = 0; v < pf->num_alloc_vfs; v++) {
1340		/* On initial reset, we don't have any queues to disable */
1341		if (pf->vf[v].lan_vsi_idx == 0)
1342			continue;
1343
1344		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1345	}
1346
1347	/* Now that we've notified HW to disable all of the VF rings, wait
1348	 * until they finish.
1349	 */
1350	for (v = 0; v < pf->num_alloc_vfs; v++) {
1351		/* On initial reset, we don't have any queues to disable */
1352		if (pf->vf[v].lan_vsi_idx == 0)
1353			continue;
1354
1355		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1356	}
1357
1358	/* HW may need up to 50ms to finish disabling the Rx queues. We
1359	 * minimize the wait by delaying only once for all VFs.
1360	 */
1361	mdelay(50);
1362
1363	/* Finish the reset on each VF */
1364	for (v = 0; v < pf->num_alloc_vfs; v++)
1365		i40e_cleanup_reset_vf(&pf->vf[v]);
1366
1367	i40e_flush(hw);
1368	clear_bit(__I40E_VF_DISABLE, pf->state);
1369
1370	return true;
1371}
1372
1373/**
1374 * i40e_free_vfs
1375 * @pf: pointer to the PF structure
1376 *
1377 * free VF resources
1378 **/
1379void i40e_free_vfs(struct i40e_pf *pf)
1380{
1381	struct i40e_hw *hw = &pf->hw;
1382	u32 reg_idx, bit_idx;
1383	int i, tmp, vf_id;
1384
1385	if (!pf->vf)
1386		return;
1387	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1388		usleep_range(1000, 2000);
1389
1390	i40e_notify_client_of_vf_enable(pf, 0);
1391
1392	/* Amortize wait time by stopping all VFs at the same time */
1393	for (i = 0; i < pf->num_alloc_vfs; i++) {
1394		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1395			continue;
1396
1397		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1398	}
1399
1400	for (i = 0; i < pf->num_alloc_vfs; i++) {
1401		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1402			continue;
1403
1404		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1405	}
1406
1407	/* Disable IOV before freeing resources. This lets any VF drivers
1408	 * running in the host get themselves cleaned up before we yank
1409	 * the carpet out from underneath their feet.
1410	 */
1411	if (!pci_vfs_assigned(pf->pdev))
1412		pci_disable_sriov(pf->pdev);
1413	else
1414		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1415
1416	/* free up VF resources */
1417	tmp = pf->num_alloc_vfs;
1418	pf->num_alloc_vfs = 0;
1419	for (i = 0; i < tmp; i++) {
1420		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1421			i40e_free_vf_res(&pf->vf[i]);
1422		/* disable qp mappings */
1423		i40e_disable_vf_mappings(&pf->vf[i]);
1424	}
1425
1426	kfree(pf->vf);
1427	pf->vf = NULL;
1428
1429	/* This check is for when the driver is unloaded while VFs are
1430	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1431	 * before this function ever gets called.
1432	 */
1433	if (!pci_vfs_assigned(pf->pdev)) {
1434		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1435		 * work correctly when SR-IOV gets re-enabled.
1436		 */
1437		for (vf_id = 0; vf_id < tmp; vf_id++) {
1438			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1439			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1440			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1441		}
1442	}
1443	clear_bit(__I40E_VF_DISABLE, pf->state);
1444}
1445
1446#ifdef CONFIG_PCI_IOV
1447/**
1448 * i40e_alloc_vfs
1449 * @pf: pointer to the PF structure
1450 * @num_alloc_vfs: number of VFs to allocate
1451 *
1452 * allocate VF resources
1453 **/
1454int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1455{
1456	struct i40e_vf *vfs;
1457	int i, ret = 0;
1458
1459	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1460	i40e_irq_dynamic_disable_icr0(pf);
1461
1462	/* Check to see if we're just allocating resources for extant VFs */
1463	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1464		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1465		if (ret) {
1466			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1467			pf->num_alloc_vfs = 0;
1468			goto err_iov;
1469		}
1470	}
1471	/* allocate memory */
1472	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1473	if (!vfs) {
1474		ret = -ENOMEM;
1475		goto err_alloc;
1476	}
1477	pf->vf = vfs;
1478
1479	/* apply default profile */
1480	for (i = 0; i < num_alloc_vfs; i++) {
1481		vfs[i].pf = pf;
1482		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1483		vfs[i].vf_id = i;
1484
1485		/* assign default capabilities */
1486		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1487		vfs[i].spoofchk = true;
1488
1489		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1490
1491	}
1492	pf->num_alloc_vfs = num_alloc_vfs;
1493
1494	/* VF resources get allocated during reset */
1495	i40e_reset_all_vfs(pf, false);
1496
1497	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1498
1499err_alloc:
1500	if (ret)
1501		i40e_free_vfs(pf);
1502err_iov:
1503	/* Re-enable interrupt 0. */
1504	i40e_irq_dynamic_enable_icr0(pf);
1505	return ret;
1506}
1507
1508#endif
1509/**
1510 * i40e_pci_sriov_enable
1511 * @pdev: pointer to a pci_dev structure
1512 * @num_vfs: number of VFs to allocate
1513 *
1514 * Enable or change the number of VFs
1515 **/
1516static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1517{
1518#ifdef CONFIG_PCI_IOV
1519	struct i40e_pf *pf = pci_get_drvdata(pdev);
1520	int pre_existing_vfs = pci_num_vf(pdev);
1521	int err = 0;
1522
1523	if (test_bit(__I40E_TESTING, pf->state)) {
1524		dev_warn(&pdev->dev,
1525			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1526		err = -EPERM;
1527		goto err_out;
1528	}
1529
1530	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1531		i40e_free_vfs(pf);
1532	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1533		goto out;
1534
1535	if (num_vfs > pf->num_req_vfs) {
1536		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1537			 num_vfs, pf->num_req_vfs);
1538		err = -EPERM;
1539		goto err_out;
1540	}
1541
1542	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1543	err = i40e_alloc_vfs(pf, num_vfs);
1544	if (err) {
1545		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1546		goto err_out;
1547	}
1548
1549out:
1550	return num_vfs;
1551
1552err_out:
1553	return err;
1554#endif
1555	return 0;
1556}
1557
1558/**
1559 * i40e_pci_sriov_configure
1560 * @pdev: pointer to a pci_dev structure
1561 * @num_vfs: number of VFs to allocate
1562 *
1563 * Enable or change the number of VFs. Called when the user updates the number
1564 * of VFs in sysfs.
1565 **/
1566int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1567{
1568	struct i40e_pf *pf = pci_get_drvdata(pdev);
1569
1570	if (num_vfs) {
1571		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1572			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1573			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1574		}
1575		return i40e_pci_sriov_enable(pdev, num_vfs);
1576	}
1577
1578	if (!pci_vfs_assigned(pf->pdev)) {
1579		i40e_free_vfs(pf);
1580		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1581		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1582	} else {
1583		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1584		return -EINVAL;
1585	}
1586	return 0;
1587}
1588
1589/***********************virtual channel routines******************/
1590
1591/**
1592 * i40e_vc_send_msg_to_vf
1593 * @vf: pointer to the VF info
1594 * @v_opcode: virtual channel opcode
1595 * @v_retval: virtual channel return value
1596 * @msg: pointer to the msg buffer
1597 * @msglen: msg length
1598 *
1599 * send msg to VF
1600 **/
1601static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1602				  u32 v_retval, u8 *msg, u16 msglen)
1603{
1604	struct i40e_pf *pf;
1605	struct i40e_hw *hw;
1606	int abs_vf_id;
1607	i40e_status aq_ret;
1608
1609	/* validate the request */
1610	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1611		return -EINVAL;
1612
1613	pf = vf->pf;
1614	hw = &pf->hw;
1615	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1616
1617	/* single place to detect unsuccessful return values */
1618	if (v_retval) {
1619		vf->num_invalid_msgs++;
1620		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1621			 vf->vf_id, v_opcode, v_retval);
1622		if (vf->num_invalid_msgs >
1623		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1624			dev_err(&pf->pdev->dev,
1625				"Number of invalid messages exceeded for VF %d\n",
1626				vf->vf_id);
1627			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1628			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1629		}
1630	} else {
1631		vf->num_valid_msgs++;
1632		/* reset the invalid counter if a valid message is received. */
1633		vf->num_invalid_msgs = 0;
1634	}
1635
1636	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,	v_opcode, v_retval,
1637					msg, msglen, NULL);
1638	if (aq_ret) {
1639		dev_info(&pf->pdev->dev,
1640			 "Unable to send the message to VF %d aq_err %d\n",
1641			 vf->vf_id, pf->hw.aq.asq_last_status);
1642		return -EIO;
1643	}
1644
1645	return 0;
1646}
1647
1648/**
1649 * i40e_vc_send_resp_to_vf
1650 * @vf: pointer to the VF info
1651 * @opcode: operation code
1652 * @retval: return value
1653 *
1654 * send resp msg to VF
1655 **/
1656static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1657				   enum virtchnl_ops opcode,
1658				   i40e_status retval)
1659{
1660	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1661}
1662
1663/**
1664 * i40e_vc_get_version_msg
1665 * @vf: pointer to the VF info
     * @msg: pointer to the msg buffer
1666 *
1667 * called from the VF to request the API version used by the PF
1668 **/
1669static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1670{
1671	struct virtchnl_version_info info = {
1672		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1673	};
1674
1675	vf->vf_ver = *(struct virtchnl_version_info *)msg;
1676	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1677	if (VF_IS_V10(&vf->vf_ver))
1678		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1679	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1680				      I40E_SUCCESS, (u8 *)&info,
1681				      sizeof(struct virtchnl_version_info));
1682}
1683
1684/**
1685 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
1686 * @vf: pointer to VF structure
1687 **/
1688static void i40e_del_qch(struct i40e_vf *vf)
1689{
1690	struct i40e_pf *pf = vf->pf;
1691	int i;
1692
1693	/* first element in the array belongs to primary VF VSI and we shouldn't
1694	 * delete it. We should however delete the rest of the VSIs created
1695	 */
1696	for (i = 1; i < vf->num_tc; i++) {
1697		if (vf->ch[i].vsi_idx) {
1698			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
1699			vf->ch[i].vsi_idx = 0;
1700			vf->ch[i].vsi_id = 0;
1701		}
1702	}
1703}
1704
1705/**
1706 * i40e_vc_get_vf_resources_msg
1707 * @vf: pointer to the VF info
1708 * @msg: pointer to the msg buffer
1710 *
1711 * called from the VF to request its resources
1712 **/
1713static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1714{
1715	struct virtchnl_vf_resource *vfres = NULL;
1716	struct i40e_pf *pf = vf->pf;
1717	i40e_status aq_ret = 0;
1718	struct i40e_vsi *vsi;
1719	int num_vsis = 1;
1720	int len = 0;
1721	int ret;
1722
1723	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
1724		aq_ret = I40E_ERR_PARAM;
1725		goto err;
1726	}
1727
1728	len = (sizeof(struct virtchnl_vf_resource) +
1729	       sizeof(struct virtchnl_vsi_resource) * num_vsis);
1730
1731	vfres = kzalloc(len, GFP_KERNEL);
1732	if (!vfres) {
1733		aq_ret = I40E_ERR_NO_MEMORY;
1734		len = 0;
1735		goto err;
1736	}
1737	if (VF_IS_V11(&vf->vf_ver))
1738		vf->driver_caps = *(u32 *)msg;
1739	else
1740		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1741				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1742				  VIRTCHNL_VF_OFFLOAD_VLAN;
1743
1744	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1745	vsi = pf->vsi[vf->lan_vsi_idx];
1746	if (!vsi->info.pvid)
1747		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1748
1749	if (i40e_vf_client_capable(pf, vf->vf_id) &&
1750	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
1751		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
1752		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1753	} else {
1754		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1755	}
1756
1757	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1758		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1759	} else {
1760		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
1761		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
1762			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1763		else
1764			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1765	}
1766
1767	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
1768		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1769			vfres->vf_cap_flags |=
1770				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1771	}
1772
1773	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1774		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1775
1776	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
1777	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
1778		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1779
1780	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
1781		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
1782			dev_err(&pf->pdev->dev,
1783				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
1784				 vf->vf_id);
1785			aq_ret = I40E_ERR_PARAM;
1786			goto err;
1787		}
1788		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1789	}
1790
1791	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
1792		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1793			vfres->vf_cap_flags |=
1794					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1795	}
1796
1797	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1798		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1799
1800	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
1801		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
1802
1803	vfres->num_vsis = num_vsis;
1804	vfres->num_queue_pairs = vf->num_queue_pairs;
1805	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1806	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
1807	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
1808
1809	if (vf->lan_vsi_idx) {
1810		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
1811		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1812		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
1813		/* VFs only use TC 0 */
1814		vfres->vsi_res[0].qset_handle
1815					  = le16_to_cpu(vsi->info.qs_handle[0]);
1816		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1817				vf->default_lan_addr.addr);
1818	}
1819	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1820
1821err:
1822	/* send the response back to the VF */
1823	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
1824				     aq_ret, (u8 *)vfres, len);
1825
1826	kfree(vfres);
1827	return ret;
1828}
1829
1830/**
1831 * i40e_vc_reset_vf_msg
1832 * @vf: pointer to the VF info
1835 *
1836 * called from the VF to reset itself,
1837 * unlike other virtchnl messages, PF driver
1838 * doesn't send the response back to the VF
1839 **/
1840static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1841{
1842	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
1843		i40e_reset_vf(vf, false);
1844}
1845
1846/**
1847 * i40e_getnum_vf_vsi_vlan_filters
1848 * @vsi: pointer to the vsi
1849 *
1850 * called to get the number of VLANs offloaded on this VF
1851 **/
1852static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1853{
1854	struct i40e_mac_filter *f;
1855	int num_vlans = 0, bkt;
1856
1857	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1858		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1859			num_vlans++;
1860	}
1861
1862	return num_vlans;
1863}
1864
1865/**
1866 * i40e_vc_config_promiscuous_mode_msg
1867 * @vf: pointer to the VF info
1868 * @msg: pointer to the msg buffer
1869 * @msglen: msg length
1870 *
1871 * called from the VF to configure the promiscuous mode of
1872 * VF VSIs
1873 **/
1874static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1875					       u8 *msg, u16 msglen)
1876{
1877	struct virtchnl_promisc_info *info =
1878	    (struct virtchnl_promisc_info *)msg;
1879	struct i40e_pf *pf = vf->pf;
1880	struct i40e_hw *hw = &pf->hw;
1881	struct i40e_mac_filter *f;
1882	i40e_status aq_ret = 0;
1883	bool allmulti = false;
1884	struct i40e_vsi *vsi;
1885	bool alluni = false;
1886	int aq_err = 0;
1887	int bkt;
1888
1889	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
1890	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
1891	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1892	    !vsi) {
1893		aq_ret = I40E_ERR_PARAM;
1894		goto error_param;
1895	}
1896	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
1897		dev_err(&pf->pdev->dev,
1898			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
1899			vf->vf_id);
1900		/* Lie to the VF on purpose. */
1901		aq_ret = 0;
1902		goto error_param;
1903	}
 1904	/* Multicast promiscuous handling */
1905	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
1906		allmulti = true;
1907
1908	if (vf->port_vlan_id) {
1909		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
1910							    allmulti,
1911							    vf->port_vlan_id,
1912							    NULL);
1913	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1914		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1915			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1916				continue;
1917			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
1918								    vsi->seid,
1919								    allmulti,
1920								    f->vlan,
1921								    NULL);
1922			aq_err = pf->hw.aq.asq_last_status;
1923			if (aq_ret) {
1924				dev_err(&pf->pdev->dev,
1925					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
1926					f->vlan,
1927					i40e_stat_str(&pf->hw, aq_ret),
1928					i40e_aq_str(&pf->hw, aq_err));
1929				break;
1930			}
1931		}
1932	} else {
1933		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1934							       allmulti, NULL);
1935		aq_err = pf->hw.aq.asq_last_status;
1936		if (aq_ret) {
1937			dev_err(&pf->pdev->dev,
1938				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1939				vf->vf_id,
1940				i40e_stat_str(&pf->hw, aq_ret),
1941				i40e_aq_str(&pf->hw, aq_err));
1942			goto error_param;
1943		}
1944	}
1945
1946	if (!aq_ret) {
1947		dev_info(&pf->pdev->dev,
1948			 "VF %d successfully set multicast promiscuous mode\n",
1949			 vf->vf_id);
1950		if (allmulti)
1951			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1952		else
1953			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1954	}
1955
1956	if (info->flags & FLAG_VF_UNICAST_PROMISC)
1957		alluni = true;
1958	if (vf->port_vlan_id) {
1959		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
1960							    alluni,
1961							    vf->port_vlan_id,
1962							    NULL);
1963	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1964		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1965			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1966				continue;
1967			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
1968								    vsi->seid,
1969								    alluni,
1970								    f->vlan,
1971								    NULL);
1972			aq_err = pf->hw.aq.asq_last_status;
1973			if (aq_ret)
1974				dev_err(&pf->pdev->dev,
1975					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
1976					f->vlan,
1977					i40e_stat_str(&pf->hw, aq_ret),
1978					i40e_aq_str(&pf->hw, aq_err));
1979		}
1980	} else {
1981		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1982							     alluni, NULL,
1983							     true);
1984		aq_err = pf->hw.aq.asq_last_status;
1985		if (aq_ret) {
1986			dev_err(&pf->pdev->dev,
1987				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
1988				vf->vf_id, info->flags,
1989				i40e_stat_str(&pf->hw, aq_ret),
1990				i40e_aq_str(&pf->hw, aq_err));
1991			goto error_param;
1992		}
1993	}
1994
1995	if (!aq_ret) {
1996		dev_info(&pf->pdev->dev,
1997			 "VF %d successfully set unicast promiscuous mode\n",
1998			 vf->vf_id);
1999		if (alluni)
2000			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
2001		else
2002			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
2003	}
2004
2005error_param:
2006	/* send the response to the VF */
2007	return i40e_vc_send_resp_to_vf(vf,
2008				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2009				       aq_ret);
2010}
2011
2012/**
2013 * i40e_vc_config_queues_msg
2014 * @vf: pointer to the VF info
2015 * @msg: pointer to the msg buffer
2016 * @msglen: msg length
2017 *
2018 * called from the VF to configure the rx/tx
2019 * queues
2020 **/
2021static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2022{
2023	struct virtchnl_vsi_queue_config_info *qci =
2024	    (struct virtchnl_vsi_queue_config_info *)msg;
2025	struct virtchnl_queue_pair_info *qpi;
2026	struct i40e_pf *pf = vf->pf;
2027	u16 vsi_id, vsi_queue_id = 0;
2028	i40e_status aq_ret = 0;
2029	int i, j = 0, idx = 0;
2030
2031	vsi_id = qci->vsi_id;
2032
2033	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2034		aq_ret = I40E_ERR_PARAM;
2035		goto error_param;
2036	}
2037
2038	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2039		aq_ret = I40E_ERR_PARAM;
2040		goto error_param;
2041	}
2042
2043	for (i = 0; i < qci->num_queue_pairs; i++) {
2044		qpi = &qci->qpair[i];
2045
2046		if (!vf->adq_enabled) {
2047			vsi_queue_id = qpi->txq.queue_id;
2048
2049			if (qpi->txq.vsi_id != qci->vsi_id ||
2050			    qpi->rxq.vsi_id != qci->vsi_id ||
2051			    qpi->rxq.queue_id != vsi_queue_id) {
2052				aq_ret = I40E_ERR_PARAM;
2053				goto error_param;
2054			}
2055		}
2056
2057		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
2058			aq_ret = I40E_ERR_PARAM;
2059			goto error_param;
2060		}
2061
2062		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2063					     &qpi->rxq) ||
2064		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2065					     &qpi->txq)) {
2066			aq_ret = I40E_ERR_PARAM;
2067			goto error_param;
2068		}
2069
2070		/* For ADq there can be up to 4 VSIs with max 4 queues each.
2071		 * The VF does not know about these additional VSIs; all it
2072		 * cares about is its own queues. The PF configures these
2073		 * queues in the appropriate VSIs based on the TC mapping.
2074		 */
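		/* Illustrative walk (hypothetical sizes): with two TCs of
		 * num_qps = 4 each, queue pairs 0-3 are configured on
		 * ch[0]'s VSI with vsi_queue_id 0-3; then idx becomes 1,
		 * vsi_queue_id resets to 0, and pairs 4-7 land on ch[1]'s
		 * VSI.
		 */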
2075		if (vf->adq_enabled) {
2076			if (j == (vf->ch[idx].num_qps - 1)) {
2077				idx++;
2078				j = 0; /* resetting the queue count */
2079				vsi_queue_id = 0;
2080			} else {
2081				j++;
2082				vsi_queue_id++;
2083			}
2084			vsi_id = vf->ch[idx].vsi_id;
2085		}
2086	}
2087	/* set vsi num_queue_pairs in use to num configured by VF */
2088	if (!vf->adq_enabled) {
2089		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2090			qci->num_queue_pairs;
2091	} else {
2092		for (i = 0; i < vf->num_tc; i++)
2093			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2094			       vf->ch[i].num_qps;
2095	}
2096
2097error_param:
2098	/* send the response to the VF */
2099	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2100				       aq_ret);
2101}
2102
2103/**
2104 * i40e_validate_queue_map
     * @vf: pointer to the VF info
2105 * @vsi_id: vsi id
2106 * @queuemap: Tx or Rx queue map
2107 *
2108 * check if Tx or Rx queue map is valid
2109 **/
2110static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2111				   unsigned long queuemap)
2112{
2113	u16 vsi_queue_id, queue_id;
2114
2115	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2116		if (vf->adq_enabled) {
2117			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2118			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2119		} else {
2120			queue_id = vsi_queue_id;
2121		}
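		/* Worked example, assuming I40E_MAX_VF_VSI = 4 and
		 * I40E_DEFAULT_QUEUES_PER_VF = 4: with ADq enabled,
		 * vsi_queue_id = 6 maps to channel 6 / 4 = 1 and
		 * queue_id = 6 % 4 = 2 on that channel's VSI.
		 */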
2122
2123		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2124			return -EINVAL;
2125	}
2126
2127	return 0;
2128}
2129
2130/**
2131 * i40e_vc_config_irq_map_msg
2132 * @vf: pointer to the VF info
2133 * @msg: pointer to the msg buffer
2134 * @msglen: msg length
2135 *
2136 * called from the VF to configure the irq to
2137 * queue map
2138 **/
2139static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2140{
2141	struct virtchnl_irq_map_info *irqmap_info =
2142	    (struct virtchnl_irq_map_info *)msg;
2143	struct virtchnl_vector_map *map;
2144	u16 vsi_id, vector_id;
2145	i40e_status aq_ret = 0;
2146	int i;
2147
2148	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2149		aq_ret = I40E_ERR_PARAM;
2150		goto error_param;
2151	}
2152
2153	for (i = 0; i < irqmap_info->num_vectors; i++) {
2154		map = &irqmap_info->vecmap[i];
2155		vector_id = map->vector_id;
2156		vsi_id = map->vsi_id;
2157		/* validate msg params */
2158		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
2159		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2160			aq_ret = I40E_ERR_PARAM;
2161			goto error_param;
2162		}
2163
2164		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2165			aq_ret = I40E_ERR_PARAM;
2166			goto error_param;
2167		}
2168
2169		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2170			aq_ret = I40E_ERR_PARAM;
2171			goto error_param;
2172		}
2173
2174		i40e_config_irq_link_list(vf, vsi_id, map);
2175	}
2176error_param:
2177	/* send the response to the VF */
2178	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2179				       aq_ret);
2180}
2181
2182/**
2183 * i40e_vc_enable_queues_msg
2184 * @vf: pointer to the VF info
2185 * @msg: pointer to the msg buffer
2186 * @msglen: msg length
2187 *
2188 * called from the VF to enable all or specific queue(s)
2189 **/
2190static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2191{
2192	struct virtchnl_queue_select *vqs =
2193	    (struct virtchnl_queue_select *)msg;
2194	struct i40e_pf *pf = vf->pf;
2195	u16 vsi_id = vqs->vsi_id;
2196	i40e_status aq_ret = 0;
2197	int i;
2198
2199	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2200		aq_ret = I40E_ERR_PARAM;
2201		goto error_param;
2202	}
2203
2204	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2205		aq_ret = I40E_ERR_PARAM;
2206		goto error_param;
2207	}
2208
 2209	if (vqs->rx_queues == 0 && vqs->tx_queues == 0) {
2210		aq_ret = I40E_ERR_PARAM;
2211		goto error_param;
2212	}
2213
2214	if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
2215		aq_ret = I40E_ERR_TIMEOUT;
2216
2217	/* need to start the rings for additional ADq VSI's as well */
2218	if (vf->adq_enabled) {
2219		/* zero belongs to LAN VSI */
2220		for (i = 1; i < vf->num_tc; i++) {
2221			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2222				aq_ret = I40E_ERR_TIMEOUT;
2223		}
2224	}
2225
2226error_param:
2227	/* send the response to the VF */
2228	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2229				       aq_ret);
2230}
2231
2232/**
2233 * i40e_vc_disable_queues_msg
2234 * @vf: pointer to the VF info
2235 * @msg: pointer to the msg buffer
2236 * @msglen: msg length
2237 *
2238 * called from the VF to disable all or specific
2239 * queue(s)
2240 **/
2241static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2242{
2243	struct virtchnl_queue_select *vqs =
2244	    (struct virtchnl_queue_select *)msg;
2245	struct i40e_pf *pf = vf->pf;
2246	i40e_status aq_ret = 0;
2247
2248	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2249		aq_ret = I40E_ERR_PARAM;
2250		goto error_param;
2251	}
2252
2253	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2254		aq_ret = I40E_ERR_PARAM;
2255		goto error_param;
2256	}
2257
 2258	if (vqs->rx_queues == 0 && vqs->tx_queues == 0) {
2259		aq_ret = I40E_ERR_PARAM;
2260		goto error_param;
2261	}
2262
2263	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
2264
2265error_param:
2266	/* send the response to the VF */
2267	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2268				       aq_ret);
2269}
2270
2271/**
2272 * i40e_vc_request_queues_msg
2273 * @vf: pointer to the VF info
2274 * @msg: pointer to the msg buffer
2275 * @msglen: msg length
2276 *
 2277 * VFs get a default number of queues but can use this message to request a
 2278 * different number.  If the request is successful, the PF will reset the VF
 2279 * and return 0.  If unsuccessful, the PF will send a message informing the VF
 2280 * of the number of available queues and return the result of sending that message.
 2281 **/
2282static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
2283{
2284	struct virtchnl_vf_res_request *vfres =
2285		(struct virtchnl_vf_res_request *)msg;
2286	int req_pairs = vfres->num_queue_pairs;
2287	int cur_pairs = vf->num_queue_pairs;
2288	struct i40e_pf *pf = vf->pf;
2289
2290	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2291		return -EINVAL;
2292
2293	if (req_pairs <= 0) {
2294		dev_err(&pf->pdev->dev,
2295			"VF %d tried to request %d queues.  Ignoring.\n",
2296			vf->vf_id, req_pairs);
2297	} else if (req_pairs > I40E_MAX_VF_QUEUES) {
2298		dev_err(&pf->pdev->dev,
2299			"VF %d tried to request more than %d queues.\n",
2300			vf->vf_id,
2301			I40E_MAX_VF_QUEUES);
2302		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2303	} else if (req_pairs - cur_pairs > pf->queues_left) {
2304		dev_warn(&pf->pdev->dev,
2305			 "VF %d requested %d more queues, but only %d left.\n",
2306			 vf->vf_id,
2307			 req_pairs - cur_pairs,
2308			 pf->queues_left);
2309		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2310	} else {
2311		/* successful request */
2312		vf->num_req_queues = req_pairs;
2313		i40e_vc_notify_vf_reset(vf);
2314		i40e_reset_vf(vf, false);
2315		return 0;
2316	}
2317
2318	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2319				      (u8 *)vfres, sizeof(*vfres));
2320}
2321
2322/**
2323 * i40e_vc_get_stats_msg
2324 * @vf: pointer to the VF info
2325 * @msg: pointer to the msg buffer
2326 * @msglen: msg length
2327 *
2328 * called from the VF to get vsi stats
2329 **/
2330static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2331{
2332	struct virtchnl_queue_select *vqs =
2333	    (struct virtchnl_queue_select *)msg;
2334	struct i40e_pf *pf = vf->pf;
2335	struct i40e_eth_stats stats;
2336	i40e_status aq_ret = 0;
2337	struct i40e_vsi *vsi;
2338
2339	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2340
2341	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2342		aq_ret = I40E_ERR_PARAM;
2343		goto error_param;
2344	}
2345
2346	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2347		aq_ret = I40E_ERR_PARAM;
2348		goto error_param;
2349	}
2350
2351	vsi = pf->vsi[vf->lan_vsi_idx];
2352	if (!vsi) {
2353		aq_ret = I40E_ERR_PARAM;
2354		goto error_param;
2355	}
2356	i40e_update_eth_stats(vsi);
2357	stats = vsi->eth_stats;
2358
2359error_param:
2360	/* send the response back to the VF */
2361	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2362				      (u8 *)&stats, sizeof(stats));
2363}
2364
2365/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can program */
2366#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
2367#define I40E_VC_MAX_VLAN_PER_VF 8
2368
2369/**
2370 * i40e_check_vf_permission
2371 * @vf: pointer to the VF info
2372 * @al: MAC address list from virtchnl
2373 *
2374 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2375 * if any address in the list is not valid. Checks the following conditions:
2376 *
2377 * 1) broadcast and zero addresses are never valid
2378 * 2) unicast addresses are not allowed if the VMM has administratively set
2379 *    the VF MAC address, unless the VF is marked as privileged.
2380 * 3) There is enough space to add all the addresses.
2381 *
2382 * Note that to guarantee consistency, it is expected that this function is
2383 * called while holding the mac_filter_hash_lock, as otherwise the current
2384 * number of addresses might not be accurate.
2385 **/
2386static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2387					   struct virtchnl_ether_addr_list *al)
2388{
2389	struct i40e_pf *pf = vf->pf;
2390	int i;
2391
2392	/* If this VF is not privileged, then we can't add more than a limited
2393	 * number of addresses. Check to make sure that the additions do not
2394	 * push us over the limit.
2395	 */
2396	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2397	    (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
2398		dev_err(&pf->pdev->dev,
2399			"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2400		return -EPERM;
2401	}
2402
2403	for (i = 0; i < al->num_elements; i++) {
2404		u8 *addr = al->list[i].addr;
2405
2406		if (is_broadcast_ether_addr(addr) ||
2407		    is_zero_ether_addr(addr)) {
2408			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2409				addr);
2410			return I40E_ERR_INVALID_MAC_ADDR;
2411		}
2412
2413		/* If the host VMM administrator has set the VF MAC address
2414		 * administratively via the ndo_set_vf_mac command then deny
2415		 * permission to the VF to add or delete unicast MAC addresses.
2416		 * Unless the VF is privileged and then it can do whatever.
2417		 * The VF may request to set the MAC address filter already
2418		 * assigned to it so do not return an error in that case.
2419		 */
2420		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2421		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2422		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2423			dev_err(&pf->pdev->dev,
2424				"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
2425			return -EPERM;
2426		}
2427	}
2428
2429	return 0;
2430}
2431
2432/**
2433 * i40e_vc_add_mac_addr_msg
2434 * @vf: pointer to the VF info
2435 * @msg: pointer to the msg buffer
2436 * @msglen: msg length
2437 *
2438 * add guest mac address filter
2439 **/
2440static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2441{
2442	struct virtchnl_ether_addr_list *al =
2443	    (struct virtchnl_ether_addr_list *)msg;
2444	struct i40e_pf *pf = vf->pf;
2445	struct i40e_vsi *vsi = NULL;
2446	u16 vsi_id = al->vsi_id;
2447	i40e_status ret = 0;
2448	int i;
2449
2450	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2451	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2452		ret = I40E_ERR_PARAM;
2453		goto error_param;
2454	}
2455
2456	vsi = pf->vsi[vf->lan_vsi_idx];
2457
2458	/* Lock once, because every function called inside the for loop accesses
2459	 * the VSI's MAC filter list, which must be protected by the same lock.
2460	 */
2461	spin_lock_bh(&vsi->mac_filter_hash_lock);
2462
2463	ret = i40e_check_vf_permission(vf, al);
2464	if (ret) {
2465		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2466		goto error_param;
2467	}
2468
2469	/* add new addresses to the list */
2470	for (i = 0; i < al->num_elements; i++) {
2471		struct i40e_mac_filter *f;
2472
2473		f = i40e_find_mac(vsi, al->list[i].addr);
2474		if (!f) {
2475			f = i40e_add_mac_filter(vsi, al->list[i].addr);
2476
2477			if (!f) {
2478				dev_err(&pf->pdev->dev,
2479					"Unable to add MAC filter %pM for VF %d\n",
2480					al->list[i].addr, vf->vf_id);
2481				ret = I40E_ERR_PARAM;
2482				spin_unlock_bh(&vsi->mac_filter_hash_lock);
2483				goto error_param;
2484			} else {
2485				vf->num_mac++;
2486			}
2487		}
2488	}
2489	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2490
2491	/* program the updated filter list */
2492	ret = i40e_sync_vsi_filters(vsi);
2493	if (ret)
2494		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2495			vf->vf_id, ret);
2496
2497error_param:
2498	/* send the response to the VF */
2499	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2500				       ret);
2501}
2502
2503/**
2504 * i40e_vc_del_mac_addr_msg
2505 * @vf: pointer to the VF info
2506 * @msg: pointer to the msg buffer
2507 * @msglen: msg length
2508 *
2509 * remove guest mac address filter
2510 **/
2511static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2512{
2513	struct virtchnl_ether_addr_list *al =
2514	    (struct virtchnl_ether_addr_list *)msg;
2515	struct i40e_pf *pf = vf->pf;
2516	struct i40e_vsi *vsi = NULL;
2517	u16 vsi_id = al->vsi_id;
2518	i40e_status ret = 0;
2519	int i;
2520
2521	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2522	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2523		ret = I40E_ERR_PARAM;
2524		goto error_param;
2525	}
2526
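	/* Validate every address before deleting any, so a single invalid
	 * entry leaves the filter list untouched.
	 */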
2527	for (i = 0; i < al->num_elements; i++) {
2528		if (is_broadcast_ether_addr(al->list[i].addr) ||
2529		    is_zero_ether_addr(al->list[i].addr)) {
2530			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2531				al->list[i].addr, vf->vf_id);
2532			ret = I40E_ERR_INVALID_MAC_ADDR;
2533			goto error_param;
2534		}
2535	}
2536	vsi = pf->vsi[vf->lan_vsi_idx];
2537
2538	spin_lock_bh(&vsi->mac_filter_hash_lock);
2539	/* delete addresses from the list */
2540	for (i = 0; i < al->num_elements; i++)
2541		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2542			ret = I40E_ERR_INVALID_MAC_ADDR;
2543			spin_unlock_bh(&vsi->mac_filter_hash_lock);
2544			goto error_param;
2545		} else {
2546			vf->num_mac--;
2547		}
2548
2549	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2550
2551	/* program the updated filter list */
2552	ret = i40e_sync_vsi_filters(vsi);
2553	if (ret)
2554		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2555			vf->vf_id, ret);
2556
2557error_param:
2558	/* send the response to the VF */
2559	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2560				       ret);
2561}
2562
2563/**
2564 * i40e_vc_add_vlan_msg
2565 * @vf: pointer to the VF info
2566 * @msg: pointer to the msg buffer
2567 * @msglen: msg length
2568 *
2569 * program guest vlan id
2570 **/
2571static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2572{
2573	struct virtchnl_vlan_filter_list *vfl =
2574	    (struct virtchnl_vlan_filter_list *)msg;
2575	struct i40e_pf *pf = vf->pf;
2576	struct i40e_vsi *vsi = NULL;
2577	u16 vsi_id = vfl->vsi_id;
2578	i40e_status aq_ret = 0;
2579	int i;
2580
2581	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2582	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2583		dev_err(&pf->pdev->dev,
2584			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2585		aq_ret = I40E_ERR_PARAM;
		goto error_param;
2586	}
2587	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2588	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2589		aq_ret = I40E_ERR_PARAM;
2590		goto error_param;
2591	}
2592
2593	for (i = 0; i < vfl->num_elements; i++) {
2594		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2595			aq_ret = I40E_ERR_PARAM;
2596			dev_err(&pf->pdev->dev,
2597				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2598			goto error_param;
2599		}
2600	}
2601	vsi = pf->vsi[vf->lan_vsi_idx];
2602	if (vsi->info.pvid) {
2603		aq_ret = I40E_ERR_PARAM;
2604		goto error_param;
2605	}
2606
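	/* Stripping is enabled whenever the VF programs VLAN filters; any
	 * active promiscuous state is then re-applied per VLAN in the loop
	 * below.
	 */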
2607	i40e_vlan_stripping_enable(vsi);
2608	for (i = 0; i < vfl->num_elements; i++) {
2609		/* add new VLAN filter */
2610		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2611		if (!ret)
2612			vf->num_vlan++;
2613
2614		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2615			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2616							   true,
2617							   vfl->vlan_id[i],
2618							   NULL);
2619		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2620			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2621							   true,
2622							   vfl->vlan_id[i],
2623							   NULL);
2624
2625		if (ret)
2626			dev_err(&pf->pdev->dev,
2627				"Unable to add VLAN filter %d for VF %d, error %d\n",
2628				vfl->vlan_id[i], vf->vf_id, ret);
2629	}
2630
2631error_param:
2632	/* send the response to the VF */
2633	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2634}
2635
2636/**
2637 * i40e_vc_remove_vlan_msg
2638 * @vf: pointer to the VF info
2639 * @msg: pointer to the msg buffer
2640 * @msglen: msg length
2641 *
2642 * remove programmed guest vlan id
2643 **/
2644static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2645{
2646	struct virtchnl_vlan_filter_list *vfl =
2647	    (struct virtchnl_vlan_filter_list *)msg;
2648	struct i40e_pf *pf = vf->pf;
2649	struct i40e_vsi *vsi = NULL;
2650	u16 vsi_id = vfl->vsi_id;
2651	i40e_status aq_ret = 0;
2652	int i;
2653
2654	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2655	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2656		aq_ret = I40E_ERR_PARAM;
2657		goto error_param;
2658	}
2659
2660	for (i = 0; i < vfl->num_elements; i++) {
2661		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2662			aq_ret = I40E_ERR_PARAM;
2663			goto error_param;
2664		}
2665	}
2666
2667	vsi = pf->vsi[vf->lan_vsi_idx];
2668	if (vsi->info.pvid) {
2669		aq_ret = I40E_ERR_PARAM;
2670		goto error_param;
2671	}
2672
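	/* Remove each filter along with the matching per-VLAN promiscuous
	 * settings.
	 */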
2673	for (i = 0; i < vfl->num_elements; i++) {
2674		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2675		vf->num_vlan--;
2676
2677		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2678			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2679							   false,
2680							   vfl->vlan_id[i],
2681							   NULL);
2682		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2683			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2684							   false,
2685							   vfl->vlan_id[i],
2686							   NULL);
2687	}
2688
2689error_param:
2690	/* send the response to the VF */
2691	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2692}
2693
2694/**
2695 * i40e_vc_iwarp_msg
2696 * @vf: pointer to the VF info
2697 * @msg: pointer to the msg buffer
2698 * @msglen: msg length
2699 *
2700 * called from the VF for the iwarp msgs
2701 **/
2702static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2703{
2704	struct i40e_pf *pf = vf->pf;
2705	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2706	i40e_status aq_ret = 0;
2707
2708	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2709	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2710		aq_ret = I40E_ERR_PARAM;
2711		goto error_param;
2712	}
2713
2714	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2715				     msg, msglen);
2716
2717error_param:
2718	/* send the response to the VF */
2719	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2720				       aq_ret);
2721}
2722
2723/**
2724 * i40e_vc_iwarp_qvmap_msg
2725 * @vf: pointer to the VF info
2726 * @msg: pointer to the msg buffer
2727 * @msglen: msg length
2728 * @config: config qvmap or release it
2729 *
2730 * called from the VF for the iwarp msgs
2731 **/
2732static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2733				   bool config)
2734{
2735	struct virtchnl_iwarp_qvlist_info *qvlist_info =
2736				(struct virtchnl_iwarp_qvlist_info *)msg;
2737	i40e_status aq_ret = 0;
2738
2739	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2740	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2741		aq_ret = I40E_ERR_PARAM;
2742		goto error_param;
2743	}
2744
2745	if (config) {
2746		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2747			aq_ret = I40E_ERR_PARAM;
2748	} else {
2749		i40e_release_iwarp_qvlist(vf);
2750	}
2751
2752error_param:
2753	/* send the response to the VF */
2754	return i40e_vc_send_resp_to_vf(vf,
2755			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2756			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2757			       aq_ret);
2758}
2759
2760/**
2761 * i40e_vc_config_rss_key
2762 * @vf: pointer to the VF info
2763 * @msg: pointer to the msg buffer
2764 * @msglen: msg length
2765 *
2766 * Configure the VF's RSS key
2767 **/
2768static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
2769{
2770	struct virtchnl_rss_key *vrk =
2771		(struct virtchnl_rss_key *)msg;
2772	struct i40e_pf *pf = vf->pf;
2773	struct i40e_vsi *vsi = NULL;
2774	u16 vsi_id = vrk->vsi_id;
2775	i40e_status aq_ret = 0;
2776
2777	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2778	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2779	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2780		aq_ret = I40E_ERR_PARAM;
2781		goto err;
2782	}
2783
2784	vsi = pf->vsi[vf->lan_vsi_idx];
2785	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2786err:
2787	/* send the response to the VF */
2788	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2789				       aq_ret);
2790}
2791
2792/**
2793 * i40e_vc_config_rss_lut
2794 * @vf: pointer to the VF info
2795 * @msg: pointer to the msg buffer
2796 * @msglen: msg length
2797 *
2798 * Configure the VF's RSS LUT
2799 **/
2800static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
2801{
2802	struct virtchnl_rss_lut *vrl =
2803		(struct virtchnl_rss_lut *)msg;
2804	struct i40e_pf *pf = vf->pf;
2805	struct i40e_vsi *vsi = NULL;
2806	u16 vsi_id = vrl->vsi_id;
2807	i40e_status aq_ret = 0;
2808
2809	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2810	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2811	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2812		aq_ret = I40E_ERR_PARAM;
2813		goto err;
2814	}
2815
2816	vsi = pf->vsi[vf->lan_vsi_idx];
2817	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2818	/* send the response to the VF */
2819err:
2820	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
2821				       aq_ret);
2822}
2823
2824/**
2825 * i40e_vc_get_rss_hena
2826 * @vf: pointer to the VF info
2827 * @msg: pointer to the msg buffer
2828 * @msglen: msg length
2829 *
2830 * Return the RSS HENA bits allowed by the hardware
2831 **/
2832static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2833{
2834	struct virtchnl_rss_hena *vrh = NULL;
2835	struct i40e_pf *pf = vf->pf;
2836	i40e_status aq_ret = 0;
2837	int len = 0;
2838
2839	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2840		aq_ret = I40E_ERR_PARAM;
2841		goto err;
2842	}
2843	len = sizeof(struct virtchnl_rss_hena);
2844
2845	vrh = kzalloc(len, GFP_KERNEL);
2846	if (!vrh) {
2847		aq_ret = I40E_ERR_NO_MEMORY;
2848		len = 0;
2849		goto err;
2850	}
2851	vrh->hena = i40e_pf_get_default_rss_hena(pf);
2852err:
2853	/* send the response back to the VF */
2854	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
2855					aq_ret, (u8 *)vrh, len);
2856	kfree(vrh);
2857	return aq_ret;
2858}
2859
2860/**
2861 * i40e_vc_set_rss_hena
2862 * @vf: pointer to the VF info
2863 * @msg: pointer to the msg buffer
2864 * @msglen: msg length
2865 *
2866 * Set the RSS HENA bits for the VF
2867 **/
2868static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2869{
2870	struct virtchnl_rss_hena *vrh =
2871		(struct virtchnl_rss_hena *)msg;
2872	struct i40e_pf *pf = vf->pf;
2873	struct i40e_hw *hw = &pf->hw;
2874	i40e_status aq_ret = 0;
2875
2876	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2877		aq_ret = I40E_ERR_PARAM;
2878		goto err;
2879	}
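	/* The 64-bit HENA value is split across the two 32-bit VFQF_HENA1
	 * registers for this VF.
	 */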
2880	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
2881	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
2882			  (u32)(vrh->hena >> 32));
2883
2884	/* send the response to the VF */
2885err:
2886	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
2887}
2888
2889/**
2890 * i40e_vc_enable_vlan_stripping
2891 * @vf: pointer to the VF info
2892 * @msg: pointer to the msg buffer
2893 * @msglen: msg length
2894 *
2895 * Enable vlan header stripping for the VF
2896 **/
2897static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2898					 u16 msglen)
2899{
2900	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2901	i40e_status aq_ret = 0;
2902
2903	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2904		aq_ret = I40E_ERR_PARAM;
2905		goto err;
2906	}
2907
2908	i40e_vlan_stripping_enable(vsi);
2909
2910	/* send the response to the VF */
2911err:
2912	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2913				       aq_ret);
2914}
2915
2916/**
2917 * i40e_vc_disable_vlan_stripping
2918 * @vf: pointer to the VF info
2919 * @msg: pointer to the msg buffer
2920 * @msglen: msg length
2921 *
2922 * Disable vlan header stripping for the VF
2923 **/
2924static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2925					  u16 msglen)
2926{
2927	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2928	i40e_status aq_ret = 0;
2929
2930	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2931		aq_ret = I40E_ERR_PARAM;
2932		goto err;
2933	}
2934
2935	i40e_vlan_stripping_disable(vsi);
2936
2937	/* send the response to the VF */
2938err:
2939	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2940				       aq_ret);
2941}
2942
2943/**
2944 * i40e_validate_cloud_filter
2945 * @vf: pointer to the VF info
2946 * @tc_filter: TC filter from virtchnl, carrying both the mask and the data
2947 *
2948 * This function validates a cloud filter programmed as a TC filter for ADq
2949 **/
2950static int i40e_validate_cloud_filter(struct i40e_vf *vf,
2951				      struct virtchnl_filter *tc_filter)
2952{
2953	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
2954	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
2955	struct i40e_pf *pf = vf->pf;
2956	struct i40e_vsi *vsi = NULL;
2957	struct i40e_mac_filter *f;
2958	struct hlist_node *h;
2959	bool found = false;
2960	int bkt;
2961
2962	if (!tc_filter->action) {
2963		dev_info(&pf->pdev->dev,
2964			 "VF %d: Currently ADq doesn't support Drop Action\n",
2965			 vf->vf_id);
2966		goto err;
2967	}
2968
2969	/* action_meta is TC number here to which the filter is applied */
2970	if (!tc_filter->action_meta ||
2971	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
2972		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
2973			 vf->vf_id, tc_filter->action_meta);
2974		goto err;
2975	}
2976
2977	/* Check filter if it's programmed for advanced mode or basic mode.
2978	 * There are two ADq modes (for VF only),
2979	 * 1. Basic mode: intended to allow as many filter options as possible
2980	 *		  to be added to a VF in Non-trusted mode. Main goal is
2981	 *		  to add filters to its own MAC and VLAN id.
2982	 * 2. Advanced mode: is for allowing filters to be applied other than
2983	 *		  its own MAC or VLAN. This mode requires the VF to be
2984	 *		  Trusted.
2985	 */
2986	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
2987		vsi = pf->vsi[vf->lan_vsi_idx];
2988		f = i40e_find_mac(vsi, data.dst_mac);
2989
2990		if (!f) {
2991			dev_info(&pf->pdev->dev,
2992				 "Destination MAC %pM doesn't belong to VF %d\n",
2993				 data.dst_mac, vf->vf_id);
2994			goto err;
2995		}
2996
2997		if (mask.vlan_id) {
2998			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
2999					   hlist) {
3000				if (f->vlan == ntohs(data.vlan_id)) {
3001					found = true;
3002					break;
3003				}
3004			}
3005			if (!found) {
3006				dev_info(&pf->pdev->dev,
3007					 "VF %d doesn't have any VLAN id %u\n",
3008					 vf->vf_id, ntohs(data.vlan_id));
3009				goto err;
3010			}
3011		}
3012	} else {
3013		/* Check if VF is trusted */
3014		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3015			dev_err(&pf->pdev->dev,
3016				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3017				vf->vf_id);
3018			return I40E_ERR_CONFIG;
3019		}
3020	}
3021
3022	if (mask.dst_mac[0] & data.dst_mac[0]) {
3023		if (is_broadcast_ether_addr(data.dst_mac) ||
3024		    is_zero_ether_addr(data.dst_mac)) {
3025			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3026				 vf->vf_id, data.dst_mac);
3027			goto err;
3028		}
3029	}
3030
3031	if (mask.src_mac[0] & data.src_mac[0]) {
3032		if (is_broadcast_ether_addr(data.src_mac) ||
3033		    is_zero_ether_addr(data.src_mac)) {
3034			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3035				 vf->vf_id, data.src_mac);
3036			goto err;
3037		}
3038	}
3039
3040	if (mask.dst_port & data.dst_port) {
3041		if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) {
3042			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3043				 vf->vf_id);
3044			goto err;
3045		}
3046	}
3047
3048	if (mask.src_port & data.src_port) {
3049		if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) {
3050			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3051				 vf->vf_id);
3052			goto err;
3053		}
3054	}
3055
3056	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3057	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3058		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3059			 vf->vf_id);
3060		goto err;
3061	}
3062
3063	if (mask.vlan_id & data.vlan_id) {
3064		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3065			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3066				 vf->vf_id);
3067			goto err;
3068		}
3069	}
3070
3071	return I40E_SUCCESS;
3072err:
3073	return I40E_ERR_CONFIG;
3074}
3075
3076/**
3077 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3078 * @vf: pointer to the VF info
3079 * @seid: seid of the vsi it is searching for
3080 **/
3081static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3082{
3083	struct i40e_pf *pf = vf->pf;
3084	struct i40e_vsi *vsi = NULL;
3085	int i;
3086
3087	for (i = 0; i < vf->num_tc; i++) {
3088		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3089		if (vsi && vsi->seid == seid)
3090			return vsi;
3091	}
3092	return NULL;
3093}
3094
3095/**
3096 * i40e_del_all_cloud_filters
3097 * @vf: pointer to the VF info
3098 *
3099 * This function deletes all cloud filters
3100 **/
3101static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3102{
3103	struct i40e_cloud_filter *cfilter = NULL;
3104	struct i40e_pf *pf = vf->pf;
3105	struct i40e_vsi *vsi = NULL;
3106	struct hlist_node *node;
3107	int ret;
3108
3109	hlist_for_each_entry_safe(cfilter, node,
3110				  &vf->cloud_filter_list, cloud_node) {
3111		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3112
3113		if (!vsi) {
3114			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3115				vf->vf_id, cfilter->seid);
3116			continue;
3117		}
3118
3119		if (cfilter->dst_port)
3120			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3121								false);
3122		else
3123			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3124		if (ret)
3125			dev_err(&pf->pdev->dev,
3126				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3127				vf->vf_id, i40e_stat_str(&pf->hw, ret),
3128				i40e_aq_str(&pf->hw,
3129					    pf->hw.aq.asq_last_status));
3130
3131		hlist_del(&cfilter->cloud_node);
3132		kfree(cfilter);
3133		vf->num_cloud_filters--;
3134	}
3135}
3136
3137/**
3138 * i40e_vc_del_cloud_filter
3139 * @vf: pointer to the VF info
3140 * @msg: pointer to the msg buffer
3141 *
3142 * This function deletes a cloud filter programmed as TC filter for ADq
3143 **/
3144static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3145{
3146	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3147	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3148	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3149	struct i40e_cloud_filter cfilter, *cf = NULL;
3150	struct i40e_pf *pf = vf->pf;
3151	struct i40e_vsi *vsi = NULL;
3152	struct hlist_node *node;
3153	i40e_status aq_ret = 0;
3154	int i, ret;
3155
3156	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3157		aq_ret = I40E_ERR_PARAM;
3158		goto err;
3159	}
3160
3161	if (!vf->adq_enabled) {
3162		dev_info(&pf->pdev->dev,
3163			 "VF %d: ADq not enabled, can't apply cloud filter\n",
3164			 vf->vf_id);
3165		aq_ret = I40E_ERR_PARAM;
3166		goto err;
3167	}
3168
3169	if (i40e_validate_cloud_filter(vf, vcf)) {
3170		dev_info(&pf->pdev->dev,
3171			 "VF %d: Invalid input, can't apply cloud filter\n",
3172			 vf->vf_id);
3173		aq_ret = I40E_ERR_PARAM;
3174		goto err;
3175	}
3176
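	/* Rebuild the filter from the masked fields so it can be removed from
	 * the hardware and matched against the VF's tracked list below.
	 */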
3177	memset(&cfilter, 0, sizeof(cfilter));
3178	/* parse destination mac address */
3179	for (i = 0; i < ETH_ALEN; i++)
3180		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3181
3182	/* parse source mac address */
3183	for (i = 0; i < ETH_ALEN; i++)
3184		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3185
3186	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3187	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3188	cfilter.src_port = mask.src_port & tcf.src_port;
3189
3190	switch (vcf->flow_type) {
3191	case VIRTCHNL_TCP_V4_FLOW:
3192		cfilter.n_proto = ETH_P_IP;
3193		if (mask.dst_ip[0] & tcf.dst_ip[0])
3194			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3195			       ARRAY_SIZE(tcf.dst_ip));
3196		else if (mask.src_ip[0] & tcf.src_ip[0])
3197			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3198			       ARRAY_SIZE(tcf.src_ip));
3199		break;
3200	case VIRTCHNL_TCP_V6_FLOW:
3201		cfilter.n_proto = ETH_P_IPV6;
3202		if (mask.dst_ip[3] & tcf.dst_ip[3])
3203			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3204			       sizeof(cfilter.ip.v6.dst_ip6));
3205		if (mask.src_ip[3] & tcf.src_ip[3])
3206			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3207			       sizeof(cfilter.ip.v6.src_ip6));
3208		break;
3209	default:
3210		/* A TC filter can be configured from different field combinations,
3211		 * and in this case IP is not part of the filter config
3212		 */
3213		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3214			 vf->vf_id);
3215	}
3216
3217	/* get the vsi to which the tc belongs to */
3218	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3219	cfilter.seid = vsi->seid;
3220	cfilter.flags = vcf->field_flags;
3221
3222	/* Deleting TC filter */
3223	if (tcf.dst_port)
3224		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3225	else
3226		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3227	if (ret) {
3228		dev_err(&pf->pdev->dev,
3229			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3230			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3231			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3232		goto err;
3233	}
3234
3235	hlist_for_each_entry_safe(cf, node,
3236				  &vf->cloud_filter_list, cloud_node) {
3237		if (cf->seid != cfilter.seid)
3238			continue;
3239		if (mask.dst_port)
3240			if (cfilter.dst_port != cf->dst_port)
3241				continue;
3242		if (mask.dst_mac[0])
3243			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3244				continue;
3245		/* for ipv4 data to be valid, only the first u32 of the mask is set */
3246		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3247			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3248				   ARRAY_SIZE(tcf.dst_ip)))
3249				continue;
3250		/* for ipv6, mask is set for all sixteen bytes (4 words) */
3251		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3252			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3253				   sizeof(cfilter.ip.v6.dst_ip6)))
3254				continue;
3255		if (mask.vlan_id)
3256			if (cfilter.vlan_id != cf->vlan_id)
3257				continue;
3258
3259		hlist_del(&cf->cloud_node);
3260		kfree(cf);
3261		vf->num_cloud_filters--;
3262	}
3263
3264err:
3265	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3266				       aq_ret);
3267}
3268
3269/**
3270 * i40e_vc_add_cloud_filter
3271 * @vf: pointer to the VF info
3272 * @msg: pointer to the msg buffer
3273 *
3274 * This function adds a cloud filter programmed as TC filter for ADq
3275 **/
3276static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3277{
3278	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3279	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3280	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3281	struct i40e_cloud_filter *cfilter = NULL;
3282	struct i40e_pf *pf = vf->pf;
3283	struct i40e_vsi *vsi = NULL;
3284	i40e_status aq_ret = 0;
3285	int i, ret;
3286
3287	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3288		aq_ret = I40E_ERR_PARAM;
3289		goto err;
3290	}
3291
3292	if (!vf->adq_enabled) {
3293		dev_info(&pf->pdev->dev,
3294			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3295			 vf->vf_id);
3296		aq_ret = I40E_ERR_PARAM;
3297		goto err;
3298	}
3299
3300	if (i40e_validate_cloud_filter(vf, vcf)) {
3301		dev_info(&pf->pdev->dev,
3302			 "VF %d: Invalid input/s, can't apply cloud filter\n",
3303			 vf->vf_id);
3304		aq_ret = I40E_ERR_PARAM;
3305		goto err;
3306	}
3307
3308	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3309	if (!cfilter)
3310		return -ENOMEM;
3311
3312	/* parse destination mac address */
3313	for (i = 0; i < ETH_ALEN; i++)
3314		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3315
3316	/* parse source mac address */
3317	for (i = 0; i < ETH_ALEN; i++)
3318		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3319
3320	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3321	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3322	cfilter->src_port = mask.src_port & tcf.src_port;
3323
3324	switch (vcf->flow_type) {
3325	case VIRTCHNL_TCP_V4_FLOW:
3326		cfilter->n_proto = ETH_P_IP;
3327		if (mask.dst_ip[0] & tcf.dst_ip[0])
3328			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3329			       ARRAY_SIZE(tcf.dst_ip));
3330		else if (mask.src_ip[0] & tcf.src_ip[0])
3331			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3332			       ARRAY_SIZE(tcf.src_ip));
3333		break;
3334	case VIRTCHNL_TCP_V6_FLOW:
3335		cfilter->n_proto = ETH_P_IPV6;
3336		if (mask.dst_ip[3] & tcf.dst_ip[3])
3337			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3338			       sizeof(cfilter->ip.v6.dst_ip6));
3339		if (mask.src_ip[3] & tcf.src_ip[3])
3340			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3341			       sizeof(cfilter->ip.v6.src_ip6));
3342		break;
3343	default:
3344		/* A TC filter can be configured from different field combinations,
3345		 * and in this case IP is not part of the filter config
3346		 */
3347		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3348			 vf->vf_id);
3349	}
3350
3351	/* get the VSI to which the TC belongs to */
3352	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3353	cfilter->seid = vsi->seid;
3354	cfilter->flags = vcf->field_flags;
3355
3356	/* Adding cloud filter programmed as TC filter */
3357	if (tcf.dst_port)
3358		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3359	else
3360		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3361	if (ret) {
3362		dev_err(&pf->pdev->dev,
3363			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3364			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3365			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3366		goto err;
3367	}
3368
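	/* Track the filter on the VF so it can be torn down later, e.g. when
	 * ADq is disabled via i40e_del_all_cloud_filters().
	 */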
3369	INIT_HLIST_NODE(&cfilter->cloud_node);
3370	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3371	vf->num_cloud_filters++;
3372err:
3373	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3374				       aq_ret);
3375}
3376
3377/**
3378 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3379 * @vf: pointer to the VF info
3380 * @msg: pointer to the msg buffer
3381 **/
3382static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3383{
3384	struct virtchnl_tc_info *tci =
3385		(struct virtchnl_tc_info *)msg;
3386	struct i40e_pf *pf = vf->pf;
3387	struct i40e_link_status *ls = &pf->hw.phy.link_info;
3388	int i, adq_request_qps = 0, speed = 0;
3389	i40e_status aq_ret = 0;
3390
3391	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3392		aq_ret = I40E_ERR_PARAM;
3393		goto err;
3394	}
3395
3396	/* ADq cannot be applied if spoof check is ON */
3397	if (vf->spoofchk) {
3398		dev_err(&pf->pdev->dev,
3399			"Spoof check is ON, turn it OFF to enable ADq\n");
3400		aq_ret = I40E_ERR_PARAM;
3401		goto err;
3402	}
3403
3404	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3405		dev_err(&pf->pdev->dev,
3406			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3407			vf->vf_id);
3408		aq_ret = I40E_ERR_PARAM;
3409		goto err;
3410	}
3411
3412	/* max number of traffic classes for VF currently capped at 4 */
3413	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3414		dev_err(&pf->pdev->dev,
3415			"VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
3416			vf->vf_id, tci->num_tc);
3417		aq_ret = I40E_ERR_PARAM;
3418		goto err;
3419	}
3420
3421	/* validate queues for each TC */
3422	for (i = 0; i < tci->num_tc; i++)
3423		if (!tci->list[i].count ||
3424		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3425			dev_err(&pf->pdev->dev,
3426				"VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
3427				vf->vf_id, i, tci->list[i].count);
3428			aq_ret = I40E_ERR_PARAM;
3429			goto err;
3430		}
3431
3432	/* need Max VF queues but already have default number of queues */
3433	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3434
3435	if (pf->queues_left < adq_request_qps) {
3436		dev_err(&pf->pdev->dev,
3437			"No queues left to allocate to VF %d\n",
3438			vf->vf_id);
3439		aq_ret = I40E_ERR_PARAM;
3440		goto err;
3441	} else {
3442		/* we need to allocate max VF queues to enable ADq so as to
3443		 * make sure ADq enabled VF always gets back queues when it
3444		 * goes through a reset.
3445		 */
3446		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3447	}
3448
3449	/* get link speed in Mbps to validate rate limit */
3450	switch (ls->link_speed) {
3451	case VIRTCHNL_LINK_SPEED_100MB:
3452		speed = SPEED_100;
3453		break;
3454	case VIRTCHNL_LINK_SPEED_1GB:
3455		speed = SPEED_1000;
3456		break;
3457	case VIRTCHNL_LINK_SPEED_10GB:
3458		speed = SPEED_10000;
3459		break;
3460	case VIRTCHNL_LINK_SPEED_20GB:
3461		speed = SPEED_20000;
3462		break;
3463	case VIRTCHNL_LINK_SPEED_25GB:
3464		speed = SPEED_25000;
3465		break;
3466	case VIRTCHNL_LINK_SPEED_40GB:
3467		speed = SPEED_40000;
3468		break;
3469	default:
3470		dev_err(&pf->pdev->dev,
3471			"Cannot detect link speed\n");
3472		aq_ret = I40E_ERR_PARAM;
3473		goto err;
3474	}
3475
3476	/* parse data from the queue channel info */
3477	vf->num_tc = tci->num_tc;
3478	for (i = 0; i < vf->num_tc; i++) {
3479		if (tci->list[i].max_tx_rate) {
3480			if (tci->list[i].max_tx_rate > speed) {
3481				dev_err(&pf->pdev->dev,
3482					"Invalid max tx rate %llu specified for VF %d.",
3483					tci->list[i].max_tx_rate,
3484					vf->vf_id);
3485				aq_ret = I40E_ERR_PARAM;
3486				goto err;
3487			} else {
3488				vf->ch[i].max_tx_rate =
3489					tci->list[i].max_tx_rate;
3490			}
3491		}
3492		vf->ch[i].num_qps = tci->list[i].count;
3493	}
3494
3495	/* set this flag only after making sure all inputs are sane */
3496	vf->adq_enabled = true;
3497	/* num_req_queues is set when the user changes the number of queues via
3498	 * ethtool, and this causes an issue for the default VSI (which depends
3499	 * on this variable) when ADq is enabled; hence reset it.
3500	 */
3501	vf->num_req_queues = 0;
3502
3503	/* reset the VF in order to allocate resources */
3504	i40e_vc_notify_vf_reset(vf);
3505	i40e_reset_vf(vf, false);
3506
3507	return I40E_SUCCESS;
3508
3509	/* send the response to the VF */
3510err:
3511	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3512				       aq_ret);
3513}
3514
3515/**
3516 * i40e_vc_del_qch_msg
3517 * @vf: pointer to the VF info
3518 * @msg: pointer to the msg buffer
3519 **/
3520static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3521{
3522	struct i40e_pf *pf = vf->pf;
3523	i40e_status aq_ret = 0;
3524
3525	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3526		aq_ret = I40E_ERR_PARAM;
3527		goto err;
3528	}
3529
3530	if (vf->adq_enabled) {
3531		i40e_del_all_cloud_filters(vf);
3532		i40e_del_qch(vf);
3533		vf->adq_enabled = false;
3534		vf->num_tc = 0;
3535		dev_info(&pf->pdev->dev,
3536			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3537			 vf->vf_id);
3538	} else {
3539		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3540			 vf->vf_id);
3541		aq_ret = I40E_ERR_PARAM;
3542	}
3543
3544	/* reset the VF in order to allocate resources */
3545	i40e_vc_notify_vf_reset(vf);
3546	i40e_reset_vf(vf, false);
3547
3548	return I40E_SUCCESS;
3549
3550err:
3551	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3552				       aq_ret);
3553}
3554
3555/**
3556 * i40e_vc_process_vf_msg
3557 * @pf: pointer to the PF structure
3558 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: return value
3559 * @msg: pointer to the msg buffer
3560 * @msglen: msg length
3562 *
3563 * called from the common aeq/arq handler to
3564 * process requests from the VF
3565 **/
3566int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3567			   u32 v_retval, u8 *msg, u16 msglen)
3568{
3569	struct i40e_hw *hw = &pf->hw;
3570	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3571	struct i40e_vf *vf;
3572	int ret;
3573
3574	pf->vf_aq_requests++;
3575	if (local_vf_id >= pf->num_alloc_vfs)
3576		return -EINVAL;
3577	vf = &(pf->vf[local_vf_id]);
3578
3579	/* Check if VF is disabled. */
3580	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3581		return I40E_ERR_PARAM;
3582
3583	/* perform basic checks on the msg */
3584	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3585
3586	/* perform additional checks specific to this driver */
3587	if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
3588		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
3589
3590		if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
3591			ret = -EINVAL;
3592	} else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
3593		struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
3594
3595		if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
3596			ret = -EINVAL;
3597	}
3598
3599	if (ret) {
3600		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3601		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3602			local_vf_id, v_opcode, msglen);
3603		switch (ret) {
3604		case VIRTCHNL_ERR_PARAM:
3605			return -EPERM;
3606		default:
3607			return -EINVAL;
3608		}
3609	}
3610
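	/* Dispatch to the per-opcode handler; each handler is responsible for
	 * sending its own response back to the VF.
	 */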
3611	switch (v_opcode) {
3612	case VIRTCHNL_OP_VERSION:
3613		ret = i40e_vc_get_version_msg(vf, msg);
3614		break;
3615	case VIRTCHNL_OP_GET_VF_RESOURCES:
3616		ret = i40e_vc_get_vf_resources_msg(vf, msg);
3617		i40e_vc_notify_vf_link_state(vf);
3618		break;
3619	case VIRTCHNL_OP_RESET_VF:
3620		i40e_vc_reset_vf_msg(vf);
3621		ret = 0;
3622		break;
3623	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3624		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
3625		break;
3626	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3627		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
3628		break;
3629	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3630		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
3631		break;
3632	case VIRTCHNL_OP_ENABLE_QUEUES:
3633		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
3634		i40e_vc_notify_vf_link_state(vf);
3635		break;
3636	case VIRTCHNL_OP_DISABLE_QUEUES:
3637		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
3638		break;
3639	case VIRTCHNL_OP_ADD_ETH_ADDR:
3640		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
3641		break;
3642	case VIRTCHNL_OP_DEL_ETH_ADDR:
3643		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
3644		break;
3645	case VIRTCHNL_OP_ADD_VLAN:
3646		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
3647		break;
3648	case VIRTCHNL_OP_DEL_VLAN:
3649		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
3650		break;
3651	case VIRTCHNL_OP_GET_STATS:
3652		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
3653		break;
3654	case VIRTCHNL_OP_IWARP:
3655		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3656		break;
3657	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3658		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
3659		break;
3660	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3661		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
3662		break;
3663	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3664		ret = i40e_vc_config_rss_key(vf, msg, msglen);
3665		break;
3666	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3667		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
3668		break;
3669	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3670		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
3671		break;
3672	case VIRTCHNL_OP_SET_RSS_HENA:
3673		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
3674		break;
3675	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3676		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
3677		break;
3678	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3679		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
3680		break;
3681	case VIRTCHNL_OP_REQUEST_QUEUES:
3682		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
3683		break;
3684	case VIRTCHNL_OP_ENABLE_CHANNELS:
3685		ret = i40e_vc_add_qch_msg(vf, msg);
3686		break;
3687	case VIRTCHNL_OP_DISABLE_CHANNELS:
3688		ret = i40e_vc_del_qch_msg(vf, msg);
3689		break;
3690	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3691		ret = i40e_vc_add_cloud_filter(vf, msg);
3692		break;
3693	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3694		ret = i40e_vc_del_cloud_filter(vf, msg);
3695		break;
3696	case VIRTCHNL_OP_UNKNOWN:
3697	default:
3698		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3699			v_opcode, local_vf_id);
3700		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3701					      I40E_ERR_NOT_IMPLEMENTED);
3702		break;
3703	}
3704
3705	return ret;
3706}
3707
3708/**
3709 * i40e_vc_process_vflr_event
3710 * @pf: pointer to the PF structure
3711 *
3712 * called from the VFLR irq handler to
3713 * free up VF resources and state variables
3714 **/
3715int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3716{
3717	struct i40e_hw *hw = &pf->hw;
3718	u32 reg, reg_idx, bit_idx;
3719	struct i40e_vf *vf;
3720	int vf_id;
3721
3722	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3723		return 0;
3724
3725	/* Re-enable the VFLR interrupt cause here, before looking for which
3726	 * VF got reset. Otherwise, if another VF gets a reset while the
3727	 * first one is being processed, that interrupt will be lost, and
3728	 * that VF will be stuck in reset forever.
3729	 */
3730	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3731	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3732	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3733	i40e_flush(hw);
3734
3735	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3736	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3737		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3738		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3739		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
3740		vf = &pf->vf[vf_id];
3741		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3742		if (reg & BIT(bit_idx))
3743			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
3744			i40e_reset_vf(vf, true);
3745	}
3746
3747	return 0;
3748}
3749
3750/**
3751 * i40e_ndo_set_vf_mac
3752 * @netdev: network interface device structure
3753 * @vf_id: VF identifier
3754 * @mac: mac address
3755 *
3756 * program VF mac address
3757 **/
3758int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3759{
3760	struct i40e_netdev_priv *np = netdev_priv(netdev);
3761	struct i40e_vsi *vsi = np->vsi;
3762	struct i40e_pf *pf = vsi->back;
3763	struct i40e_mac_filter *f;
3764	struct i40e_vf *vf;
3765	int ret = 0;
3766	struct hlist_node *h;
3767	int bkt;
3768	u8 i;
3769
3770	/* validate the request */
3771	if (vf_id >= pf->num_alloc_vfs) {
3772		dev_err(&pf->pdev->dev,
3773			"Invalid VF Identifier %d\n", vf_id);
3774		ret = -EINVAL;
3775		goto error_param;
3776	}
3777
3778	vf = &(pf->vf[vf_id]);
3779	vsi = pf->vsi[vf->lan_vsi_idx];
3780
3781	/* When the VF is resetting wait until it is done.
3782	 * It can take up to 200 milliseconds,
3783	 * but wait for up to 300 milliseconds to be safe.
3784	 */
3785	for (i = 0; i < 15; i++) {
3786		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
3787			break;
3788		msleep(20);
3789	}
3790	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3791		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3792			vf_id);
3793		ret = -EAGAIN;
3794		goto error_param;
3795	}
3796
3797	if (is_multicast_ether_addr(mac)) {
3798		dev_err(&pf->pdev->dev,
3799			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
3800		ret = -EINVAL;
3801		goto error_param;
3802	}
3803
3804	/* Lock once because below invoked function add/del_filter requires
3805	 * mac_filter_hash_lock to be held
3806	 */
3807	spin_lock_bh(&vsi->mac_filter_hash_lock);
3808
3809	/* delete the temporary mac address */
3810	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
3811		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
3812
3813	/* Delete all the filters for this VSI - we're going to kill it
3814	 * anyway.
3815	 */
3816	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
3817		__i40e_del_filter(vsi, f);
3818
3819	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3820
3821	/* program mac filter */
3822	if (i40e_sync_vsi_filters(vsi)) {
3823		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
3824		ret = -EIO;
3825		goto error_param;
3826	}
3827	ether_addr_copy(vf->default_lan_addr.addr, mac);
3828
3829	if (is_zero_ether_addr(mac)) {
3830		vf->pf_set_mac = false;
3831		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
3832	} else {
3833		vf->pf_set_mac = true;
3834		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
3835			 mac, vf_id);
3836	}
3837
3838	/* Force the VF driver stop so it has to reload with new MAC address */
3839	i40e_vc_disable_vf(vf);
3840	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
3841
3842error_param:
3843	return ret;
3844}
3845
3846/**
3847 * i40e_vsi_has_vlans - True if VSI has configured VLANs
3848 * @vsi: pointer to the vsi
3849 *
3850 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
3851 * we have no configured VLANs. Do not call while holding the
3852 * mac_filter_hash_lock.
3853 */
3854static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
3855{
3856	bool have_vlans;
3857
3858	/* If we have a port VLAN, then the VSI cannot have any VLANs
3859	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
3860	 */
3861	if (vsi->info.pvid)
3862		return false;
3863
3864	/* Since we don't have a PVID, we know that if the device is in VLAN
3865	 * mode it must be because of a VLAN filter configured on this VSI.
3866	 */
3867	spin_lock_bh(&vsi->mac_filter_hash_lock);
3868	have_vlans = i40e_is_vsi_in_vlan(vsi);
3869	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3870
3871	return have_vlans;
3872}
3873
3874/**
3875 * i40e_ndo_set_vf_port_vlan
3876 * @netdev: network interface device structure
3877 * @vf_id: VF identifier
3878 * @vlan_id: VLAN identifier
3879 * @qos: priority setting
3880 * @vlan_proto: vlan protocol
3881 *
3882 * program VF vlan id and/or qos
3883 **/
3884int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
3885			      u16 vlan_id, u8 qos, __be16 vlan_proto)
3886{
3887	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
3888	struct i40e_netdev_priv *np = netdev_priv(netdev);
3889	struct i40e_pf *pf = np->vsi->back;
3890	struct i40e_vsi *vsi;
3891	struct i40e_vf *vf;
3892	int ret = 0;
3893
3894	/* validate the request */
3895	if (vf_id >= pf->num_alloc_vfs) {
3896		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3897		ret = -EINVAL;
3898		goto error_pvid;
3899	}
3900
3901	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
3902		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
3903		ret = -EINVAL;
3904		goto error_pvid;
3905	}
3906
3907	if (vlan_proto != htons(ETH_P_8021Q)) {
3908		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
3909		ret = -EPROTONOSUPPORT;
3910		goto error_pvid;
3911	}
3912
3913	vf = &(pf->vf[vf_id]);
3914	vsi = pf->vsi[vf->lan_vsi_idx];
3915	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3916		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3917			vf_id);
3918		ret = -EAGAIN;
3919		goto error_pvid;
3920	}
3921
3922	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
3923		/* duplicate request, so just return success */
3924		goto error_pvid;
3925
3926	if (i40e_vsi_has_vlans(vsi)) {
3927		dev_err(&pf->pdev->dev,
3928			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
3929			vf_id);
3930		/* Administrator error - knock the VF offline until the network
3931		 * is reconfigured correctly and the VF driver is reloaded.
3932		 */
3934		i40e_vc_disable_vf(vf);
3935		/* During reset the VF got a new VSI, so refresh the pointer. */
3936		vsi = pf->vsi[vf->lan_vsi_idx];
3937	}
3938
3939	/* Locked once because multiple functions below iterate list */
3940	spin_lock_bh(&vsi->mac_filter_hash_lock);
3941
3942	/* Check for condition where there was already a port VLAN ID
3943	 * filter set and now it is being deleted by setting it to zero.
3944	 * Additionally check for the condition where there was a port
3945	 * VLAN but now there is a new and different port VLAN being set.
3946	 * Before deleting all the old VLAN filters we must add new ones
3947	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
3948	 * MAC addresses deleted.
3949	 */
3950	if ((!(vlan_id || qos) ||
3951	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
3952	    vsi->info.pvid) {
3953		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
3954		if (ret) {
3955			dev_info(&vsi->back->pdev->dev,
3956				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3957				 vsi->back->hw.aq.asq_last_status);
3958			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3959			goto error_pvid;
3960		}
3961	}
3962
3963	if (vsi->info.pvid) {
3964		/* remove all filters on the old VLAN */
3965		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
3966					   VLAN_VID_MASK));
3967	}
3968
3969	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3970	if (vlan_id || qos)
3971		ret = i40e_vsi_add_pvid(vsi, vlanprio);
3972	else
3973		i40e_vsi_remove_pvid(vsi);
3974	spin_lock_bh(&vsi->mac_filter_hash_lock);
3975
3976	if (vlan_id) {
3977		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
3978			 vlan_id, qos, vf_id);
3979
3980		/* add new VLAN filter for each MAC */
3981		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
3982		if (ret) {
3983			dev_info(&vsi->back->pdev->dev,
3984				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3985				 vsi->back->hw.aq.asq_last_status);
3986			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3987			goto error_pvid;
3988		}
3989
3990		/* remove the previously added non-VLAN MAC filters */
3991		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
3992	}
3993
3994	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3995
3996	/* Schedule the worker thread to take care of applying changes */
3997	i40e_service_event_schedule(vsi->back);
3998
3999	if (ret) {
4000		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4001		goto error_pvid;
4002	}
4003
4004	/* The Port VLAN needs to be saved across resets the same as the
4005	 * default LAN MAC address.
4006	 */
4007	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4008	ret = 0;
4009
4010error_pvid:
4011	return ret;
4012}
4013
4014/**
4015 * i40e_ndo_set_vf_bw
4016 * @netdev: network interface device structure
4017 * @vf_id: VF identifier
4018 * @min_tx_rate: minimum Tx rate, only zero is supported
 * @max_tx_rate: maximum Tx rate
4019 *
4020 * configure VF Tx rate
4021 **/
4022int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4023		       int max_tx_rate)
4024{
4025	struct i40e_netdev_priv *np = netdev_priv(netdev);
4026	struct i40e_pf *pf = np->vsi->back;
4027	struct i40e_vsi *vsi;
4028	struct i40e_vf *vf;
4029	int ret = 0;
4030
4031	/* validate the request */
4032	if (vf_id >= pf->num_alloc_vfs) {
4033		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
4034		ret = -EINVAL;
4035		goto error;
4036	}
4037
4038	if (min_tx_rate) {
4039		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4040			min_tx_rate, vf_id);
4041		return -EINVAL;
4042	}
4043
4044	vf = &(pf->vf[vf_id]);
4045	vsi = pf->vsi[vf->lan_vsi_idx];
4046	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4047		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4048			vf_id);
4049		ret = -EAGAIN;
4050		goto error;
4051	}
4052
4053	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4054	if (ret)
4055		goto error;
4056
4057	vf->tx_rate = max_tx_rate;
4058error:
4059	return ret;
4060}
4061
4062/**
4063 * i40e_ndo_get_vf_config
4064 * @netdev: network interface device structure
4065 * @vf_id: VF identifier
4066 * @ivi: VF configuration structure
4067 *
4068 * return VF configuration
4069 **/
4070int i40e_ndo_get_vf_config(struct net_device *netdev,
4071			   int vf_id, struct ifla_vf_info *ivi)
4072{
4073	struct i40e_netdev_priv *np = netdev_priv(netdev);
4074	struct i40e_vsi *vsi = np->vsi;
4075	struct i40e_pf *pf = vsi->back;
4076	struct i40e_vf *vf;
4077	int ret = 0;
4078
4079	/* validate the request */
4080	if (vf_id >= pf->num_alloc_vfs) {
4081		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4082		ret = -EINVAL;
4083		goto error_param;
4084	}
4085
4086	vf = &(pf->vf[vf_id]);
4087	/* first vsi is always the LAN vsi */
4088	vsi = pf->vsi[vf->lan_vsi_idx];
4089	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4090		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4091			vf_id);
4092		ret = -EAGAIN;
4093		goto error_param;
4094	}
4095
4096	ivi->vf = vf_id;
4097
4098	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4099
4100	ivi->max_tx_rate = vf->tx_rate;
4101	ivi->min_tx_rate = 0;
4102	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4103	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4104		   I40E_VLAN_PRIORITY_SHIFT;
4105	if (!vf->link_forced)
4106		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4107	else if (vf->link_up)
4108		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4109	else
4110		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4111	ivi->spoofchk = vf->spoofchk;
4112	ivi->trusted = vf->trusted;
4113	ret = 0;
4114
4115error_param:
4116	return ret;
4117}
4118
4119/**
4120 * i40e_ndo_set_vf_link_state
4121 * @netdev: network interface device structure
4122 * @vf_id: VF identifier
4123 * @link: required link state
4124 *
4125 * Set the link state of a specified VF, regardless of physical link state
4126 **/
4127int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4128{
4129	struct i40e_netdev_priv *np = netdev_priv(netdev);
4130	struct i40e_pf *pf = np->vsi->back;
4131	struct virtchnl_pf_event pfe;
4132	struct i40e_hw *hw = &pf->hw;
4133	struct i40e_vf *vf;
4134	int abs_vf_id;
4135	int ret = 0;
4136
4137	/* validate the request */
4138	if (vf_id >= pf->num_alloc_vfs) {
4139		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4140		ret = -EINVAL;
4141		goto error_out;
4142	}
4143
4144	vf = &pf->vf[vf_id];
4145	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4146
4147	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4148	pfe.severity = PF_EVENT_SEVERITY_INFO;
4149
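	/* AUTO mirrors the current physical link; ENABLE/DISABLE force the
	 * state reported to the VF regardless of the PHY.
	 */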
4150	switch (link) {
4151	case IFLA_VF_LINK_STATE_AUTO:
4152		vf->link_forced = false;
4153		pfe.event_data.link_event.link_status =
4154			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4155		pfe.event_data.link_event.link_speed =
4156			(enum virtchnl_link_speed)
4157			pf->hw.phy.link_info.link_speed;
4158		break;
4159	case IFLA_VF_LINK_STATE_ENABLE:
4160		vf->link_forced = true;
4161		vf->link_up = true;
4162		pfe.event_data.link_event.link_status = true;
4163		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
 
 
4164		break;
4165	case IFLA_VF_LINK_STATE_DISABLE:
4166		vf->link_forced = true;
4167		vf->link_up = false;
4168		pfe.event_data.link_event.link_status = false;
4169		pfe.event_data.link_event.link_speed = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4170		break;
4171	default:
4172		ret = -EINVAL;
4173		goto error_out;
4174	}
4175	/* Notify the VF of its new link state */
4176	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4177			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4178
4179error_out:
 
4180	return ret;
4181}
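
/* Editor's illustrative sketch, not part of the driver: the forced-up case
 * above always advertises I40E_LINK_SPEED_40GB. A hypothetical variant that
 * mirrors the PF's currently negotiated speed instead could fill the event
 * like this (the helper name is invented for illustration):
 */
static void i40e_fill_forced_link_sketch(struct i40e_pf *pf,
					 struct virtchnl_pf_event *pfe,
					 bool link_up)
{
	pfe->event_data.link_event.link_status = link_up;
	/* reuse the PF's negotiated speed rather than a hardcoded value */
	pfe->event_data.link_event.link_speed = link_up ?
		(enum virtchnl_link_speed)pf->hw.phy.link_info.link_speed : 0;
}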

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	/* when disabling, sec_flags stays 0 from the memset above, which
	 * clears both the MAC and VLAN spoof checks in the VSI update
	 */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}
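
/* Editor's illustrative sketch, not part of the driver: the disable path
 * above relies on the zeroed context to clear the checks. An equivalent,
 * more explicit formulation of the flag handling could read as follows
 * (the helper name is invented for illustration):
 */
static void i40e_fill_sec_flags_sketch(struct i40e_vsi_context *ctxt,
				       bool enable)
{
	ctxt->info.valid_sections |=
		cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
	else
		/* spell out the clear instead of relying on a zeroed ctxt */
		ctxt->info.sec_flags &=
			~(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
			  I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
}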

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the PF
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		return -EINVAL;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;

	/* reset the VF so the new trust setting takes effect when it
	 * reinitializes
	 */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}

out:
	return ret;
}
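
/* Editor's illustrative sketch, not part of the driver: the settings made
 * by the callbacks above can be read back through the .ndo_get_vf_config
 * path defined earlier in this file. A minimal dump helper (the name is
 * invented for illustration; error handling trimmed for brevity):
 */
static void i40e_dump_vf_flags_sketch(struct net_device *netdev, int vf_id)
{
	struct ifla_vf_info ivi = {};

	if (i40e_ndo_get_vf_config(netdev, vf_id, &ivi))
		return;

	netdev_info(netdev, "VF %u: trusted=%u spoofchk=%u link_state=%u max_tx_rate=%u\n",
		    ivi.vf, ivi.trusted, ivi.spoofchk, ivi.linkstate,
		    ivi.max_tx_rate);
}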