   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2021-2023, Intel Corporation. */
   3
   4#include "ice.h"
   5#include "ice_base.h"
   6#include "ice_lib.h"
   7#include "ice_flow.h"
   8#include "ice_vf_lib_private.h"
   9
  10#define to_fltr_conf_from_desc(p) \
  11	container_of(p, struct virtchnl_fdir_fltr_conf, input)
  12
  13#define GTPU_TEID_OFFSET 4
  14#define GTPU_EH_QFI_OFFSET 1
  15#define GTPU_EH_QFI_MASK 0x3F
  16#define PFCP_S_OFFSET 0
  17#define PFCP_S_MASK 0x1
  18#define PFCP_PORT_NR 8805
  19
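/* Bit 0 of conf->inset_flag records how an ESP SPI is carried: set for
 * ESP over UDP (NAT-T), clear for ESP directly over IP (native IPsec).
 */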
  20#define FDIR_INSET_FLAG_ESP_S 0
  21#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
  22#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
  23#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
  24
  25enum ice_fdir_tunnel_type {
  26	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
  27	ICE_FDIR_TUNNEL_TYPE_GTPU,
  28	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
  29};
  30
  31struct virtchnl_fdir_fltr_conf {
  32	struct ice_fdir_fltr input;
  33	enum ice_fdir_tunnel_type ttype;
  34	u64 inset_flag;
  35	u32 flow_id;
  36};
  37
  38struct virtchnl_fdir_inset_map {
  39	enum virtchnl_proto_hdr_field field;
  40	enum ice_flow_field fld;
  41	u64 flag;
  42	u64 mask;
  43};
  44
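/* Map each virtchnl input-set field onto the ICE flow field used to build
 * the flow profile. Rows with a non-zero mask (the two ESP_SPI entries)
 * are only taken when conf->inset_flag matches flag under that mask.
 */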
  45static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
  46	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
  47	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
  48	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
  49	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
  50	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
  51	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
  52	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
  53	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
  54	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
  55	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
  56	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
  57	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
  58	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
  59	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
  60	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
  61	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
  62	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
  63	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
  64	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
  65	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
  66		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
  67	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
  68		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
  69	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
  70	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
  71	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
  72};
  73
  74/**
  75 * ice_vc_fdir_param_check
  76 * @vf: pointer to the VF structure
  77 * @vsi_id: VF relative VSI ID
  78 *
  79 * Check for a valid VSI ID and for valid PF and VF states
  80 *
  81 * Return: 0 on success, and -EINVAL on error.
  82 */
  83static int
  84ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
  85{
  86	struct ice_pf *pf = vf->pf;
  87
  88	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
  89		return -EINVAL;
  90
  91	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
  92		return -EINVAL;
  93
  94	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
  95		return -EINVAL;
  96
  97	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
  98		return -EINVAL;
  99
 100	if (!ice_get_vf_vsi(vf))
 101		return -EINVAL;
 102
 103	return 0;
 104}
 105
 106/**
 107 * ice_vf_start_ctrl_vsi
 108 * @vf: pointer to the VF structure
 109 *
  110 * Allocate the ctrl_vsi for the first time and open the ctrl_vsi port for the VF
 111 *
 112 * Return: 0 on success, and other on error.
 113 */
 114static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
 115{
 116	struct ice_pf *pf = vf->pf;
 117	struct ice_vsi *ctrl_vsi;
 118	struct device *dev;
 119	int err;
 120
 121	dev = ice_pf_to_dev(pf);
 122	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 123		return -EEXIST;
 124
 125	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
 126	if (!ctrl_vsi) {
 127		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
 128			vf->vf_id);
 129		return -ENOMEM;
 130	}
 131
 132	err = ice_vsi_open_ctrl(ctrl_vsi);
 133	if (err) {
 134		dev_dbg(dev, "Could not open control VSI for VF %d\n",
 135			vf->vf_id);
 136		goto err_vsi_open;
 137	}
 138
 139	return 0;
 140
 141err_vsi_open:
 142	ice_vsi_release(ctrl_vsi);
 143	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
 144		pf->vsi[vf->ctrl_vsi_idx] = NULL;
 145		vf->ctrl_vsi_idx = ICE_NO_VSI;
 146	}
 147	return err;
 148}
 149
 150/**
 151 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 152 * @vf: pointer to the VF structure
 153 * @flow: filter flow type
 154 *
 155 * Return: 0 on success, and other on error.
 156 */
 157static int
 158ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
 159{
 160	struct ice_vf_fdir *fdir = &vf->fdir;
 161
 162	if (!fdir->fdir_prof) {
 163		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
 164					       ICE_FLTR_PTYPE_MAX,
 165					       sizeof(*fdir->fdir_prof),
 166					       GFP_KERNEL);
 167		if (!fdir->fdir_prof)
 168			return -ENOMEM;
 169	}
 170
 171	if (!fdir->fdir_prof[flow]) {
 172		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
 173						     sizeof(**fdir->fdir_prof),
 174						     GFP_KERNEL);
 175		if (!fdir->fdir_prof[flow])
 176			return -ENOMEM;
 177	}
 178
 179	return 0;
 180}
 181
 182/**
 183 * ice_vc_fdir_free_prof - free profile for this filter flow type
 184 * @vf: pointer to the VF structure
 185 * @flow: filter flow type
 186 */
 187static void
 188ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
 189{
 190	struct ice_vf_fdir *fdir = &vf->fdir;
 191
 192	if (!fdir->fdir_prof)
 193		return;
 194
 195	if (!fdir->fdir_prof[flow])
 196		return;
 197
 198	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
 199	fdir->fdir_prof[flow] = NULL;
 200}
 201
 202/**
  203 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 204 * @vf: pointer to the VF structure
 205 */
 206static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
 207{
 208	struct ice_vf_fdir *fdir = &vf->fdir;
 209	enum ice_fltr_ptype flow;
 210
 211	if (!fdir->fdir_prof)
 212		return;
 213
 214	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
 215		ice_vc_fdir_free_prof(vf, flow);
 216
 217	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
 218	fdir->fdir_prof = NULL;
 219}
 220
 221/**
 222 * ice_vc_fdir_parse_flow_fld
 223 * @proto_hdr: virtual channel protocol filter header
 224 * @conf: FDIR configuration for each filter
 225 * @fld: field type array
 226 * @fld_cnt: field counter
 227 *
  228 * Parse the virtual channel filter header and store its fields into the field type array
 229 *
 230 * Return: 0 on success, and other on error.
 231 */
 232static int
 233ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
 234			   struct virtchnl_fdir_fltr_conf *conf,
 235			   enum ice_flow_field *fld, int *fld_cnt)
 236{
 237	struct virtchnl_proto_hdr hdr;
 238	u32 i;
 239
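	/* Work on a local copy of the header: matched field bits are deleted
	 * from it as they are consumed, so the caller's proto_hdr stays
	 * intact and the loop ends once all selected fields are mapped.
	 */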
 240	memcpy(&hdr, proto_hdr, sizeof(hdr));
 241
 242	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
 243	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
 244		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
 245			if (fdir_inset_map[i].mask &&
 246			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
 247			     fdir_inset_map[i].flag))
 248				continue;
 249
 250			fld[*fld_cnt] = fdir_inset_map[i].fld;
 251			*fld_cnt += 1;
 252			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
 253				return -EINVAL;
 254			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
 255						     fdir_inset_map[i].field);
 256		}
 257
 258	return 0;
 259}
 260
 261/**
 262 * ice_vc_fdir_set_flow_fld
 263 * @vf: pointer to the VF structure
 264 * @fltr: virtual channel add cmd buffer
 265 * @conf: FDIR configuration for each filter
 266 * @seg: array of one or more packet segments that describe the flow
 267 *
  268 * Parse the virtual channel add msg buffer's field vector and store the
  269 * fields into the flow's packet segment
 270 *
 271 * Return: 0 on success, and other on error.
 272 */
 273static int
 274ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 275			 struct virtchnl_fdir_fltr_conf *conf,
 276			 struct ice_flow_seg_info *seg)
 277{
 278	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
 279	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
 280	struct device *dev = ice_pf_to_dev(vf->pf);
 281	struct virtchnl_proto_hdrs *proto;
 282	int fld_cnt = 0;
 283	int i;
 284
 285	proto = &rule->proto_hdrs;
 286	for (i = 0; i < proto->count; i++) {
 287		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
 288		int ret;
 289
 290		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
 291		if (ret)
 292			return ret;
 293	}
 294
 295	if (fld_cnt == 0) {
 296		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
 297		return -EINVAL;
 298	}
 299
 300	for (i = 0; i < fld_cnt; i++)
 301		ice_flow_set_fld(seg, fld[i],
 302				 ICE_FLOW_FLD_OFF_INVAL,
 303				 ICE_FLOW_FLD_OFF_INVAL,
 304				 ICE_FLOW_FLD_OFF_INVAL, false);
 305
 306	return 0;
 307}
 308
 309/**
 310 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 311 * @vf: pointer to the VF structure
 312 * @conf: FDIR configuration for each filter
 313 * @seg: array of one or more packet segments that describe the flow
 314 *
 315 * Return: 0 on success, and other on error.
 316 */
 317static int
 318ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
 319			 struct virtchnl_fdir_fltr_conf *conf,
 320			 struct ice_flow_seg_info *seg)
 321{
 322	enum ice_fltr_ptype flow = conf->input.flow_type;
 323	enum ice_fdir_tunnel_type ttype = conf->ttype;
 324	struct device *dev = ice_pf_to_dev(vf->pf);
 325
 326	switch (flow) {
 327	case ICE_FLTR_PTYPE_NON_IP_L2:
 328		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
 329		break;
 330	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
 331		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
 332				  ICE_FLOW_SEG_HDR_IPV4 |
 333				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 334		break;
 335	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
 336		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
 337				  ICE_FLOW_SEG_HDR_IPV4 |
 338				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 339		break;
 340	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
 341		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
 342				  ICE_FLOW_SEG_HDR_IPV4 |
 343				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 344		break;
 345	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
 346		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
 347				  ICE_FLOW_SEG_HDR_IPV4 |
 348				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 349		break;
 350	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
 351		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
 352				  ICE_FLOW_SEG_HDR_IPV4 |
 353				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 354		break;
 355	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
 356		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
 357				  ICE_FLOW_SEG_HDR_IPV4 |
 358				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 359		break;
 360	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 361		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
 362				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 363		break;
 364	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
 365		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
 366				  ICE_FLOW_SEG_HDR_IPV4 |
 367				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 368		break;
 369	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
 370		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 371				  ICE_FLOW_SEG_HDR_IPV4 |
 372				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 373		break;
 374	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
 375	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
 376	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
 377	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
 378		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
 379			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
 380					  ICE_FLOW_SEG_HDR_IPV4 |
 381					  ICE_FLOW_SEG_HDR_IPV_OTHER);
 382		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
 383			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
 384					  ICE_FLOW_SEG_HDR_GTPU_IP |
 385					  ICE_FLOW_SEG_HDR_IPV4 |
 386					  ICE_FLOW_SEG_HDR_IPV_OTHER);
 387		} else {
 388			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
  389				ttype, vf->vf_id);
 390			return -EINVAL;
 391		}
 392		break;
 393	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
 394		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
 395				  ICE_FLOW_SEG_HDR_IPV4 |
 396				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 397		break;
 398	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
 399		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
 400				  ICE_FLOW_SEG_HDR_IPV6 |
 401				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 402		break;
 403	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
 404		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
 405				  ICE_FLOW_SEG_HDR_IPV6 |
 406				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 407		break;
 408	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
 409		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
 410				  ICE_FLOW_SEG_HDR_IPV6 |
 411				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 412		break;
 413	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
 414		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
 415				  ICE_FLOW_SEG_HDR_IPV6 |
 416				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 417		break;
 418	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
 419		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
 420				  ICE_FLOW_SEG_HDR_IPV6 |
 421				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 422		break;
 423	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
 424		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
 425				  ICE_FLOW_SEG_HDR_IPV6 |
 426				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 427		break;
 428	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
 429		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
 430				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 431		break;
 432	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
 433		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
 434				  ICE_FLOW_SEG_HDR_IPV6 |
 435				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 436		break;
 437	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
 438		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 439				  ICE_FLOW_SEG_HDR_IPV6 |
 440				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 441		break;
 442	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
 443		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
 444				  ICE_FLOW_SEG_HDR_IPV6 |
 445				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 446		break;
 447	default:
  448		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
 449			flow, vf->vf_id);
 450		return -EINVAL;
 451	}
 452
 453	return 0;
 454}
 455
 456/**
 457 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 458 * @vf: pointer to the VF structure
 459 * @flow: filter flow type
 460 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 461 */
 462static void
 463ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
 464{
 465	struct ice_vf_fdir *fdir = &vf->fdir;
 466	struct ice_fd_hw_prof *vf_prof;
 467	struct ice_pf *pf = vf->pf;
 468	struct ice_vsi *vf_vsi;
 469	struct device *dev;
 470	struct ice_hw *hw;
 471	u64 prof_id;
 472	int i;
 473
 474	dev = ice_pf_to_dev(pf);
 475	hw = &pf->hw;
 476	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
 477		return;
 478
 479	vf_prof = fdir->fdir_prof[flow];
 480	prof_id = vf_prof->prof_id[tun];
 481
 482	vf_vsi = ice_get_vf_vsi(vf);
 483	if (!vf_vsi) {
 484		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
 485		return;
 486	}
 487
 488	if (!fdir->prof_entry_cnt[flow][tun])
 489		return;
 490
 491	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
 492		if (vf_prof->entry_h[i][tun]) {
 493			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
 494
 495			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
 496			ice_flow_rem_entry(hw, ICE_BLK_FD,
 497					   vf_prof->entry_h[i][tun]);
 498			vf_prof->entry_h[i][tun] = 0;
 499		}
 500
 501	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 502	devm_kfree(dev, vf_prof->fdir_seg[tun]);
 503	vf_prof->fdir_seg[tun] = NULL;
 504
 505	for (i = 0; i < vf_prof->cnt; i++)
 506		vf_prof->vsi_h[i] = 0;
 507
 508	fdir->prof_entry_cnt[flow][tun] = 0;
 509}
 510
 511/**
  512 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 513 * @vf: pointer to the VF structure
 514 */
 515static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
 516{
 517	enum ice_fltr_ptype flow;
 518
 519	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
 520	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
 521		ice_vc_fdir_rem_prof(vf, flow, 0);
 522		ice_vc_fdir_rem_prof(vf, flow, 1);
 523	}
 524}
 525
 526/**
  527 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF
 528 * @fdir: pointer to the VF FDIR structure
 529 */
 530static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
 531{
 532	enum ice_fltr_ptype flow;
 533
 534	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
 535	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
 536		fdir->fdir_fltr_cnt[flow][0] = 0;
 537		fdir->fdir_fltr_cnt[flow][1] = 0;
 538	}
 539}
 540
 541/**
 542 * ice_vc_fdir_has_prof_conflict
 543 * @vf: pointer to the VF structure
 544 * @conf: FDIR configuration for each filter
 545 *
 546 * Check if @conf has conflicting profile with existing profiles
 547 *
  548 * Return: true if @conf conflicts with an existing profile, false otherwise.
 549 */
 550static bool
 551ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
 552			      struct virtchnl_fdir_fltr_conf *conf)
 553{
 554	struct ice_fdir_fltr *desc;
 555
 556	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
 557		struct virtchnl_fdir_fltr_conf *existing_conf;
 558		enum ice_fltr_ptype flow_type_a, flow_type_b;
 559		struct ice_fdir_fltr *a, *b;
 560
 561		existing_conf = to_fltr_conf_from_desc(desc);
 562		a = &existing_conf->input;
 563		b = &conf->input;
 564		flow_type_a = a->flow_type;
 565		flow_type_b = b->flow_type;
 566
 567		/* No need to compare two rules with different tunnel types or
 568		 * with the same protocol type.
 569		 */
 570		if (existing_conf->ttype != conf->ttype ||
 571		    flow_type_a == flow_type_b)
 572			continue;
 573
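		/* A specific L4 rule (UDP/TCP/SCTP) and an "other" rule of
		 * the same IP version would select overlapping profiles, so
		 * treat such a pair as a conflict.
		 */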
 574		switch (flow_type_a) {
 575		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
 576		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
 577		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
 578			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
 579				return true;
 580			break;
 581		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 582			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
 583			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
 584			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
 585				return true;
 586			break;
 587		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
 588		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
 589		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
 590			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
 591				return true;
 592			break;
 593		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
 594			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
 595			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
 596			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
 597				return true;
 598			break;
 599		default:
 600			break;
 601		}
 602	}
 603
 604	return false;
 605}
 606
 607/**
 608 * ice_vc_fdir_write_flow_prof
 609 * @vf: pointer to the VF structure
 610 * @flow: filter flow type
 611 * @seg: array of one or more packet segments that describe the flow
 612 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 613 *
 614 * Write the flow's profile config and packet segment into the hardware
 615 *
 616 * Return: 0 on success, and other on error.
 617 */
 618static int
 619ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
 620			    struct ice_flow_seg_info *seg, int tun)
 621{
 622	struct ice_vf_fdir *fdir = &vf->fdir;
 623	struct ice_vsi *vf_vsi, *ctrl_vsi;
 624	struct ice_flow_seg_info *old_seg;
 625	struct ice_flow_prof *prof = NULL;
 626	struct ice_fd_hw_prof *vf_prof;
 627	struct device *dev;
 628	struct ice_pf *pf;
 629	struct ice_hw *hw;
 630	u64 entry1_h = 0;
 631	u64 entry2_h = 0;
 632	int ret;
 633
 634	pf = vf->pf;
 635	dev = ice_pf_to_dev(pf);
 636	hw = &pf->hw;
 637	vf_vsi = ice_get_vf_vsi(vf);
 638	if (!vf_vsi)
 639		return -EINVAL;
 640
 641	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
 642	if (!ctrl_vsi)
 643		return -EINVAL;
 644
 645	vf_prof = fdir->fdir_prof[flow];
 646	old_seg = vf_prof->fdir_seg[tun];
 647	if (old_seg) {
 648		if (!memcmp(old_seg, seg, sizeof(*seg))) {
 649			dev_dbg(dev, "Duplicated profile for VF %d!\n",
 650				vf->vf_id);
 651			return -EEXIST;
 652		}
 653
 654		if (fdir->fdir_fltr_cnt[flow][tun]) {
 655			ret = -EINVAL;
 656			dev_dbg(dev, "Input set conflicts for VF %d\n",
 657				vf->vf_id);
 658			goto err_exit;
 659		}
 660
 661		/* remove previously allocated profile */
 662		ice_vc_fdir_rem_prof(vf, flow, tun);
 663	}
 664
 665	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
 666				tun + 1, false, &prof);
 667	if (ret) {
 668		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
 669			flow, vf->vf_id);
 670		goto err_exit;
 671	}
 672
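	/* Add two flow entries for this profile: one for the VF's data VSI
	 * and one for its control VSI. Both handles are recorded below so
	 * ice_vc_fdir_rem_prof() can unwind them.
	 */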
 673	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
 674				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 675				 seg, &entry1_h);
 676	if (ret) {
 677		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
 678			flow, vf->vf_id);
 679		goto err_prof;
 680	}
 681
 682	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
 683				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 684				 seg, &entry2_h);
 685	if (ret) {
 686		dev_dbg(dev,
 687			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
 688			flow, vf->vf_id);
 689		goto err_entry_1;
 690	}
 691
 692	vf_prof->fdir_seg[tun] = seg;
 693	vf_prof->cnt = 0;
 694	fdir->prof_entry_cnt[flow][tun] = 0;
 695
 696	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
 697	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
 698	vf_prof->cnt++;
 699	fdir->prof_entry_cnt[flow][tun]++;
 700
 701	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
 702	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
 703	vf_prof->cnt++;
 704	fdir->prof_entry_cnt[flow][tun]++;
 705
 706	vf_prof->prof_id[tun] = prof->id;
 707
 708	return 0;
 709
 710err_entry_1:
 711	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 712			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
 713	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 714err_prof:
 715	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
 716err_exit:
 717	return ret;
 718}
 719
 720/**
 721 * ice_vc_fdir_config_input_set
 722 * @vf: pointer to the VF structure
 723 * @fltr: virtual channel add cmd buffer
 724 * @conf: FDIR configuration for each filter
 725 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 726 *
  727 * Configure the input set type and value from the virtual channel add msg buffer
 728 *
 729 * Return: 0 on success, and other on error.
 730 */
 731static int
 732ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 733			     struct virtchnl_fdir_fltr_conf *conf, int tun)
 734{
 735	struct ice_fdir_fltr *input = &conf->input;
 736	struct device *dev = ice_pf_to_dev(vf->pf);
 737	struct ice_flow_seg_info *seg;
 738	enum ice_fltr_ptype flow;
 739	int ret;
 740
 741	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
 742	if (ret) {
 743		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
 744			vf->vf_id);
 745		return ret;
 746	}
 747
 748	flow = input->flow_type;
 749	ret = ice_vc_fdir_alloc_prof(vf, flow);
 750	if (ret) {
 751		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
 752		return ret;
 753	}
 754
 755	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
 756	if (!seg)
 757		return -ENOMEM;
 758
 759	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
 760	if (ret) {
 761		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
 762		goto err_exit;
 763	}
 764
 765	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
 766	if (ret) {
 767		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
 768		goto err_exit;
 769	}
 770
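	/* -EEXIST means an identical profile is already programmed; reuse it
	 * and free the segment that was just built.
	 */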
 771	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
 772	if (ret == -EEXIST) {
 773		devm_kfree(dev, seg);
 774	} else if (ret) {
 775		dev_dbg(dev, "Write flow profile for VF %d failed\n",
 776			vf->vf_id);
 777		goto err_exit;
 778	}
 779
 780	return 0;
 781
 782err_exit:
 783	devm_kfree(dev, seg);
 784	return ret;
 785}
 786
 787/**
 788 * ice_vc_fdir_parse_pattern
 789 * @vf: pointer to the VF info
 790 * @fltr: virtual channel add cmd buffer
 791 * @conf: FDIR configuration for each filter
 792 *
  793 * Parse the virtual channel filter's pattern and store the result in conf
 794 *
 795 * Return: 0 on success, and other on error.
 796 */
 797static int
 798ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 799			  struct virtchnl_fdir_fltr_conf *conf)
 800{
 801	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
 802	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
 803	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
 804	struct device *dev = ice_pf_to_dev(vf->pf);
 805	struct ice_fdir_fltr *input = &conf->input;
 806	int i;
 807
 808	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
 809		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
 810			proto->count, vf->vf_id);
 811		return -EINVAL;
 812	}
 813
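	/* Walk the protocol headers in order; l3 remembers the outer IP
	 * version so later L4 and tunnel headers can refine input->flow_type.
	 */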
 814	for (i = 0; i < proto->count; i++) {
 815		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
 816		struct ip_esp_hdr *esph;
 817		struct ip_auth_hdr *ah;
 818		struct sctphdr *sctph;
 819		struct ipv6hdr *ip6h;
 820		struct udphdr *udph;
 821		struct tcphdr *tcph;
 822		struct ethhdr *eth;
 823		struct iphdr *iph;
 824		u8 s_field;
 825		u8 *rawh;
 826
 827		switch (hdr->type) {
 828		case VIRTCHNL_PROTO_HDR_ETH:
 829			eth = (struct ethhdr *)hdr->buffer;
 830			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
 831
 832			if (hdr->field_selector)
 833				input->ext_data.ether_type = eth->h_proto;
 834			break;
 835		case VIRTCHNL_PROTO_HDR_IPV4:
 836			iph = (struct iphdr *)hdr->buffer;
 837			l3 = VIRTCHNL_PROTO_HDR_IPV4;
 838			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
 839
 840			if (hdr->field_selector) {
 841				input->ip.v4.src_ip = iph->saddr;
 842				input->ip.v4.dst_ip = iph->daddr;
 843				input->ip.v4.tos = iph->tos;
 844				input->ip.v4.proto = iph->protocol;
 845			}
 846			break;
 847		case VIRTCHNL_PROTO_HDR_IPV6:
 848			ip6h = (struct ipv6hdr *)hdr->buffer;
 849			l3 = VIRTCHNL_PROTO_HDR_IPV6;
 850			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
 851
 852			if (hdr->field_selector) {
 853				memcpy(input->ip.v6.src_ip,
 854				       ip6h->saddr.in6_u.u6_addr8,
 855				       sizeof(ip6h->saddr));
 856				memcpy(input->ip.v6.dst_ip,
 857				       ip6h->daddr.in6_u.u6_addr8,
 858				       sizeof(ip6h->daddr));
 859				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
 860						  (ip6h->flow_lbl[0] >> 4);
 861				input->ip.v6.proto = ip6h->nexthdr;
 862			}
 863			break;
 864		case VIRTCHNL_PROTO_HDR_TCP:
 865			tcph = (struct tcphdr *)hdr->buffer;
 866			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 867				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
 868			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 869				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
 870
 871			if (hdr->field_selector) {
 872				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
 873					input->ip.v4.src_port = tcph->source;
 874					input->ip.v4.dst_port = tcph->dest;
 875				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
 876					input->ip.v6.src_port = tcph->source;
 877					input->ip.v6.dst_port = tcph->dest;
 878				}
 879			}
 880			break;
 881		case VIRTCHNL_PROTO_HDR_UDP:
 882			udph = (struct udphdr *)hdr->buffer;
 883			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 884				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
 885			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 886				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
 887
 888			if (hdr->field_selector) {
 889				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
 890					input->ip.v4.src_port = udph->source;
 891					input->ip.v4.dst_port = udph->dest;
 892				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
 893					input->ip.v6.src_port = udph->source;
 894					input->ip.v6.dst_port = udph->dest;
 895				}
 896			}
 897			break;
 898		case VIRTCHNL_PROTO_HDR_SCTP:
 899			sctph = (struct sctphdr *)hdr->buffer;
 900			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 901				input->flow_type =
 902					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
 903			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 904				input->flow_type =
 905					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
 906
 907			if (hdr->field_selector) {
 908				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
 909					input->ip.v4.src_port = sctph->source;
 910					input->ip.v4.dst_port = sctph->dest;
 911				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
 912					input->ip.v6.src_port = sctph->source;
 913					input->ip.v6.dst_port = sctph->dest;
 914				}
 915			}
 916			break;
 917		case VIRTCHNL_PROTO_HDR_L2TPV3:
 918			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 919				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
 920			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 921				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
 922
 923			if (hdr->field_selector)
 924				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
 925			break;
 926		case VIRTCHNL_PROTO_HDR_ESP:
 927			esph = (struct ip_esp_hdr *)hdr->buffer;
 928			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
 929			    l4 == VIRTCHNL_PROTO_HDR_UDP)
 930				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
 931			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
 932				 l4 == VIRTCHNL_PROTO_HDR_UDP)
 933				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
 934			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
 935				 l4 == VIRTCHNL_PROTO_HDR_NONE)
 936				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
 937			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
 938				 l4 == VIRTCHNL_PROTO_HDR_NONE)
 939				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
 940
 941			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
 942				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
 943			else
 944				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
 945
 946			if (hdr->field_selector) {
 947				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 948					input->ip.v4.sec_parm_idx = esph->spi;
 949				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 950					input->ip.v6.sec_parm_idx = esph->spi;
 951			}
 952			break;
 953		case VIRTCHNL_PROTO_HDR_AH:
 954			ah = (struct ip_auth_hdr *)hdr->buffer;
 955			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 956				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
 957			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 958				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
 959
 960			if (hdr->field_selector) {
 961				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 962					input->ip.v4.sec_parm_idx = ah->spi;
 963				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 964					input->ip.v6.sec_parm_idx = ah->spi;
 965			}
 966			break;
 967		case VIRTCHNL_PROTO_HDR_PFCP:
 968			rawh = (u8 *)hdr->buffer;
 969			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
 970			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
 971				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
 972			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
 973				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
 974			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
 975				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
 976			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
 977				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
 978
 979			if (hdr->field_selector) {
 980				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 981					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
 982				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 983					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
 984			}
 985			break;
 986		case VIRTCHNL_PROTO_HDR_GTPU_IP:
 987			rawh = (u8 *)hdr->buffer;
 988			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
 989
 990			if (hdr->field_selector)
 991				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
 992			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
 993			break;
 994		case VIRTCHNL_PROTO_HDR_GTPU_EH:
 995			rawh = (u8 *)hdr->buffer;
 996
 997			if (hdr->field_selector)
 998				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
 999			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1000			break;
1001		default:
 1002			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
1003				hdr->type, vf->vf_id);
1004			return -EINVAL;
1005		}
1006	}
1007
1008	return 0;
1009}
1010
1011/**
1012 * ice_vc_fdir_parse_action
1013 * @vf: pointer to the VF info
1014 * @fltr: virtual channel add cmd buffer
1015 * @conf: FDIR configuration for each filter
1016 *
 1017 * Parse the virtual channel filter's actions and store them in conf
1018 *
1019 * Return: 0 on success, and other on error.
1020 */
1021static int
1022ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1023			 struct virtchnl_fdir_fltr_conf *conf)
1024{
1025	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1026	struct device *dev = ice_pf_to_dev(vf->pf);
1027	struct ice_fdir_fltr *input = &conf->input;
1028	u32 dest_num = 0;
1029	u32 mark_num = 0;
1030	int i;
1031
1032	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1033		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1034			as->count, vf->vf_id);
1035		return -EINVAL;
1036	}
1037
1038	for (i = 0; i < as->count; i++) {
1039		struct virtchnl_filter_action *action = &as->actions[i];
1040
1041		switch (action->type) {
1042		case VIRTCHNL_ACTION_PASSTHRU:
1043			dest_num++;
1044			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1045			break;
1046		case VIRTCHNL_ACTION_DROP:
1047			dest_num++;
1048			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1049			break;
1050		case VIRTCHNL_ACTION_QUEUE:
1051			dest_num++;
1052			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1053			input->q_index = action->act_conf.queue.index;
1054			break;
1055		case VIRTCHNL_ACTION_Q_REGION:
1056			dest_num++;
1057			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1058			input->q_index = action->act_conf.queue.index;
1059			input->q_region = action->act_conf.queue.region;
1060			break;
1061		case VIRTCHNL_ACTION_MARK:
1062			mark_num++;
1063			input->fltr_id = action->act_conf.mark_id;
1064			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1065			break;
1066		default:
1067			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1068				action->type, vf->vf_id);
1069			return -EINVAL;
1070		}
1071	}
1072
1073	if (dest_num == 0 || dest_num >= 2) {
1074		dev_dbg(dev, "Invalid destination action for VF %d\n",
1075			vf->vf_id);
1076		return -EINVAL;
1077	}
1078
1079	if (mark_num >= 2) {
1080		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1081		return -EINVAL;
1082	}
1083
1084	return 0;
1085}
1086
1087/**
1088 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1089 * @vf: pointer to the VF info
1090 * @fltr: virtual channel add cmd buffer
1091 * @conf: FDIR configuration for each filter
1092 *
1093 * Return: 0 on success, and other on error.
1094 */
1095static int
1096ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1097			  struct virtchnl_fdir_fltr_conf *conf)
1098{
1099	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1100	int ret;
1101
1102	if (!ice_vc_validate_pattern(vf, proto))
1103		return -EINVAL;
1104
1105	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1106	if (ret)
1107		return ret;
1108
1109	return ice_vc_fdir_parse_action(vf, fltr, conf);
1110}
1111
1112/**
1113 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
1114 * @conf_a: FDIR configuration for filter a
1115 * @conf_b: FDIR configuration for filter b
1116 *
 1117 * Return: true if the two rules have the same value, false otherwise.
1118 */
1119static bool
1120ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1121		       struct virtchnl_fdir_fltr_conf *conf_b)
1122{
1123	struct ice_fdir_fltr *a = &conf_a->input;
1124	struct ice_fdir_fltr *b = &conf_b->input;
1125
1126	if (conf_a->ttype != conf_b->ttype)
1127		return false;
1128	if (a->flow_type != b->flow_type)
1129		return false;
1130	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1131		return false;
1132	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1133		return false;
1134	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1135		return false;
1136	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1137		return false;
1138	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1139		return false;
1140	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1141		return false;
1142	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1143		return false;
1144	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1145		return false;
1146
1147	return true;
1148}
1149
1150/**
1151 * ice_vc_fdir_is_dup_fltr
1152 * @vf: pointer to the VF info
1153 * @conf: FDIR configuration for each filter
1154 *
 1155 * Check if there is a duplicated rule with the same conf value
 1156 *
 1157 * Return: true if a duplicate rule is found, false otherwise.
1158 */
1159static bool
1160ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1161{
1162	struct ice_fdir_fltr *desc;
1163	bool ret;
1164
1165	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1166		struct virtchnl_fdir_fltr_conf *node =
1167				to_fltr_conf_from_desc(desc);
1168
1169		ret = ice_vc_fdir_comp_rules(node, conf);
1170		if (ret)
1171			return true;
1172	}
1173
1174	return false;
1175}
1176
1177/**
1178 * ice_vc_fdir_insert_entry
1179 * @vf: pointer to the VF info
1180 * @conf: FDIR configuration for each filter
1181 * @id: pointer to ID value allocated by driver
1182 *
1183 * Insert FDIR conf entry into list and allocate ID for this filter
1184 *
 1185 * Return: 0 on success, and other on error.
1186 */
1187static int
1188ice_vc_fdir_insert_entry(struct ice_vf *vf,
1189			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1190{
1191	struct ice_fdir_fltr *input = &conf->input;
1192	int i;
1193
1194	/* alloc ID corresponding with conf */
1195	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1196		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1197	if (i < 0)
1198		return -EINVAL;
1199	*id = i;
1200
1201	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1202	return 0;
1203}
1204
1205/**
1206 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1207 * @vf: pointer to the VF info
1208 * @conf: FDIR configuration for each filter
1209 * @id: filter rule's ID
1210 */
1211static void
1212ice_vc_fdir_remove_entry(struct ice_vf *vf,
1213			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1214{
1215	struct ice_fdir_fltr *input = &conf->input;
1216
1217	idr_remove(&vf->fdir.fdir_rule_idr, id);
1218	list_del(&input->fltr_node);
1219}
1220
1221/**
1222 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1223 * @vf: pointer to the VF info
1224 * @id: filter rule's ID
1225 *
 1226 * Return: pointer to the FDIR conf entry on success, NULL if the ID is not found.
1227 */
1228static struct virtchnl_fdir_fltr_conf *
1229ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1230{
1231	return idr_find(&vf->fdir.fdir_rule_idr, id);
1232}
1233
1234/**
 1235 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
1236 * @vf: pointer to the VF info
1237 */
1238static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1239{
1240	struct virtchnl_fdir_fltr_conf *conf;
1241	struct ice_fdir_fltr *desc, *temp;
1242
1243	list_for_each_entry_safe(desc, temp,
1244				 &vf->fdir.fdir_rule_list, fltr_node) {
1245		conf = to_fltr_conf_from_desc(desc);
1246		list_del(&desc->fltr_node);
1247		devm_kfree(ice_pf_to_dev(vf->pf), conf);
1248	}
1249}
1250
1251/**
1252 * ice_vc_fdir_write_fltr - write filter rule into hardware
1253 * @vf: pointer to the VF info
1254 * @conf: FDIR configuration for each filter
 1255 * @add: true implies add rule, false implies del rule
1256 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1257 *
1258 * Return: 0 on success, and other on error.
1259 */
1260static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1261				  struct virtchnl_fdir_fltr_conf *conf,
1262				  bool add, bool is_tun)
1263{
1264	struct ice_fdir_fltr *input = &conf->input;
1265	struct ice_vsi *vsi, *ctrl_vsi;
1266	struct ice_fltr_desc desc;
1267	struct device *dev;
1268	struct ice_pf *pf;
1269	struct ice_hw *hw;
1270	int ret;
1271	u8 *pkt;
1272
1273	pf = vf->pf;
1274	dev = ice_pf_to_dev(pf);
1275	hw = &pf->hw;
1276	vsi = ice_get_vf_vsi(vf);
1277	if (!vsi) {
1278		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1279		return -EINVAL;
1280	}
1281
1282	input->dest_vsi = vsi->idx;
1283	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1284
1285	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1286	if (!ctrl_vsi) {
1287		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1288		return -EINVAL;
1289	}
1290
1291	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1292	if (!pkt)
1293		return -ENOMEM;
1294
1295	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1296	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1297	if (ret) {
1298		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1299			vf->vf_id, input->flow_type);
1300		goto err_free_pkt;
1301	}
1302
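	/* Hand the programming descriptor and training packet to the control
	 * VSI Tx path. The packet buffer is freed here only on failure; on
	 * success it is assumed to be consumed and freed by Tx cleanup.
	 */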
1303	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1304	if (ret)
1305		goto err_free_pkt;
1306
1307	return 0;
1308
1309err_free_pkt:
1310	devm_kfree(dev, pkt);
1311	return ret;
1312}
1313
1314/**
1315 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
1316 * @t: pointer to timer_list
1317 */
1318static void ice_vf_fdir_timer(struct timer_list *t)
1319{
1320	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1321	struct ice_vf_fdir_ctx *ctx_done;
1322	struct ice_vf_fdir *fdir;
1323	unsigned long flags;
1324	struct ice_vf *vf;
1325	struct ice_pf *pf;
1326
1327	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1328	vf = container_of(fdir, struct ice_vf, fdir);
1329	ctx_done = &fdir->ctx_done;
1330	pf = vf->pf;
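	/* ctx_lock serializes the timeout path against the Rx IRQ handler:
	 * whichever runs first clears ICE_VF_FDIR_CTX_VALID on ctx_irq and
	 * publishes its result to ctx_done for the service task to flush.
	 */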
1331	spin_lock_irqsave(&fdir->ctx_lock, flags);
1332	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1333		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1334		WARN_ON_ONCE(1);
1335		return;
1336	}
1337
1338	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1339
1340	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1341	ctx_done->conf = ctx_irq->conf;
1342	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1343	ctx_done->v_opcode = ctx_irq->v_opcode;
1344	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1345
1346	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1347	ice_service_task_schedule(pf);
1348}
1349
1350/**
1351 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1352 * @ctrl_vsi: pointer to a VF's CTRL VSI
1353 * @rx_desc: pointer to FDIR Rx queue descriptor
1354 */
1355void
1356ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1357			union ice_32b_rx_flex_desc *rx_desc)
1358{
1359	struct ice_pf *pf = ctrl_vsi->back;
1360	struct ice_vf *vf = ctrl_vsi->vf;
1361	struct ice_vf_fdir_ctx *ctx_done;
1362	struct ice_vf_fdir_ctx *ctx_irq;
1363	struct ice_vf_fdir *fdir;
1364	unsigned long flags;
1365	struct device *dev;
1366	int ret;
1367
1368	if (WARN_ON(!vf))
1369		return;
1370
1371	fdir = &vf->fdir;
1372	ctx_done = &fdir->ctx_done;
1373	ctx_irq = &fdir->ctx_irq;
1374	dev = ice_pf_to_dev(pf);
1375	spin_lock_irqsave(&fdir->ctx_lock, flags);
1376	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1377		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1378		WARN_ON_ONCE(1);
1379		return;
1380	}
1381
1382	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1383
1384	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1385	ctx_done->conf = ctx_irq->conf;
1386	ctx_done->stat = ICE_FDIR_CTX_IRQ;
1387	ctx_done->v_opcode = ctx_irq->v_opcode;
1388	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1389	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1390
1391	ret = del_timer(&ctx_irq->rx_tmr);
1392	if (!ret)
1393		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1394
1395	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1396	ice_service_task_schedule(pf);
1397}
1398
1399/**
1400 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1401 * @vf: pointer to the VF info
1402 */
1403static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1404{
1405	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
1406	struct ice_vsi *vf_vsi;
1407	struct device *dev;
1408	struct ice_pf *pf;
1409	struct ice_hw *hw;
1410	u16 vsi_num;
1411
1412	pf = vf->pf;
1413	hw = &pf->hw;
1414	dev = ice_pf_to_dev(pf);
1415	vf_vsi = ice_get_vf_vsi(vf);
1416	if (!vf_vsi) {
1417		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1418		return;
1419	}
1420
1421	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1422
1423	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1424	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
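	/* The guaranteed/best-effort filter fields are packed differently on
	 * E830 versus E810 MAC types.
	 */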
1425	switch (hw->mac_type) {
1426	case ICE_MAC_E830:
1427		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1428		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1429		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1430		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1431		break;
1432	case ICE_MAC_E810:
1433	default:
1434		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1435		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1436		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1437		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1438	}
1439
1440	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1441		vf->vf_id, fd_size_g, fd_size_b);
1442	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1443		vf->vf_id, fd_cnt_g, fd_cnt_b);
1444}
1445
1446/**
1447 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1448 * @vf: pointer to the VF info
1449 * @ctx: FDIR context info for post processing
1450 * @status: virtchnl FDIR program status
1451 *
1452 * Return: 0 on success, and other on error.
1453 */
1454static int
1455ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1456		      enum virtchnl_fdir_prgm_status *status)
1457{
1458	struct device *dev = ice_pf_to_dev(vf->pf);
1459	u32 stat_err, error, prog_id;
1460	int ret;
1461
1462	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1463	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
1464	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
1465		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1466		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1467		ret = -EINVAL;
1468		goto err_exit;
1469	}
1470
1471	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
1472	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1473	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
 1474		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
 1475			vf->vf_id);
1476		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1477		ret = -EINVAL;
1478		goto err_exit;
1479	}
1480
1481	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1482	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
 1483		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
 1484			vf->vf_id);
1485		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1486		ret = -EINVAL;
1487		goto err_exit;
1488	}
1489
1490	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
1491	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1492		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1493			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
1494				vf->vf_id);
1495			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1496		} else {
1497			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
1498				vf->vf_id);
1499			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1500		}
1501		ret = -EINVAL;
1502		goto err_exit;
1503	}
1504
1505	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
1506	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1507		dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
1508		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1509		ret = -EINVAL;
1510		goto err_exit;
1511	}
1512
1513	*status = VIRTCHNL_FDIR_SUCCESS;
1514
1515	return 0;
1516
1517err_exit:
1518	ice_vf_fdir_dump_info(vf);
1519	return ret;
1520}
1521
1522/**
1523 * ice_vc_add_fdir_fltr_post
1524 * @vf: pointer to the VF structure
1525 * @ctx: FDIR context info for post processing
1526 * @status: virtchnl FDIR program status
1527 * @success: true implies success, false implies failure
1528 *
 1529 * Post process for the flow director add command. On success, do the post
 1530 * processing and send a success msg back via virtchnl. Otherwise, revert the
 1531 * context and send a failure msg back via virtchnl.
1532 *
1533 * Return: 0 on success, and other on error.
1534 */
1535static int
1536ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1537			  enum virtchnl_fdir_prgm_status status,
1538			  bool success)
1539{
1540	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1541	struct device *dev = ice_pf_to_dev(vf->pf);
1542	enum virtchnl_status_code v_ret;
1543	struct virtchnl_fdir_add *resp;
1544	int ret, len, is_tun;
1545
1546	v_ret = VIRTCHNL_STATUS_SUCCESS;
1547	len = sizeof(*resp);
1548	resp = kzalloc(len, GFP_KERNEL);
1549	if (!resp) {
1550		len = 0;
1551		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1552		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1553		goto err_exit;
1554	}
1555
1556	if (!success)
1557		goto err_exit;
1558
1559	is_tun = 0;
1560	resp->status = status;
1561	resp->flow_id = conf->flow_id;
1562	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1563
1564	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1565				    (u8 *)resp, len);
1566	kfree(resp);
1567
1568	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1569		vf->vf_id, conf->flow_id,
1570		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1571		"add" : "del");
1572	return ret;
1573
1574err_exit:
1575	if (resp)
1576		resp->status = status;
1577	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1578	devm_kfree(dev, conf);
1579
1580	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1581				    (u8 *)resp, len);
1582	kfree(resp);
1583	return ret;
1584}
1585
1586/**
1587 * ice_vc_del_fdir_fltr_post
1588 * @vf: pointer to the VF structure
1589 * @ctx: FDIR context info for post processing
1590 * @status: virtchnl FDIR program status
1591 * @success: true implies success, false implies failure
1592 *
 1593 * Post process for the flow director del command. On success, do the post
 1594 * processing and send a success msg back via virtchnl. Otherwise, revert the
 1595 * context and send a failure msg back via virtchnl.
1596 *
1597 * Return: 0 on success, and other on error.
1598 */
1599static int
1600ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1601			  enum virtchnl_fdir_prgm_status status,
1602			  bool success)
1603{
1604	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1605	struct device *dev = ice_pf_to_dev(vf->pf);
1606	enum virtchnl_status_code v_ret;
1607	struct virtchnl_fdir_del *resp;
1608	int ret, len, is_tun;
1609
1610	v_ret = VIRTCHNL_STATUS_SUCCESS;
1611	len = sizeof(*resp);
1612	resp = kzalloc(len, GFP_KERNEL);
1613	if (!resp) {
1614		len = 0;
1615		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1616		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1617		goto err_exit;
1618	}
1619
1620	if (!success)
1621		goto err_exit;
1622
1623	is_tun = 0;
1624	resp->status = status;
1625	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1626	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1627
1628	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1629				    (u8 *)resp, len);
1630	kfree(resp);
1631
1632	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1633		vf->vf_id, conf->flow_id,
1634		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1635		"add" : "del");
1636	devm_kfree(dev, conf);
1637	return ret;
1638
1639err_exit:
1640	if (resp)
1641		resp->status = status;
1642	if (success)
1643		devm_kfree(dev, conf);
1644
1645	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1646				    (u8 *)resp, len);
1647	kfree(resp);
1648	return ret;
1649}
1650
1651/**
1652 * ice_flush_fdir_ctx
1653 * @pf: pointer to the PF structure
1654 *
 1655 * Flush all pending events on the ctx_done list and process them.
1656 */
1657void ice_flush_fdir_ctx(struct ice_pf *pf)
1658{
1659	struct ice_vf *vf;
1660	unsigned int bkt;
1661
1662	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1663		return;
1664
1665	mutex_lock(&pf->vfs.table_lock);
1666	ice_for_each_vf(pf, bkt, vf) {
1667		struct device *dev = ice_pf_to_dev(pf);
1668		enum virtchnl_fdir_prgm_status status;
1669		struct ice_vf_fdir_ctx *ctx;
1670		unsigned long flags;
1671		int ret;
1672
1673		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1674			continue;
1675
1676		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1677			continue;
1678
1679		ctx = &vf->fdir.ctx_done;
1680		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1681		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1682			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1683			continue;
1684		}
1685		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1686
1687		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1688		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1689			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1690			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1691				vf->vf_id);
1692			goto err_exit;
1693		}
1694
1695		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1696		if (ret)
1697			goto err_exit;
1698
1699		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1700			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1701		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1702			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1703		else
1704			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1705
1706		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1707		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1708		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1709		continue;
1710err_exit:
1711		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1712			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1713		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1714			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1715		else
1716			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1717
1718		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1719		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1720		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1721	}
1722	mutex_unlock(&pf->vfs.table_lock);
1723}
1724
1725/**
1726 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
1727 * @vf: pointer to the VF structure
1728 * @conf: FDIR configuration for each filter
1729 * @v_opcode: virtual channel operation code
1730 *
1731 * Return: 0 on success, and other on error.
1732 */
1733static int
1734ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
1735			enum virtchnl_ops v_opcode)
1736{
1737	struct device *dev = ice_pf_to_dev(vf->pf);
1738	struct ice_vf_fdir_ctx *ctx;
1739	unsigned long flags;
1740
1741	ctx = &vf->fdir.ctx_irq;
1742	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1743	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
1744	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
1745		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1746		dev_dbg(dev, "VF %d: Last request is still in progress\n",
1747			vf->vf_id);
1748		return -EBUSY;
1749	}
1750	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
1751	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1752
1753	ctx->conf = conf;
1754	ctx->v_opcode = v_opcode;
1755	ctx->stat = ICE_FDIR_CTX_READY;
1756	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
1757
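	/* Arm a 10 ms timeout: if no programming status arrives from the
	 * control VSI in time, ice_vf_fdir_timer() reports
	 * ICE_FDIR_CTX_TIMEOUT for this request.
	 */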
1758	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1759
1760	return 0;
1761}
1762
1763/**
1764 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
1765 * @vf: pointer to the VF structure
1768 */
1769static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
1770{
1771	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
1772	unsigned long flags;
1773
1774	del_timer(&ctx->rx_tmr);
1775	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1776	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1777	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1778}
1779
1780/**
1781 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
1782 * @vf: pointer to the VF info
1783 * @msg: pointer to the msg buffer
1784 *
1785 * Return: 0 on success, and other on error.
1786 */
1787int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
1788{
1789	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
1790	struct virtchnl_fdir_add *stat = NULL;
1791	struct virtchnl_fdir_fltr_conf *conf;
1792	enum virtchnl_status_code v_ret;
1793	struct device *dev;
1794	struct ice_pf *pf;
1795	int is_tun = 0;
1796	int len = 0;
1797	int ret;
1798
1799	pf = vf->pf;
1800	dev = ice_pf_to_dev(pf);
1801	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1802	if (ret) {
1803		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1804		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1805		goto err_exit;
1806	}
1807
1808	ret = ice_vf_start_ctrl_vsi(vf);
1809	if (ret && (ret != -EEXIST)) {
1810		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1811		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
1812			vf->vf_id, ret);
1813		goto err_exit;
1814	}
1815
1816	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1817	if (!stat) {
1818		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1819		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1820		goto err_exit;
1821	}
1822
1823	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
1824	if (!conf) {
1825		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1826		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
1827		goto err_exit;
1828	}
1829
1830	len = sizeof(*stat);
1831	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
1832	if (ret) {
1833		v_ret = VIRTCHNL_STATUS_SUCCESS;
1834		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1835		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
1836		goto err_free_conf;
1837	}
1838
1839	if (fltr->validate_only) {
1840		v_ret = VIRTCHNL_STATUS_SUCCESS;
1841		stat->status = VIRTCHNL_FDIR_SUCCESS;
1842		devm_kfree(dev, conf);
1843		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
1844					    v_ret, (u8 *)stat, len);
1845		goto exit;
1846	}
1847
1848	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
1849	if (ret) {
1850		v_ret = VIRTCHNL_STATUS_SUCCESS;
1851		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
1852		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
1853			vf->vf_id, ret);
1854		goto err_free_conf;
1855	}
1856
1857	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
1858	if (ret) {
1859		v_ret = VIRTCHNL_STATUS_SUCCESS;
1860		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
1861		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
1862			vf->vf_id);
1863		goto err_free_conf;
1864	}
1865
1866	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
1867	if (ret) {
1868		v_ret = VIRTCHNL_STATUS_SUCCESS;
1869		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1870		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
1871		goto err_free_conf;
1872	}
1873
1874	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
1875	if (ret) {
1876		v_ret = VIRTCHNL_STATUS_SUCCESS;
1877		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1878		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1879		goto err_rem_entry;
1880	}
1881
1882	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
1883	if (ret) {
1884		v_ret = VIRTCHNL_STATUS_SUCCESS;
1885		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1886		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1887			vf->vf_id, ret);
1888		goto err_clr_irq;
1889	}
1890
1891exit:
1892	kfree(stat);
1893	return ret;
1894
1895err_clr_irq:
1896	ice_vc_fdir_clear_irq_ctx(vf);
1897err_rem_entry:
1898	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1899err_free_conf:
1900	devm_kfree(dev, conf);
1901err_exit:
1902	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
1903				    (u8 *)stat, len);
1904	kfree(stat);
1905	return ret;
1906}
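
The error handling above is the usual layered goto unwind: each label undoes
exactly the steps that completed before the failure, in reverse order (clear
the IRQ context, drop the list entry, free the conf, then report back to the
VF). A reduced sketch of the pattern, with hypothetical demo_* stand-ins:

	#include <linux/errno.h>

	static int demo_insert_entry(void)  { return 0; }
	static int demo_set_irq_ctx(void)   { return -EBUSY; }
	static void demo_remove_entry(void) { }

	static int demo_add(void)
	{
		int err;

		err = demo_insert_entry();
		if (err)
			return err;		/* nothing to undo yet */

		err = demo_set_irq_ctx();
		if (err)
			goto err_rem_entry;	/* undo only what succeeded */

		return 0;

	err_rem_entry:
		demo_remove_entry();
		return err;
	}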
1907
1908/**
1909 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
1910 * @vf: pointer to the VF info
1911 * @msg: pointer to the msg buffer
1912 *
1913 * Return: 0 on success, and other on error.
1914 */
1915int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
1916{
1917	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
1918	struct virtchnl_fdir_del *stat = NULL;
1919	struct virtchnl_fdir_fltr_conf *conf;
 1920	enum virtchnl_status_code v_ret;
1921	struct device *dev;
1922	struct ice_pf *pf;
1923	int is_tun = 0;
1924	int len = 0;
1925	int ret;
1926
1927	pf = vf->pf;
1928	dev = ice_pf_to_dev(pf);
1929	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1930	if (ret) {
1931		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1932		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1933		goto err_exit;
1934	}
1935
1936	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1937	if (!stat) {
1938		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1939		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1940		goto err_exit;
1941	}
1942
1943	len = sizeof(*stat);
1944
1945	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
1946	if (!conf) {
1947		v_ret = VIRTCHNL_STATUS_SUCCESS;
1948		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1949		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
1950			vf->vf_id, fltr->flow_id);
1951		goto err_exit;
1952	}
1953
1954	/* Just return failure when ctrl_vsi idx is invalid */
1955	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
1956		v_ret = VIRTCHNL_STATUS_SUCCESS;
1957		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1958		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
1959		goto err_exit;
1960	}
1961
1962	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
1963	if (ret) {
1964		v_ret = VIRTCHNL_STATUS_SUCCESS;
1965		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1966		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1967		goto err_exit;
1968	}
1969
1970	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
1971	if (ret) {
1972		v_ret = VIRTCHNL_STATUS_SUCCESS;
1973		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1974		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1975			vf->vf_id, ret);
1976		goto err_del_tmr;
1977	}
1978
1979	kfree(stat);
1980
1981	return ret;
1982
1983err_del_tmr:
1984	ice_vc_fdir_clear_irq_ctx(vf);
1985err_exit:
1986	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
1987				    (u8 *)stat, len);
1988	kfree(stat);
1989	return ret;
1990}
1991
1992/**
1993 * ice_vf_fdir_init - init FDIR resource for VF
1994 * @vf: pointer to the VF info
1995 */
1996void ice_vf_fdir_init(struct ice_vf *vf)
1997{
1998	struct ice_vf_fdir *fdir = &vf->fdir;
1999
2000	idr_init(&fdir->fdir_rule_idr);
2001	INIT_LIST_HEAD(&fdir->fdir_rule_list);
2002
2003	spin_lock_init(&fdir->ctx_lock);
2004	fdir->ctx_irq.flags = 0;
2005	fdir->ctx_done.flags = 0;
2006	ice_vc_fdir_reset_cnt_all(fdir);
2007}
2008
2009/**
2010 * ice_vf_fdir_exit - destroy FDIR resource for VF
2011 * @vf: pointer to the VF info
2012 */
2013void ice_vf_fdir_exit(struct ice_vf *vf)
2014{
2015	ice_vc_fdir_flush_entry(vf);
2016	idr_destroy(&vf->fdir.fdir_rule_idr);
2017	ice_vc_fdir_rem_prof_all(vf);
2018	ice_vc_fdir_free_prof_all(vf);
2019}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2021-2023, Intel Corporation. */
   3
   4#include "ice.h"
   5#include "ice_base.h"
   6#include "ice_lib.h"
   7#include "ice_flow.h"
   8#include "ice_vf_lib_private.h"
   9
  10#define to_fltr_conf_from_desc(p) \
  11	container_of(p, struct virtchnl_fdir_fltr_conf, input)
  12
  13#define GTPU_TEID_OFFSET 4
  14#define GTPU_EH_QFI_OFFSET 1
  15#define GTPU_EH_QFI_MASK 0x3F
  16#define PFCP_S_OFFSET 0
  17#define PFCP_S_MASK 0x1
  18#define PFCP_PORT_NR 8805
  19
  20#define FDIR_INSET_FLAG_ESP_S 0
  21#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
  22#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
  23#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
  24
  25enum ice_fdir_tunnel_type {
  26	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
  27	ICE_FDIR_TUNNEL_TYPE_GTPU,
  28	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
  29	ICE_FDIR_TUNNEL_TYPE_ECPRI,
  30	ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
  31	ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
  32	ICE_FDIR_TUNNEL_TYPE_GRE,
  33	ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
  34	ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
  35	ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
  36	ICE_FDIR_TUNNEL_TYPE_L2TPV2,
  37	ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
  38};
  39
  40struct virtchnl_fdir_fltr_conf {
  41	struct ice_fdir_fltr input;
  42	enum ice_fdir_tunnel_type ttype;
  43	u64 inset_flag;
  44	u32 flow_id;
  45
  46	struct ice_parser_profile *prof;
  47	bool parser_ena;
  48	u8 *pkt_buf;
  49	u8 pkt_len;
  50};
  51
  52struct virtchnl_fdir_inset_map {
  53	enum virtchnl_proto_hdr_field field;
  54	enum ice_flow_field fld;
  55	u64 flag;
  56	u64 mask;
  57};
  58
  59static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
  60	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
  61	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
  62	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
  63	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
  64	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
  65	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
  66	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
  67	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
  68	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
  69	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
  70	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
  71	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
  72	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
  73	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
  74	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
  75	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
  76	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
  77	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
  78	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
  79	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
  80		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
  81	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
  82		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
  83	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
  84	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
  85	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
  86};
  87
  88/**
  89 * ice_vc_fdir_param_check
  90 * @vf: pointer to the VF structure
  91 * @vsi_id: VF relative VSI ID
  92 *
  93 * Check for the valid VSI ID, PF's state and VF's state
  94 *
  95 * Return: 0 on success, and -EINVAL on error.
  96 */
  97static int
  98ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
  99{
 100	struct ice_pf *pf = vf->pf;
 101
 102	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
 103		return -EINVAL;
 104
 105	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
 106		return -EINVAL;
 107
 108	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
 109		return -EINVAL;
 110
 111	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
 112		return -EINVAL;
 113
 114	if (!ice_get_vf_vsi(vf))
 115		return -EINVAL;
 116
 117	return 0;
 118}
 119
 120/**
 121 * ice_vf_start_ctrl_vsi
 122 * @vf: pointer to the VF structure
 123 *
 124 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 125 *
 126 * Return: 0 on success, and other on error.
 127 */
 128static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
 129{
 130	struct ice_pf *pf = vf->pf;
 131	struct ice_vsi *ctrl_vsi;
 132	struct device *dev;
 133	int err;
 134
 135	dev = ice_pf_to_dev(pf);
 136	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 137		return -EEXIST;
 138
 139	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
 140	if (!ctrl_vsi) {
 141		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
 142			vf->vf_id);
 143		return -ENOMEM;
 144	}
 145
 146	err = ice_vsi_open_ctrl(ctrl_vsi);
 147	if (err) {
 148		dev_dbg(dev, "Could not open control VSI for VF %d\n",
 149			vf->vf_id);
 150		goto err_vsi_open;
 151	}
 152
 153	return 0;
 154
 155err_vsi_open:
 156	ice_vsi_release(ctrl_vsi);
 157	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
 158		pf->vsi[vf->ctrl_vsi_idx] = NULL;
 159		vf->ctrl_vsi_idx = ICE_NO_VSI;
 160	}
 161	return err;
 162}
 163
 164/**
 165 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 166 * @vf: pointer to the VF structure
 167 * @flow: filter flow type
 168 *
 169 * Return: 0 on success, and other on error.
 170 */
 171static int
 172ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
 173{
 174	struct ice_vf_fdir *fdir = &vf->fdir;
 175
 176	if (!fdir->fdir_prof) {
 177		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
 178					       ICE_FLTR_PTYPE_MAX,
 179					       sizeof(*fdir->fdir_prof),
 180					       GFP_KERNEL);
 181		if (!fdir->fdir_prof)
 182			return -ENOMEM;
 183	}
 184
 185	if (!fdir->fdir_prof[flow]) {
 186		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
 187						     sizeof(**fdir->fdir_prof),
 188						     GFP_KERNEL);
 189		if (!fdir->fdir_prof[flow])
 190			return -ENOMEM;
 191	}
 192
 193	return 0;
 194}
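
The allocation above is lazy and two-level: the ICE_FLTR_PTYPE_MAX pointer
table is created on first use, and a per-flow profile is allocated only when
that flow type first needs one. The same pattern with hypothetical demo_*
names (devm_kcalloc()/devm_kzalloc() are the real device-managed allocators):

	#include <linux/device.h>

	struct demo_prof { int placeholder; };

	static int demo_get_prof(struct device *dev, struct demo_prof ***table,
				 unsigned int flow, unsigned int max_flows)
	{
		if (!*table) {
			*table = devm_kcalloc(dev, max_flows, sizeof(**table),
					      GFP_KERNEL);
			if (!*table)
				return -ENOMEM;
		}

		if (!(*table)[flow]) {
			(*table)[flow] = devm_kzalloc(dev,
						      sizeof(*(*table)[flow]),
						      GFP_KERNEL);
			if (!(*table)[flow])
				return -ENOMEM;
		}

		return 0;
	}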
 195
 196/**
 197 * ice_vc_fdir_free_prof - free profile for this filter flow type
 198 * @vf: pointer to the VF structure
 199 * @flow: filter flow type
 200 */
 201static void
 202ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
 203{
 204	struct ice_vf_fdir *fdir = &vf->fdir;
 205
 206	if (!fdir->fdir_prof)
 207		return;
 208
 209	if (!fdir->fdir_prof[flow])
 210		return;
 211
 212	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
 213	fdir->fdir_prof[flow] = NULL;
 214}
 215
 216/**
 217 * ice_vc_fdir_free_prof_all - free all the profile for this VF
 218 * @vf: pointer to the VF structure
 219 */
 220static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
 221{
 222	struct ice_vf_fdir *fdir = &vf->fdir;
 223	enum ice_fltr_ptype flow;
 224
 225	if (!fdir->fdir_prof)
 226		return;
 227
 228	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
 229		ice_vc_fdir_free_prof(vf, flow);
 230
 231	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
 232	fdir->fdir_prof = NULL;
 233}
 234
 235/**
 236 * ice_vc_fdir_parse_flow_fld
 237 * @proto_hdr: virtual channel protocol filter header
 238 * @conf: FDIR configuration for each filter
 239 * @fld: field type array
 240 * @fld_cnt: field counter
 241 *
  242 * Parse the virtual channel filter header and store the matched fields in the field type array
 243 *
 244 * Return: 0 on success, and other on error.
 245 */
 246static int
 247ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
 248			   struct virtchnl_fdir_fltr_conf *conf,
 249			   enum ice_flow_field *fld, int *fld_cnt)
 250{
 251	struct virtchnl_proto_hdr hdr;
 252	u32 i;
 253
 254	memcpy(&hdr, proto_hdr, sizeof(hdr));
 255
 256	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
 257	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
 258		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
 259			if (fdir_inset_map[i].mask &&
 260			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
 261			     fdir_inset_map[i].flag))
 262				continue;
 263
 264			fld[*fld_cnt] = fdir_inset_map[i].fld;
 265			*fld_cnt += 1;
 266			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
 267				return -EINVAL;
 268			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
 269						     fdir_inset_map[i].field);
 270		}
 271
 272	return 0;
 273}
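
The loop above walks the VIRTCHNL field selector on a local copy of the
header: each map hit appends one flow field and deletes the matched bit, so
iteration ends as soon as the selector is exhausted. The same idea reduced to
a plain bitmask, with all demo_* names hypothetical:

	#include <linux/errno.h>
	#include <linux/types.h>

	static int demo_parse_fields(u32 selector, const u32 *map_bit,
				     const int *map_fld, int map_len,
				     int *fld, int *fld_cnt, int fld_max)
	{
		int i;

		for (i = 0; i < map_len && selector; i++) {
			if (!(selector & map_bit[i]))
				continue;

			fld[(*fld_cnt)++] = map_fld[i];
			if (*fld_cnt >= fld_max)
				return -EINVAL;

			selector &= ~map_bit[i];	/* field consumed */
		}

		return 0;
	}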
 274
 275/**
 276 * ice_vc_fdir_set_flow_fld
 277 * @vf: pointer to the VF structure
 278 * @fltr: virtual channel add cmd buffer
 279 * @conf: FDIR configuration for each filter
 280 * @seg: array of one or more packet segments that describe the flow
 281 *
 282 * Parse the virtual channel add msg buffer's field vector and store them into
 283 * flow's packet segment field
 284 *
 285 * Return: 0 on success, and other on error.
 286 */
 287static int
 288ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 289			 struct virtchnl_fdir_fltr_conf *conf,
 290			 struct ice_flow_seg_info *seg)
 291{
 292	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
 293	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
 294	struct device *dev = ice_pf_to_dev(vf->pf);
 295	struct virtchnl_proto_hdrs *proto;
 296	int fld_cnt = 0;
 297	int i;
 298
 299	proto = &rule->proto_hdrs;
 300	for (i = 0; i < proto->count; i++) {
 301		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
 302		int ret;
 303
 304		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
 305		if (ret)
 306			return ret;
 307	}
 308
 309	if (fld_cnt == 0) {
 310		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
 311		return -EINVAL;
 312	}
 313
 314	for (i = 0; i < fld_cnt; i++)
 315		ice_flow_set_fld(seg, fld[i],
 316				 ICE_FLOW_FLD_OFF_INVAL,
 317				 ICE_FLOW_FLD_OFF_INVAL,
 318				 ICE_FLOW_FLD_OFF_INVAL, false);
 319
 320	return 0;
 321}
 322
 323/**
 324 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 325 * @vf: pointer to the VF structure
 326 * @conf: FDIR configuration for each filter
 327 * @seg: array of one or more packet segments that describe the flow
 328 *
 329 * Return: 0 on success, and other on error.
 330 */
 331static int
 332ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
 333			 struct virtchnl_fdir_fltr_conf *conf,
 334			 struct ice_flow_seg_info *seg)
 335{
 336	enum ice_fltr_ptype flow = conf->input.flow_type;
 337	enum ice_fdir_tunnel_type ttype = conf->ttype;
 338	struct device *dev = ice_pf_to_dev(vf->pf);
 339
 340	switch (flow) {
 341	case ICE_FLTR_PTYPE_NON_IP_L2:
 342		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
 343		break;
 344	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
 345		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
 346				  ICE_FLOW_SEG_HDR_IPV4 |
 347				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 348		break;
 349	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
 350		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
 351				  ICE_FLOW_SEG_HDR_IPV4 |
 352				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 353		break;
 354	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
 355		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
 356				  ICE_FLOW_SEG_HDR_IPV4 |
 357				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 358		break;
 359	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
 360		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
 361				  ICE_FLOW_SEG_HDR_IPV4 |
 362				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 363		break;
 364	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
 365		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
 366				  ICE_FLOW_SEG_HDR_IPV4 |
 367				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 368		break;
 369	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
 370		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
 371				  ICE_FLOW_SEG_HDR_IPV4 |
 372				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 373		break;
 374	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 375		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
 376				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 377		break;
 378	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
 379		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
 380				  ICE_FLOW_SEG_HDR_IPV4 |
 381				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 382		break;
 383	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
 384		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 385				  ICE_FLOW_SEG_HDR_IPV4 |
 386				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 387		break;
 388	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
 389	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
 390	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
 391	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
 392		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
 393			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
 394					  ICE_FLOW_SEG_HDR_IPV4 |
 395					  ICE_FLOW_SEG_HDR_IPV_OTHER);
 396		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
 397			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
 398					  ICE_FLOW_SEG_HDR_GTPU_IP |
 399					  ICE_FLOW_SEG_HDR_IPV4 |
 400					  ICE_FLOW_SEG_HDR_IPV_OTHER);
 401		} else {
  402			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
  403				ttype, vf->vf_id);
 404			return -EINVAL;
 405		}
 406		break;
 407	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
 408		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
 409				  ICE_FLOW_SEG_HDR_IPV4 |
 410				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 411		break;
 412	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
 413		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
 414				  ICE_FLOW_SEG_HDR_IPV6 |
 415				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 416		break;
 417	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
 418		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
 419				  ICE_FLOW_SEG_HDR_IPV6 |
 420				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 421		break;
 422	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
 423		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
 424				  ICE_FLOW_SEG_HDR_IPV6 |
 425				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 426		break;
 427	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
 428		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
 429				  ICE_FLOW_SEG_HDR_IPV6 |
 430				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 431		break;
 432	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
 433		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
 434				  ICE_FLOW_SEG_HDR_IPV6 |
 435				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 436		break;
 437	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
 438		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
 439				  ICE_FLOW_SEG_HDR_IPV6 |
 440				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 441		break;
 442	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
 443		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
 444				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 445		break;
 446	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
 447		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
 448				  ICE_FLOW_SEG_HDR_IPV6 |
 449				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 450		break;
 451	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
 452		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 453				  ICE_FLOW_SEG_HDR_IPV6 |
 454				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 455		break;
 456	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
 457		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
 458				  ICE_FLOW_SEG_HDR_IPV6 |
 459				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 460		break;
 461	default:
  462		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
 463			flow, vf->vf_id);
 464		return -EINVAL;
 465	}
 466
 467	return 0;
 468}
 469
 470/**
 471 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 472 * @vf: pointer to the VF structure
 473 * @flow: filter flow type
 474 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 475 */
 476static void
 477ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
 478{
 479	struct ice_vf_fdir *fdir = &vf->fdir;
 480	struct ice_fd_hw_prof *vf_prof;
 481	struct ice_pf *pf = vf->pf;
 482	struct ice_vsi *vf_vsi;
 483	struct device *dev;
 484	struct ice_hw *hw;
 485	u64 prof_id;
 486	int i;
 487
 488	dev = ice_pf_to_dev(pf);
 489	hw = &pf->hw;
 490	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
 491		return;
 492
 493	vf_prof = fdir->fdir_prof[flow];
 494	prof_id = vf_prof->prof_id[tun];
 495
 496	vf_vsi = ice_get_vf_vsi(vf);
 497	if (!vf_vsi) {
 498		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
 499		return;
 500	}
 501
 502	if (!fdir->prof_entry_cnt[flow][tun])
 503		return;
 504
 505	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
 506		if (vf_prof->entry_h[i][tun]) {
 507			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
 508
 509			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
 510			ice_flow_rem_entry(hw, ICE_BLK_FD,
 511					   vf_prof->entry_h[i][tun]);
 512			vf_prof->entry_h[i][tun] = 0;
 513		}
 514
 515	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 516	devm_kfree(dev, vf_prof->fdir_seg[tun]);
 517	vf_prof->fdir_seg[tun] = NULL;
 518
 519	for (i = 0; i < vf_prof->cnt; i++)
 520		vf_prof->vsi_h[i] = 0;
 521
 522	fdir->prof_entry_cnt[flow][tun] = 0;
 523}
 524
 525/**
 526 * ice_vc_fdir_rem_prof_all - remove profile for this VF
 527 * @vf: pointer to the VF structure
 528 */
 529static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
 530{
 531	enum ice_fltr_ptype flow;
 532
 533	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
 534	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
 535		ice_vc_fdir_rem_prof(vf, flow, 0);
 536		ice_vc_fdir_rem_prof(vf, flow, 1);
 537	}
 538}
 539
 540/**
 541 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 542 * @fdir: pointer to the VF FDIR structure
 543 */
 544static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
 545{
 546	enum ice_fltr_ptype flow;
 547
 548	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
 549	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
 550		fdir->fdir_fltr_cnt[flow][0] = 0;
 551		fdir->fdir_fltr_cnt[flow][1] = 0;
 552	}
 553
 554	fdir->fdir_fltr_cnt_total = 0;
 555}
 556
 557/**
 558 * ice_vc_fdir_has_prof_conflict
 559 * @vf: pointer to the VF structure
 560 * @conf: FDIR configuration for each filter
 561 *
  562 * Check if @conf has a conflicting profile with the existing profiles
  563 *
  564 * Return: true if @conf conflicts with an existing profile, false otherwise.
 565 */
 566static bool
 567ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
 568			      struct virtchnl_fdir_fltr_conf *conf)
 569{
 570	struct ice_fdir_fltr *desc;
 571
 572	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
 573		struct virtchnl_fdir_fltr_conf *existing_conf;
 574		enum ice_fltr_ptype flow_type_a, flow_type_b;
 575		struct ice_fdir_fltr *a, *b;
 576
 577		existing_conf = to_fltr_conf_from_desc(desc);
 578		a = &existing_conf->input;
 579		b = &conf->input;
 580		flow_type_a = a->flow_type;
 581		flow_type_b = b->flow_type;
 582
 583		/* No need to compare two rules with different tunnel types or
 584		 * with the same protocol type.
 585		 */
 586		if (existing_conf->ttype != conf->ttype ||
 587		    flow_type_a == flow_type_b)
 588			continue;
 589
 590		switch (flow_type_a) {
 591		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
 592		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
 593		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
 594			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
 595				return true;
 596			break;
 597		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 598			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
 599			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
 600			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
 601				return true;
 602			break;
 603		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
 604		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
 605		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
 606			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
 607				return true;
 608			break;
 609		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
 610			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
 611			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
 612			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
 613				return true;
 614			break;
 615		default:
 616			break;
 617		}
 618	}
 619
 620	return false;
 621}
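
The conflicts flagged above are the wildcard cases: an IPV4_OTHER (or
IPV6_OTHER) profile matches any L4 protocol of that IP version, so it cannot
coexist with a UDP-, TCP- or SCTP-specific profile of the same version.
Reduced to a hypothetical two-flow helper:

	enum demo_flow {
		DEMO_V4_UDP, DEMO_V4_TCP, DEMO_V4_SCTP, DEMO_V4_OTHER
	};

	static bool demo_flows_conflict(enum demo_flow a, enum demo_flow b)
	{
		if (a == b)
			return false;	/* same profile, no new conflict */

		/* The wildcard profile overlaps every specific L4 one. */
		return a == DEMO_V4_OTHER || b == DEMO_V4_OTHER;
	}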
 622
 623/**
 624 * ice_vc_fdir_write_flow_prof
 625 * @vf: pointer to the VF structure
 626 * @flow: filter flow type
 627 * @seg: array of one or more packet segments that describe the flow
 628 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 629 *
 630 * Write the flow's profile config and packet segment into the hardware
 631 *
 632 * Return: 0 on success, and other on error.
 633 */
 634static int
 635ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
 636			    struct ice_flow_seg_info *seg, int tun)
 637{
 638	struct ice_vf_fdir *fdir = &vf->fdir;
 639	struct ice_vsi *vf_vsi, *ctrl_vsi;
 640	struct ice_flow_seg_info *old_seg;
 641	struct ice_flow_prof *prof = NULL;
 642	struct ice_fd_hw_prof *vf_prof;
 643	struct device *dev;
 644	struct ice_pf *pf;
 645	struct ice_hw *hw;
 646	u64 entry1_h = 0;
 647	u64 entry2_h = 0;
 648	int ret;
 649
 650	pf = vf->pf;
 651	dev = ice_pf_to_dev(pf);
 652	hw = &pf->hw;
 653	vf_vsi = ice_get_vf_vsi(vf);
 654	if (!vf_vsi)
 655		return -EINVAL;
 656
 657	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
 658	if (!ctrl_vsi)
 659		return -EINVAL;
 660
 661	vf_prof = fdir->fdir_prof[flow];
 662	old_seg = vf_prof->fdir_seg[tun];
 663	if (old_seg) {
 664		if (!memcmp(old_seg, seg, sizeof(*seg))) {
 665			dev_dbg(dev, "Duplicated profile for VF %d!\n",
 666				vf->vf_id);
 667			return -EEXIST;
 668		}
 669
 670		if (fdir->fdir_fltr_cnt[flow][tun]) {
 671			ret = -EINVAL;
 672			dev_dbg(dev, "Input set conflicts for VF %d\n",
 673				vf->vf_id);
 674			goto err_exit;
 675		}
 676
 677		/* remove previously allocated profile */
 678		ice_vc_fdir_rem_prof(vf, flow, tun);
 679	}
 680
 681	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
 682				tun + 1, false, &prof);
 683	if (ret) {
 684		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
 685			flow, vf->vf_id);
 686		goto err_exit;
 687	}
 688
 689	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
 690				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 691				 seg, &entry1_h);
 692	if (ret) {
 693		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
 694			flow, vf->vf_id);
 695		goto err_prof;
 696	}
 697
 698	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
 699				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 700				 seg, &entry2_h);
 701	if (ret) {
 702		dev_dbg(dev,
 703			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
 704			flow, vf->vf_id);
 705		goto err_entry_1;
 706	}
 707
 708	vf_prof->fdir_seg[tun] = seg;
 709	vf_prof->cnt = 0;
 710	fdir->prof_entry_cnt[flow][tun] = 0;
 711
 712	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
 713	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
 714	vf_prof->cnt++;
 715	fdir->prof_entry_cnt[flow][tun]++;
 716
 717	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
 718	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
 719	vf_prof->cnt++;
 720	fdir->prof_entry_cnt[flow][tun]++;
 721
 722	vf_prof->prof_id[tun] = prof->id;
 723
 724	return 0;
 725
 726err_entry_1:
 727	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 728			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
 729	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 730err_prof:
 731	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
 732err_exit:
 733	return ret;
 734}
 735
 736/**
 737 * ice_vc_fdir_config_input_set
 738 * @vf: pointer to the VF structure
 739 * @fltr: virtual channel add cmd buffer
 740 * @conf: FDIR configuration for each filter
 741 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 742 *
 743 * Config the input set type and value for virtual channel add msg buffer
 744 *
 745 * Return: 0 on success, and other on error.
 746 */
 747static int
 748ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 749			     struct virtchnl_fdir_fltr_conf *conf, int tun)
 750{
 751	struct ice_fdir_fltr *input = &conf->input;
 752	struct device *dev = ice_pf_to_dev(vf->pf);
 753	struct ice_flow_seg_info *seg;
 754	enum ice_fltr_ptype flow;
 755	int ret;
 756
 757	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
 758	if (ret) {
 759		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
 760			vf->vf_id);
 761		return ret;
 762	}
 763
 764	flow = input->flow_type;
 765	ret = ice_vc_fdir_alloc_prof(vf, flow);
 766	if (ret) {
 767		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
 768		return ret;
 769	}
 770
 771	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
 772	if (!seg)
 773		return -ENOMEM;
 774
 775	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
 776	if (ret) {
 777		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
 778		goto err_exit;
 779	}
 780
 781	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
 782	if (ret) {
 783		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
 784		goto err_exit;
 785	}
 786
 787	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
 788	if (ret == -EEXIST) {
 789		devm_kfree(dev, seg);
 790	} else if (ret) {
 791		dev_dbg(dev, "Write flow profile for VF %d failed\n",
 792			vf->vf_id);
 793		goto err_exit;
 794	}
 795
 796	return 0;
 797
 798err_exit:
 799	devm_kfree(dev, seg);
 800	return ret;
 801}
 802
 803/**
 804 * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
 805 * @proto: virtchnl protocol headers
 806 *
  807 * Check if the FDIR rule is a raw flow (protocol agnostic flow) or not. Note
  808 * that a common FDIR rule must have a non-zero proto->count. Thus, we use the
 809 * tunnel_level and count of proto as the indicators. If both tunnel_level and
 810 * count of proto are zero, this FDIR rule will be regarded as raw flow.
 811 *
 812 * Returns: true if headers describe raw flow, false otherwise.
 813 */
 814static bool
 815ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
 816{
 817	return (proto->tunnel_level == 0 && proto->count == 0);
 818}
 819
 820/**
 821 * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
 822 * @vf: pointer to the VF info
 823 * @proto: virtchnl protocol headers
 824 * @conf: FDIR configuration for each filter
 825 *
 826 * Parse the virtual channel filter's raw flow and store it in @conf
 827 *
 828 * Return: 0 on success or negative errno on failure.
 829 */
 830static int
 831ice_vc_fdir_parse_raw(struct ice_vf *vf,
 832		      struct virtchnl_proto_hdrs *proto,
 833		      struct virtchnl_fdir_fltr_conf *conf)
 834{
 835	u8 *pkt_buf, *msk_buf __free(kfree);
 836	struct ice_parser_result rslt;
 837	struct ice_pf *pf = vf->pf;
 838	struct ice_parser *psr;
 839	int status = -ENOMEM;
 840	struct ice_hw *hw;
 841	u16 udp_port = 0;
 842
 843	pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
 844	msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
 845	if (!pkt_buf || !msk_buf)
 846		goto err_mem_alloc;
 847
 848	memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
 849	memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
 850
 851	hw = &pf->hw;
 852
 853	/* Get raw profile info via Parser Lib */
 854	psr = ice_parser_create(hw);
 855	if (IS_ERR(psr)) {
 856		status = PTR_ERR(psr);
 857		goto err_mem_alloc;
 858	}
 859
 860	ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));
 861
 862	if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
 863		ice_parser_vxlan_tunnel_set(psr, udp_port, true);
 864
 865	status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
 866	if (status)
 867		goto err_parser_destroy;
 868
 869	if (hw->debug_mask & ICE_DBG_PARSER)
 870		ice_parser_result_dump(hw, &rslt);
 871
 872	conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
 873	if (!conf->prof) {
 874		status = -ENOMEM;
 875		goto err_parser_destroy;
 876	}
 877
 878	status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
 879					 proto->raw.pkt_len, ICE_BLK_FD,
 880					 conf->prof);
 881	if (status)
 882		goto err_parser_profile_init;
 883
 884	if (hw->debug_mask & ICE_DBG_PARSER)
 885		ice_parser_profile_dump(hw, conf->prof);
 886
 887	/* Store raw flow info into @conf */
 888	conf->pkt_len = proto->raw.pkt_len;
 889	conf->pkt_buf = pkt_buf;
 890	conf->parser_ena = true;
 891
 892	ice_parser_destroy(psr);
 893	return 0;
 894
 895err_parser_profile_init:
 896	kfree(conf->prof);
 897err_parser_destroy:
 898	ice_parser_destroy(psr);
 899err_mem_alloc:
 900	kfree(pkt_buf);
 901	return status;
 902}
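
Note the asymmetric cleanup above: msk_buf is declared with __free(kfree),
the scope-based cleanup helper from <linux/cleanup.h>, so it is freed
automatically on every return path, while pkt_buf stays manually managed
because on success its ownership moves into conf->pkt_buf. A minimal sketch
of the scoped form:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int demo_scoped_buf(size_t len)
	{
		u8 *tmp __free(kfree) = kzalloc(len, GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;

		/* ... use tmp ...; kfree(tmp) runs automatically on return */
		return 0;
	}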
 903
 904/**
 905 * ice_vc_fdir_parse_pattern
 906 * @vf: pointer to the VF info
 907 * @fltr: virtual channel add cmd buffer
 908 * @conf: FDIR configuration for each filter
 909 *
 910 * Parse the virtual channel filter's pattern and store them into conf
 911 *
 912 * Return: 0 on success, and other on error.
 913 */
 914static int
 915ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 916			  struct virtchnl_fdir_fltr_conf *conf)
 917{
 918	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
 919	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
 920	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
 921	struct device *dev = ice_pf_to_dev(vf->pf);
 922	struct ice_fdir_fltr *input = &conf->input;
 923	int i;
 924
 925	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
 926		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
 927			proto->count, vf->vf_id);
 928		return -EINVAL;
 929	}
 930
 931	/* For raw FDIR filters created by the parser */
 932	if (ice_vc_fdir_is_raw_flow(proto))
 933		return ice_vc_fdir_parse_raw(vf, proto, conf);
 934
 935	for (i = 0; i < proto->count; i++) {
 936		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
 937		struct ip_esp_hdr *esph;
 938		struct ip_auth_hdr *ah;
 939		struct sctphdr *sctph;
 940		struct ipv6hdr *ip6h;
 941		struct udphdr *udph;
 942		struct tcphdr *tcph;
 943		struct ethhdr *eth;
 944		struct iphdr *iph;
 945		u8 s_field;
 946		u8 *rawh;
 947
 948		switch (hdr->type) {
 949		case VIRTCHNL_PROTO_HDR_ETH:
 950			eth = (struct ethhdr *)hdr->buffer;
 951			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
 952
 953			if (hdr->field_selector)
 954				input->ext_data.ether_type = eth->h_proto;
 955			break;
 956		case VIRTCHNL_PROTO_HDR_IPV4:
 957			iph = (struct iphdr *)hdr->buffer;
 958			l3 = VIRTCHNL_PROTO_HDR_IPV4;
 959			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
 960
 961			if (hdr->field_selector) {
 962				input->ip.v4.src_ip = iph->saddr;
 963				input->ip.v4.dst_ip = iph->daddr;
 964				input->ip.v4.tos = iph->tos;
 965				input->ip.v4.proto = iph->protocol;
 966			}
 967			break;
 968		case VIRTCHNL_PROTO_HDR_IPV6:
 969			ip6h = (struct ipv6hdr *)hdr->buffer;
 970			l3 = VIRTCHNL_PROTO_HDR_IPV6;
 971			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
 972
 973			if (hdr->field_selector) {
 974				memcpy(input->ip.v6.src_ip,
 975				       ip6h->saddr.in6_u.u6_addr8,
 976				       sizeof(ip6h->saddr));
 977				memcpy(input->ip.v6.dst_ip,
 978				       ip6h->daddr.in6_u.u6_addr8,
 979				       sizeof(ip6h->daddr));
 980				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
 981						  (ip6h->flow_lbl[0] >> 4);
 982				input->ip.v6.proto = ip6h->nexthdr;
 983			}
 984			break;
 985		case VIRTCHNL_PROTO_HDR_TCP:
 986			tcph = (struct tcphdr *)hdr->buffer;
 987			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 988				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
 989			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 990				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
 991
 992			if (hdr->field_selector) {
 993				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
 994					input->ip.v4.src_port = tcph->source;
 995					input->ip.v4.dst_port = tcph->dest;
 996				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
 997					input->ip.v6.src_port = tcph->source;
 998					input->ip.v6.dst_port = tcph->dest;
 999				}
1000			}
1001			break;
1002		case VIRTCHNL_PROTO_HDR_UDP:
1003			udph = (struct udphdr *)hdr->buffer;
1004			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1005				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1006			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1007				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1008
1009			if (hdr->field_selector) {
1010				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1011					input->ip.v4.src_port = udph->source;
1012					input->ip.v4.dst_port = udph->dest;
1013				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1014					input->ip.v6.src_port = udph->source;
1015					input->ip.v6.dst_port = udph->dest;
1016				}
1017			}
1018			break;
1019		case VIRTCHNL_PROTO_HDR_SCTP:
1020			sctph = (struct sctphdr *)hdr->buffer;
1021			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1022				input->flow_type =
1023					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1024			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1025				input->flow_type =
1026					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1027
1028			if (hdr->field_selector) {
1029				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1030					input->ip.v4.src_port = sctph->source;
1031					input->ip.v4.dst_port = sctph->dest;
1032				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1033					input->ip.v6.src_port = sctph->source;
1034					input->ip.v6.dst_port = sctph->dest;
1035				}
1036			}
1037			break;
1038		case VIRTCHNL_PROTO_HDR_L2TPV3:
1039			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1040				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
1041			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1042				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
1043
1044			if (hdr->field_selector)
1045				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
1046			break;
1047		case VIRTCHNL_PROTO_HDR_ESP:
1048			esph = (struct ip_esp_hdr *)hdr->buffer;
1049			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
1050			    l4 == VIRTCHNL_PROTO_HDR_UDP)
1051				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
1052			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
1053				 l4 == VIRTCHNL_PROTO_HDR_UDP)
1054				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
1055			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
1056				 l4 == VIRTCHNL_PROTO_HDR_NONE)
1057				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
1058			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
1059				 l4 == VIRTCHNL_PROTO_HDR_NONE)
1060				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
1061
1062			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
1063				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
1064			else
1065				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
1066
1067			if (hdr->field_selector) {
1068				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1069					input->ip.v4.sec_parm_idx = esph->spi;
1070				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1071					input->ip.v6.sec_parm_idx = esph->spi;
1072			}
1073			break;
1074		case VIRTCHNL_PROTO_HDR_AH:
1075			ah = (struct ip_auth_hdr *)hdr->buffer;
1076			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1077				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
1078			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1079				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
1080
1081			if (hdr->field_selector) {
1082				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1083					input->ip.v4.sec_parm_idx = ah->spi;
1084				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1085					input->ip.v6.sec_parm_idx = ah->spi;
1086			}
1087			break;
1088		case VIRTCHNL_PROTO_HDR_PFCP:
1089			rawh = (u8 *)hdr->buffer;
1090			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
1091			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
1092				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
1093			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
1094				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
1095			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
1096				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
1097			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
1098				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
1099
1100			if (hdr->field_selector) {
1101				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1102					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
1103				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1104					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
1105			}
1106			break;
1107		case VIRTCHNL_PROTO_HDR_GTPU_IP:
1108			rawh = (u8 *)hdr->buffer;
1109			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1110
1111			if (hdr->field_selector)
1112				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
1113			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
1114			break;
1115		case VIRTCHNL_PROTO_HDR_GTPU_EH:
1116			rawh = (u8 *)hdr->buffer;
1117
1118			if (hdr->field_selector)
1119				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1120			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1121			break;
1122		default:
 1123		dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
1124				hdr->type, vf->vf_id);
1125			return -EINVAL;
1126		}
1127	}
1128
1129	return 0;
1130}
1131
1132/**
1133 * ice_vc_fdir_parse_action
1134 * @vf: pointer to the VF info
1135 * @fltr: virtual channel add cmd buffer
1136 * @conf: FDIR configuration for each filter
1137 *
1138 * Parse the virtual channel filter's action and store them into conf
1139 *
1140 * Return: 0 on success, and other on error.
1141 */
1142static int
1143ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1144			 struct virtchnl_fdir_fltr_conf *conf)
1145{
1146	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1147	struct device *dev = ice_pf_to_dev(vf->pf);
1148	struct ice_fdir_fltr *input = &conf->input;
1149	u32 dest_num = 0;
1150	u32 mark_num = 0;
1151	int i;
1152
1153	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1154		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1155			as->count, vf->vf_id);
1156		return -EINVAL;
1157	}
1158
1159	for (i = 0; i < as->count; i++) {
1160		struct virtchnl_filter_action *action = &as->actions[i];
1161
1162		switch (action->type) {
1163		case VIRTCHNL_ACTION_PASSTHRU:
1164			dest_num++;
1165			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1166			break;
1167		case VIRTCHNL_ACTION_DROP:
1168			dest_num++;
1169			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1170			break;
1171		case VIRTCHNL_ACTION_QUEUE:
1172			dest_num++;
1173			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1174			input->q_index = action->act_conf.queue.index;
1175			break;
1176		case VIRTCHNL_ACTION_Q_REGION:
1177			dest_num++;
1178			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1179			input->q_index = action->act_conf.queue.index;
1180			input->q_region = action->act_conf.queue.region;
1181			break;
1182		case VIRTCHNL_ACTION_MARK:
1183			mark_num++;
1184			input->fltr_id = action->act_conf.mark_id;
1185			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1186			break;
1187		default:
1188			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1189				action->type, vf->vf_id);
1190			return -EINVAL;
1191		}
1192	}
1193
1194	if (dest_num == 0 || dest_num >= 2) {
1195		dev_dbg(dev, "Invalid destination action for VF %d\n",
1196			vf->vf_id);
1197		return -EINVAL;
1198	}
1199
1200	if (mark_num >= 2) {
1201		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1202		return -EINVAL;
1203	}
1204
1205	return 0;
1206}
1207
1208/**
1209 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1210 * @vf: pointer to the VF info
1211 * @fltr: virtual channel add cmd buffer
1212 * @conf: FDIR configuration for each filter
1213 *
1214 * Return: 0 on success, and other on error.
1215 */
1216static int
1217ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1218			  struct virtchnl_fdir_fltr_conf *conf)
1219{
1220	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1221	int ret;
1222
1223	/* For raw FDIR filters created by the parser */
1224	if (!ice_vc_fdir_is_raw_flow(proto))
1225		if (!ice_vc_validate_pattern(vf, proto))
1226			return -EINVAL;
1227
1228	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1229	if (ret)
1230		return ret;
1231
1232	return ice_vc_fdir_parse_action(vf, fltr, conf);
1233}
1234
1235/**
1236 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
1237 * @conf_a: FDIR configuration for filter a
1238 * @conf_b: FDIR configuration for filter b
1239 *
 1240 * Return: true if the two rules have identical values, false otherwise.
1241 */
1242static bool
1243ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1244		       struct virtchnl_fdir_fltr_conf *conf_b)
1245{
1246	struct ice_fdir_fltr *a = &conf_a->input;
1247	struct ice_fdir_fltr *b = &conf_b->input;
1248
1249	if (conf_a->ttype != conf_b->ttype)
1250		return false;
1251	if (a->flow_type != b->flow_type)
1252		return false;
1253	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1254		return false;
1255	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1256		return false;
1257	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1258		return false;
1259	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1260		return false;
1261	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1262		return false;
1263	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1264		return false;
1265	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1266		return false;
1267	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1268		return false;
1269
1270	return true;
1271}
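
Comparing members with individual memcmp() calls, as above, sidesteps the
struct-padding problem: a single memcmp() over the whole filter could report
a mismatch purely because of uninitialized padding bytes between members. A
hypothetical illustration:

	struct demo_key {
		u8  proto;	/* compilers typically insert 3 pad bytes here */
		u32 addr;
	};

	static bool demo_key_equal(const struct demo_key *a,
				   const struct demo_key *b)
	{
		/* Member-wise compare; memcmp(a, b, sizeof(*a)) could differ
		 * in the padding even when both keys are logically equal.
		 */
		return a->proto == b->proto && a->addr == b->addr;
	}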
1272
1273/**
1274 * ice_vc_fdir_is_dup_fltr
1275 * @vf: pointer to the VF info
1276 * @conf: FDIR configuration for each filter
1277 *
 1278 * Check if a rule with the same conf value already exists
 1279 *
 1280 * Return: true if a duplicate rule exists, false otherwise.
1281 */
1282static bool
1283ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1284{
1285	struct ice_fdir_fltr *desc;
1286	bool ret;
1287
1288	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1289		struct virtchnl_fdir_fltr_conf *node =
1290				to_fltr_conf_from_desc(desc);
1291
1292		ret = ice_vc_fdir_comp_rules(node, conf);
1293		if (ret)
1294			return true;
1295	}
1296
1297	return false;
1298}
1299
1300/**
1301 * ice_vc_fdir_insert_entry
1302 * @vf: pointer to the VF info
1303 * @conf: FDIR configuration for each filter
1304 * @id: pointer to ID value allocated by driver
1305 *
1306 * Insert FDIR conf entry into list and allocate ID for this filter
1307 *
 1308 * Return: 0 on success, and other on error.
1309 */
1310static int
1311ice_vc_fdir_insert_entry(struct ice_vf *vf,
1312			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1313{
1314	struct ice_fdir_fltr *input = &conf->input;
1315	int i;
1316
1317	/* alloc ID corresponding with conf */
1318	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1319		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1320	if (i < 0)
1321		return -EINVAL;
1322	*id = i;
1323
1324	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1325	return 0;
1326}
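
idr_alloc() hands back the smallest free ID in [0, ICE_FDIR_MAX_FLTRS) and
binds it to the conf pointer, so the VF can later name the rule by that ID
alone. A sketch of the allocate/lookup/remove round trip (demo_* names
hypothetical):

	#include <linux/idr.h>

	static int demo_track(struct idr *idr, void *obj, u32 max, u32 *id)
	{
		int i = idr_alloc(idr, obj, 0, max, GFP_KERNEL);

		if (i < 0)
			return i;	/* -ENOMEM, or -ENOSPC if range full */

		*id = i;
		return 0;
	}

	/* Later: idr_find(idr, *id) returns obj; idr_remove(idr, *id) drops it. */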
1327
1328/**
1329 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1330 * @vf: pointer to the VF info
1331 * @conf: FDIR configuration for each filter
1332 * @id: filter rule's ID
1333 */
1334static void
1335ice_vc_fdir_remove_entry(struct ice_vf *vf,
1336			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1337{
1338	struct ice_fdir_fltr *input = &conf->input;
1339
1340	idr_remove(&vf->fdir.fdir_rule_idr, id);
1341	list_del(&input->fltr_node);
1342}
1343
1344/**
1345 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1346 * @vf: pointer to the VF info
1347 * @id: filter rule's ID
1348 *
 1349 * Return: pointer to the FDIR filter configuration on success, NULL otherwise.
1350 */
1351static struct virtchnl_fdir_fltr_conf *
1352ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1353{
1354	return idr_find(&vf->fdir.fdir_rule_idr, id);
1355}
1356
1357/**
1358 * ice_vc_fdir_flush_entry - remove all FDIR conf entry
1359 * @vf: pointer to the VF info
1360 */
1361static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1362{
1363	struct virtchnl_fdir_fltr_conf *conf;
1364	struct ice_fdir_fltr *desc, *temp;
1365
1366	list_for_each_entry_safe(desc, temp,
1367				 &vf->fdir.fdir_rule_list, fltr_node) {
1368		conf = to_fltr_conf_from_desc(desc);
1369		list_del(&desc->fltr_node);
1370		devm_kfree(ice_pf_to_dev(vf->pf), conf);
1371	}
1372}
1373
1374/**
1375 * ice_vc_fdir_write_fltr - write filter rule into hardware
1376 * @vf: pointer to the VF info
1377 * @conf: FDIR configuration for each filter
 1378 * @add: true implies add rule, false implies delete rule
1379 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1380 *
1381 * Return: 0 on success, and other on error.
1382 */
1383static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1384				  struct virtchnl_fdir_fltr_conf *conf,
1385				  bool add, bool is_tun)
1386{
1387	struct ice_fdir_fltr *input = &conf->input;
1388	struct ice_vsi *vsi, *ctrl_vsi;
1389	struct ice_fltr_desc desc;
1390	struct device *dev;
1391	struct ice_pf *pf;
1392	struct ice_hw *hw;
1393	int ret;
1394	u8 *pkt;
1395
1396	pf = vf->pf;
1397	dev = ice_pf_to_dev(pf);
1398	hw = &pf->hw;
1399	vsi = ice_get_vf_vsi(vf);
1400	if (!vsi) {
1401		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1402		return -EINVAL;
1403	}
1404
1405	input->dest_vsi = vsi->idx;
1406	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1407
1408	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1409	if (!ctrl_vsi) {
1410		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1411		return -EINVAL;
1412	}
1413
1414	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1415	if (!pkt)
1416		return -ENOMEM;
1417
1418	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1419	if (conf->parser_ena) {
1420		memcpy(pkt, conf->pkt_buf, conf->pkt_len);
1421	} else {
1422		ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1423		if (ret) {
1424			dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1425				vf->vf_id, input->flow_type);
1426			goto err_free_pkt;
1427		}
1428	}
1429
1430	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1431	if (ret)
1432		goto err_free_pkt;
1433
1434	return 0;
1435
1436err_free_pkt:
1437	devm_kfree(dev, pkt);
1438	return ret;
1439}
1440
1441/**
1442 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
1443 * @t: pointer to timer_list
1444 */
1445static void ice_vf_fdir_timer(struct timer_list *t)
1446{
1447	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1448	struct ice_vf_fdir_ctx *ctx_done;
1449	struct ice_vf_fdir *fdir;
1450	unsigned long flags;
1451	struct ice_vf *vf;
1452	struct ice_pf *pf;
1453
1454	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1455	vf = container_of(fdir, struct ice_vf, fdir);
1456	ctx_done = &fdir->ctx_done;
1457	pf = vf->pf;
1458	spin_lock_irqsave(&fdir->ctx_lock, flags);
1459	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1460		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1461		WARN_ON_ONCE(1);
1462		return;
1463	}
1464
1465	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1466
1467	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1468	ctx_done->conf = ctx_irq->conf;
1469	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1470	ctx_done->v_opcode = ctx_irq->v_opcode;
1471	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1472
1473	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1474	ice_service_task_schedule(pf);
1475}
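
Both completion paths (this timeout and the Rx IRQ handler below) perform the
same locked handoff: the in-flight record moves from ctx_irq to ctx_done in a
single critical section, so the service task that later flushes ctx_done can
never observe a half-written context. The handoff skeleton, with hypothetical
names:

	#include <linux/spinlock.h>
	#include <linux/bits.h>

	static void demo_handoff(spinlock_t *lock, unsigned long *irq_flags,
				 unsigned long *done_flags, int *done_stat,
				 int stat)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		*irq_flags &= ~BIT(0);	/* irq slot: no longer valid */
		*done_flags |= BIT(0);	/* done slot: now owns the request */
		*done_stat = stat;	/* e.g. IRQ vs. TIMEOUT disposition */
		spin_unlock_irqrestore(lock, flags);
	}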
1476
1477/**
1478 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1479 * @ctrl_vsi: pointer to a VF's CTRL VSI
1480 * @rx_desc: pointer to FDIR Rx queue descriptor
1481 */
1482void
1483ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1484			union ice_32b_rx_flex_desc *rx_desc)
1485{
1486	struct ice_pf *pf = ctrl_vsi->back;
1487	struct ice_vf *vf = ctrl_vsi->vf;
1488	struct ice_vf_fdir_ctx *ctx_done;
1489	struct ice_vf_fdir_ctx *ctx_irq;
1490	struct ice_vf_fdir *fdir;
1491	unsigned long flags;
1492	struct device *dev;
1493	int ret;
1494
1495	if (WARN_ON(!vf))
1496		return;
1497
1498	fdir = &vf->fdir;
1499	ctx_done = &fdir->ctx_done;
1500	ctx_irq = &fdir->ctx_irq;
1501	dev = ice_pf_to_dev(pf);
1502	spin_lock_irqsave(&fdir->ctx_lock, flags);
1503	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1504		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1505		WARN_ON_ONCE(1);
1506		return;
1507	}
1508
1509	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1510
1511	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1512	ctx_done->conf = ctx_irq->conf;
1513	ctx_done->stat = ICE_FDIR_CTX_IRQ;
1514	ctx_done->v_opcode = ctx_irq->v_opcode;
1515	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1516	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1517
1518	ret = del_timer(&ctx_irq->rx_tmr);
1519	if (!ret)
1520		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1521
1522	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1523	ice_service_task_schedule(pf);
1524}
1525
1526/**
1527 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1528 * @vf: pointer to the VF info
1529 */
1530static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1531{
1532	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
1533	struct ice_vsi *vf_vsi;
1534	struct device *dev;
1535	struct ice_pf *pf;
1536	struct ice_hw *hw;
1537	u16 vsi_num;
1538
1539	pf = vf->pf;
1540	hw = &pf->hw;
1541	dev = ice_pf_to_dev(pf);
1542	vf_vsi = ice_get_vf_vsi(vf);
1543	if (!vf_vsi) {
1544		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1545		return;
1546	}
1547
1548	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1549
1550	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1551	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1552	switch (hw->mac_type) {
1553	case ICE_MAC_E830:
1554		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1555		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1556		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1557		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1558		break;
1559	case ICE_MAC_E810:
1560	default:
1561		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1562		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1563		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1564		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1565	}
1566
1567	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1568		vf->vf_id, fd_size_g, fd_size_b);
1569	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1570		vf->vf_id, fd_cnt_g, fd_cnt_b);
1571}
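
/*
 * VSIQF_FD_SIZE and VSIQF_FD_CNT each pack a guaranteed (GCNT) and a
 * best effort (BCNT) field for the VSI. The layout of those fields is
 * the only thing that differs between E810 and E830 class hardware,
 * which is why the switch above merely selects E800_* or E830_* masks
 * for the same two register reads.
 */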
1572
1573/**
1574 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1575 * @vf: pointer to the VF info
1576 * @ctx: FDIR context info for post processing
1577 * @status: virtchnl FDIR program status
1578 *
1579 * Return: 0 on success, and a negative error code on failure.
1580 */
1581static int
1582ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1583		      enum virtchnl_fdir_prgm_status *status)
1584{
1585	struct device *dev = ice_pf_to_dev(vf->pf);
1586	u32 stat_err, error, prog_id;
1587	int ret;
1588
1589	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1590	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
1591	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
1592		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1593		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1594		ret = -EINVAL;
1595		goto err_exit;
1596	}
1597
1598	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
1599	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1600	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1601		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
1602			vf->vf_id);
1603		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1604		ret = -EINVAL;
1605		goto err_exit;
1606	}
1607
1608	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1609	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1610		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
1611			vf->vf_id);
1612		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1613		ret = -EINVAL;
1614		goto err_exit;
1615	}
1616
1617	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
1618	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1619		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1620			dev_err(dev, "VF %d: Failed to add FDIR rule due to no space in the table\n",
1621				vf->vf_id);
1622			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1623		} else {
1624			dev_err(dev, "VF %d: Failed to remove FDIR rule, attempt to remove non-existent entry\n",
1625				vf->vf_id);
1626			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1627		}
1628		ret = -EINVAL;
1629		goto err_exit;
1630	}
1631
1632	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
1633	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1634		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
1635		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1636		ret = -EINVAL;
1637		goto err_exit;
1638	}
1639
1640	*status = VIRTCHNL_FDIR_SUCCESS;
1641
1642	return 0;
1643
1644err_exit:
1645	ice_vf_fdir_dump_info(vf);
1646	return ret;
1647}
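
/*
 * Summary of the write-back checks above and the virtchnl status each
 * failure maps to:
 *
 *   DD bit not set          -> VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 *   PROG_ID/opcode mismatch -> VIRTCHNL_FDIR_FAILURE_RULE_INVALID
 *   FAIL set on an add      -> VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 *   FAIL set on a del       -> VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
 *   FAIL_PROF set           -> VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 *
 * Every failure path also dumps the FD table occupancy for diagnosis.
 */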
1648
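/**
 * ice_fdir_is_tunnel - check if a tunnel type matches on inner headers
 * @ttype: FDIR tunnel type of the filter
 *
 * Return: nonzero if @ttype requires the tunneled FDIR training packet,
 * 0 otherwise.
 */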
1649static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
1650{
1651	return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
1652		ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
1653		ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
1654		ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
1655		ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
1656		ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
1657}
1658
1659/**
1660 * ice_vc_add_fdir_fltr_post - post process the FDIR add command
1661 * @vf: pointer to the VF structure
1662 * @ctx: FDIR context info for post processing
1663 * @status: virtchnl FDIR program status
1664 * @success: true implies success, false implies failure
1665 *
1666 * Post process for the flow director add command. On success, finish the post
1667 * processing and send a success message back to the VF over virtchnl.
1668 * Otherwise, revert the context and send back a failure message over virtchnl.
1669 *
1670 * Return: 0 on success, and a negative error code on failure.
1671 */
1672static int
1673ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1674			  enum virtchnl_fdir_prgm_status status,
1675			  bool success)
1676{
1677	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1678	struct device *dev = ice_pf_to_dev(vf->pf);
1679	enum virtchnl_status_code v_ret;
1680	struct virtchnl_fdir_add *resp;
1681	int ret, len, is_tun;
1682
1683	v_ret = VIRTCHNL_STATUS_SUCCESS;
1684	len = sizeof(*resp);
1685	resp = kzalloc(len, GFP_KERNEL);
1686	if (!resp) {
1687		len = 0;
1688		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1689		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
1690		goto err_exit;
1691	}
1692
1693	if (!success)
1694		goto err_exit;
1695
1696	is_tun = 0;
1697	resp->status = status;
1698	resp->flow_id = conf->flow_id;
1699	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1700	vf->fdir.fdir_fltr_cnt_total++;
1701
1702	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1703				    (u8 *)resp, len);
1704	kfree(resp);
1705
1706	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1707		vf->vf_id, conf->flow_id,
1708		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1709		"add" : "del");
1710	return ret;
1711
1712err_exit:
1713	if (resp)
1714		resp->status = status;
1715	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1716	devm_kfree(dev, conf);
1717
1718	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1719				    (u8 *)resp, len);
1720	kfree(resp);
1721	return ret;
1722}
1723
1724/**
1725 * ice_vc_del_fdir_fltr_post - post process the FDIR del command
1726 * @vf: pointer to the VF structure
1727 * @ctx: FDIR context info for post processing
1728 * @status: virtchnl FDIR program status
1729 * @success: true implies success, false implies failure
1730 *
1731 * Post process for the flow director del command. On success, finish the post
1732 * processing and send a success message back to the VF over virtchnl.
1733 * Otherwise, revert the context and send back a failure message over virtchnl.
1734 *
1735 * Return: 0 on success, and a negative error code on failure.
1736 */
1737static int
1738ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1739			  enum virtchnl_fdir_prgm_status status,
1740			  bool success)
1741{
1742	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1743	struct device *dev = ice_pf_to_dev(vf->pf);
1744	enum virtchnl_status_code v_ret;
1745	struct virtchnl_fdir_del *resp;
1746	int ret, len, is_tun;
1747
1748	v_ret = VIRTCHNL_STATUS_SUCCESS;
1749	len = sizeof(*resp);
1750	resp = kzalloc(len, GFP_KERNEL);
1751	if (!resp) {
1752		len = 0;
1753		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1754		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
1755		goto err_exit;
1756	}
1757
1758	if (!success)
1759		goto err_exit;
1760
1761	is_tun = 0;
1762	resp->status = status;
1763	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1764	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1765	vf->fdir.fdir_fltr_cnt_total--;
1766
1767	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1768				    (u8 *)resp, len);
1769	kfree(resp);
1770
1771	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1772		vf->vf_id, conf->flow_id,
1773		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1774		"add" : "del");
1775	devm_kfree(dev, conf);
1776	return ret;
1777
1778err_exit:
1779	if (resp)
1780		resp->status = status;
1781	if (success)
1782		devm_kfree(dev, conf);
1783
1784	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1785				    (u8 *)resp, len);
1786	kfree(resp);
1787	return ret;
1788}
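
/*
 * Bookkeeping symmetry: ice_vc_add_fdir_fltr_post() increments and
 * ice_vc_del_fdir_fltr_post() decrements both the per-flow-type
 * fdir_fltr_cnt[][] counter and fdir_fltr_cnt_total. The per-type count
 * lets ice_vc_del_fdir_fltr() tear down the HW profile when the last
 * filter of a flow type goes away, while the total enforces the per-VF
 * filter cap checked in ice_vc_add_fdir_fltr().
 */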
1789
1790/**
1791 * ice_flush_fdir_ctx - flush and process pending VF FDIR events
1792 * @pf: pointer to the PF structure
1793 *
1794 * Flush all pending events on the ctx_done list and process them.
1795 */
1796void ice_flush_fdir_ctx(struct ice_pf *pf)
1797{
1798	struct ice_vf *vf;
1799	unsigned int bkt;
1800
1801	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1802		return;
1803
1804	mutex_lock(&pf->vfs.table_lock);
1805	ice_for_each_vf(pf, bkt, vf) {
1806		struct device *dev = ice_pf_to_dev(pf);
1807		enum virtchnl_fdir_prgm_status status;
1808		struct ice_vf_fdir_ctx *ctx;
1809		unsigned long flags;
1810		int ret;
1811
1812		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1813			continue;
1814
1815		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1816			continue;
1817
1818		ctx = &vf->fdir.ctx_done;
1819		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1820		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1821			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1822			continue;
1823		}
1824		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1825
1826		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1827		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1828			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1829			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1830				vf->vf_id);
1831			goto err_exit;
1832		}
1833
1834		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1835		if (ret)
1836			goto err_exit;
1837
1838		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1839			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1840		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1841			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1842		else
1843			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1844
1845		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1846		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1847		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1848		continue;
1849err_exit:
1850		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1851			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1852		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1853			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1854		else
1855			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1856
1857		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1858		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1859		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1860	}
1861	mutex_unlock(&pf->vfs.table_lock);
1862}
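
/*
 * ice_flush_fdir_ctx() runs from the PF service task whenever the IRQ
 * handler or the timeout set ICE_FD_VF_FLUSH_CTX. Holding vfs.table_lock
 * keeps the VF table stable while every active VF's ctx_done context is
 * inspected; only contexts marked ICE_VF_FDIR_CTX_VALID are acted on,
 * and the flag is cleared again once the add/del post handler has sent
 * the reply.
 */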
1863
1864/**
1865 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
1866 * @vf: pointer to the VF structure
1867 * @conf: FDIR configuration for each filter
1868 * @v_opcode: virtual channel operation code
1869 *
1870 * Return: 0 on success, and a negative error code on failure.
1871 */
1872static int
1873ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
1874			enum virtchnl_ops v_opcode)
1875{
1876	struct device *dev = ice_pf_to_dev(vf->pf);
1877	struct ice_vf_fdir_ctx *ctx;
1878	unsigned long flags;
1879
1880	ctx = &vf->fdir.ctx_irq;
1881	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1882	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
1883	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
1884		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1885		dev_dbg(dev, "VF %d: Last request is still in progress\n",
1886			vf->vf_id);
1887		return -EBUSY;
1888	}
1889	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
1890	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1891
1892	ctx->conf = conf;
1893	ctx->v_opcode = v_opcode;
1894	ctx->stat = ICE_FDIR_CTX_READY;
1895	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
1896
1897	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1898
1899	return 0;
1900}
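
/*
 * Only one FDIR request may be in flight per VF: the -EBUSY above
 * triggers while either context still holds ICE_VF_FDIR_CTX_VALID. The
 * ~10 ms timer bounds how long the PF waits for the programming status
 * write-back before ice_flush_fdir_ctx() reports
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT back to the VF (presumably ample
 * time for a single programming descriptor to complete).
 */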
1901
1902/**
1903 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
1904 * @vf: pointer to the VF structure
1905 *
1906 * Return: Void.
1907 */
1908static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
1909{
1910	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
1911	unsigned long flags;
1912
1913	del_timer(&ctx->rx_tmr);
1914	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1915	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1916	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1917}
1918
1919/**
1920 * ice_vc_parser_fv_check_diff - compare two parsed FDIR profile fv contexts
1921 * @fv_a: struct of parsed FDIR profile field vector
1922 * @fv_b: struct of parsed FDIR profile field vector
1923 *
1924 * Check if the two parsed FDIR profile field vector contexts are different,
1925 * including proto_id, offset and mask.
1926 *
1927 * Return: true if they differ, false otherwise.
1928 */
1929static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
1930					struct ice_parser_fv *fv_b)
1931{
1932	return (fv_a->proto_id	!= fv_b->proto_id ||
1933		fv_a->offset	!= fv_b->offset ||
1934		fv_a->msk	!= fv_b->msk);
1935}
1936
1937/**
1938 * ice_vc_parser_fv_save - save parsed FDIR profile fv context
1939 * @fv: struct of parsed FDIR profile field vector
1940 * @fv_src: parsed FDIR profile field vector context to save
1941 *
1942 * Save the parsed FDIR profile field vector context, including proto_id,
1943 * offset and mask.
1944 *
1945 * Return: Void.
1946 */
1947static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
1948				  struct ice_parser_fv *fv_src)
1949{
1950	fv->proto_id	= fv_src->proto_id;
1951	fv->offset	= fv_src->offset;
1952	fv->msk		= fv_src->msk;
1953	fv->spec	= 0;
1954}
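
/*
 * ice_vc_parser_fv_check_diff() and ice_vc_parser_fv_save() together
 * implement profile deduplication for raw (parser based) filters: a new
 * rule reuses an already programmed HW profile only if all
 * ICE_MAX_FV_WORDS field vector words match the saved copy. Only
 * proto_id, offset and msk take part in the comparison; spec is zeroed
 * on save.
 */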
1955
1956/**
1957 * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
1958 * @vf: pointer to the VF info
1959 * @conf: FDIR configuration for each filter
1960 * @v_ret: the final VIRTCHNL code
1961 * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER response buffer
1962 * @len: length of the response buffer
1963 *
1964 * Return: 0 on success or negative errno on failure.
1965 */
1966static int
1967ice_vc_add_fdir_raw(struct ice_vf *vf,
1968		    struct virtchnl_fdir_fltr_conf *conf,
1969		    enum virtchnl_status_code *v_ret,
1970		    struct virtchnl_fdir_add *stat, int len)
1971{
1972	struct ice_vsi *vf_vsi, *ctrl_vsi;
1973	struct ice_fdir_prof_info *pi;
1974	struct ice_pf *pf = vf->pf;
1975	int ret, ptg, id, i;
1976	struct device *dev;
1977	struct ice_hw *hw;
1978	bool fv_found;
1979
1980	dev = ice_pf_to_dev(pf);
1981	hw = &pf->hw;
1982	*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1983	stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1984
1985	id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
1986	ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
1987
1988	vf_vsi = ice_get_vf_vsi(vf);
1989	if (!vf_vsi) {
1990		dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
1991		return -ENODEV;
1992	}
1993
1994	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1995	if (!ctrl_vsi) {
1996		dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
1997			vf->vf_id);
1998		return -ENODEV;
1999	}
2000
2001	fv_found = false;
2002
2003	/* Check if profile info already exists, then update the counter */
2004	pi = &vf->fdir_prof_info[ptg];
2005	if (pi->fdir_active_cnt != 0) {
2006		for (i = 0; i < ICE_MAX_FV_WORDS; i++)
2007			if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
2008							&conf->prof->fv[i]))
2009				break;
2010		if (i == ICE_MAX_FV_WORDS) {
2011			fv_found = true;
2012			pi->fdir_active_cnt++;
2013		}
2014	}
2015
2016	/* HW profile setting is only required for the first time */
2017	if (!fv_found) {
2018		ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
2019					       ctrl_vsi->idx, conf->prof,
2020					       ICE_BLK_FD);
2021
2022		if (ret) {
2023			*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2024			dev_dbg(dev, "VF %d: insert hw prof failed\n",
2025				vf->vf_id);
2026			return ret;
2027		}
2028	}
2029
2030	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
2031	if (ret) {
2032		*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2033		dev_dbg(dev, "VF %d: insert FDIR list failed\n",
2034			vf->vf_id);
2035		return ret;
2036	}
2037
2038	ret = ice_vc_fdir_set_irq_ctx(vf, conf,
2039				      VIRTCHNL_OP_ADD_FDIR_FILTER);
2040	if (ret) {
2041		dev_dbg(dev, "VF %d: set FDIR context failed\n",
2042			vf->vf_id);
2043		goto err_rem_entry;
2044	}
2045
2046	ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
2047	if (ret) {
2048		dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
2049			vf->vf_id, ret);
2050		goto err_clr_irq;
2051	}
2052
2053	/* Save parsed profile fv info of the FDIR rule for the first time */
2054	if (!fv_found) {
2055		for (i = 0; i < conf->prof->fv_num; i++)
2056			ice_vc_parser_fv_save(&pi->prof.fv[i],
2057					      &conf->prof->fv[i]);
2058		pi->prof.fv_num = conf->prof->fv_num;
2059		pi->fdir_active_cnt = 1;
2060	}
2061
2062	return 0;
2063
2064err_clr_irq:
2065	ice_vc_fdir_clear_irq_ctx(vf);
2066err_rem_entry:
2067	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
2068	return ret;
2069}
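
/*
 * Raw filter profile lifecycle: the first rule landing in a packet type
 * group programs the parser profile into hardware, saves its field
 * vector and sets fdir_active_cnt to 1; later rules with an identical
 * field vector only bump the count. ice_vc_del_fdir_raw() decrements it
 * and, at zero, removes the profile id flows for both the VF VSI and
 * its control VSI.
 */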
2070
2071/**
2072 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
2073 * @vf: pointer to the VF info
2074 * @msg: pointer to the msg buffer
2075 *
2076 * Return: 0 on success, and a negative error code on failure.
2077 */
2078int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
2079{
2080	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
2081	struct virtchnl_fdir_add *stat = NULL;
2082	struct virtchnl_fdir_fltr_conf *conf;
2083	enum virtchnl_status_code v_ret;
2084	struct ice_vsi *vf_vsi;
2085	struct device *dev;
2086	struct ice_pf *pf;
2087	int is_tun = 0;
2088	int len = 0;
2089	int ret;
2090
2091	pf = vf->pf;
2092	dev = ice_pf_to_dev(pf);
2093	vf_vsi = ice_get_vf_vsi(vf);
2094
2095#define ICE_VF_MAX_FDIR_FILTERS	128
2096	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
2097	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
2098		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2099		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
2100			vf->vf_id);
2101		goto err_exit;
2102	}
2103
2104	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
2105	if (ret) {
2106		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2107		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
2108		goto err_exit;
2109	}
2110
2111	ret = ice_vf_start_ctrl_vsi(vf);
2112	if (ret && (ret != -EEXIST)) {
2113		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2114		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
2115			vf->vf_id, ret);
2116		goto err_exit;
2117	}
2118
2119	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
2120	if (!stat) {
2121		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2122		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
2123		goto err_exit;
2124	}
2125
2126	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
2127	if (!conf) {
2128		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2129		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
2130		goto err_exit;
2131	}
2132
2133	len = sizeof(*stat);
2134	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
2135	if (ret) {
2136		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2137		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
2138		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
2139		goto err_free_conf;
2140	}
2141
2142	if (fltr->validate_only) {
2143		v_ret = VIRTCHNL_STATUS_SUCCESS;
2144		stat->status = VIRTCHNL_FDIR_SUCCESS;
2145		devm_kfree(dev, conf);
2146		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
2147					    v_ret, (u8 *)stat, len);
2148		goto exit;
2149	}
2150
2151	/* For raw FDIR filters created by the parser */
2152	if (conf->parser_ena) {
2153		ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
2154		if (ret)
2155			goto err_free_conf;
2156		goto exit;
2157	}
2158
2159	is_tun = ice_fdir_is_tunnel(conf->ttype);
2160	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
2161	if (ret) {
2162		v_ret = VIRTCHNL_STATUS_SUCCESS;
2163		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
2164		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
2165			vf->vf_id, ret);
2166		goto err_free_conf;
2167	}
2168
2169	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
2170	if (ret) {
2171		v_ret = VIRTCHNL_STATUS_SUCCESS;
2172		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
2173		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
2174			vf->vf_id);
2175		goto err_free_conf;
2176	}
2177
2178	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
2179	if (ret) {
2180		v_ret = VIRTCHNL_STATUS_SUCCESS;
2181		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2182		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
2183		goto err_free_conf;
2184	}
2185
2186	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
2187	if (ret) {
2188		v_ret = VIRTCHNL_STATUS_SUCCESS;
2189		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2190		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
2191		goto err_rem_entry;
2192	}
2193
2194	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
2195	if (ret) {
2196		v_ret = VIRTCHNL_STATUS_SUCCESS;
2197		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2198		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
2199			vf->vf_id, ret);
2200		goto err_clr_irq;
2201	}
2202
2203exit:
2204	kfree(stat);
2205	return ret;
2206
2207err_clr_irq:
2208	ice_vc_fdir_clear_irq_ctx(vf);
2209err_rem_entry:
2210	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
2211err_free_conf:
2212	devm_kfree(dev, conf);
2213err_exit:
2214	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
2215				    (u8 *)stat, len);
2216	kfree(stat);
2217	return ret;
2218}
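
/*
 * The error labels above unwind in strict reverse order of setup:
 * err_clr_irq disarms the timer context, err_rem_entry drops the filter
 * from the rule IDR/list, err_free_conf releases the configuration, and
 * err_exit always sends the failure status back to the VF before
 * freeing the response buffer.
 */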
2219
2220/**
2221 * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
2222 * @vf: pointer to the VF info
2223 * @conf: FDIR configuration for each filter
2224 * @v_ret: the final VIRTCHNL code
2225 * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER response buffer
2226 * @len: length of the response buffer
2227 *
2228 * Return: 0 on success or negative errno on failure.
2229 */
2230static int
2231ice_vc_del_fdir_raw(struct ice_vf *vf,
2232		    struct virtchnl_fdir_fltr_conf *conf,
2233		    enum virtchnl_status_code *v_ret,
2234		    struct virtchnl_fdir_del *stat, int len)
2235{
2236	struct ice_vsi *vf_vsi, *ctrl_vsi;
2237	enum ice_block blk = ICE_BLK_FD;
2238	struct ice_fdir_prof_info *pi;
2239	struct ice_pf *pf = vf->pf;
2240	struct device *dev;
2241	struct ice_hw *hw;
2242	unsigned long id;
2243	u16 vsi_num;
2244	int ptg;
2245	int ret;
2246
2247	dev = ice_pf_to_dev(pf);
2248	hw = &pf->hw;
2249	*v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2250	stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2251
2252	id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
2253	ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
2254
2255	ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
2256	if (ret) {
2257		dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
2258			vf->vf_id, ret);
2259		return ret;
2260	}
2261
2262	vf_vsi = ice_get_vf_vsi(vf);
2263	if (!vf_vsi) {
2264		dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
2265		return -ENODEV;
2266	}
2267
2268	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
2269	if (!ctrl_vsi) {
2270		dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
2271			vf->vf_id);
2272		return -ENODEV;
2273	}
2274
2275	pi = &vf->fdir_prof_info[ptg];
2276	if (pi->fdir_active_cnt != 0) {
2277		pi->fdir_active_cnt--;
2278		/* Remove the profile id flow if no active FDIR rules are left */
2279		if (!pi->fdir_active_cnt) {
2280			vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
2281			ice_rem_prof_id_flow(hw, blk, vsi_num, id);
2282
2283			vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
2284			ice_rem_prof_id_flow(hw, blk, vsi_num, id);
2285		}
2286	}
2287
2288	conf->parser_ena = false;
2289	return 0;
2290}
2291
2292/**
2293 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
2294 * @vf: pointer to the VF info
2295 * @msg: pointer to the msg buffer
2296 *
2297 * Return: 0 on success, and a negative error code on failure.
2298 */
2299int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
2300{
2301	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
2302	struct virtchnl_fdir_del *stat = NULL;
2303	struct virtchnl_fdir_fltr_conf *conf;
2304	struct ice_vf_fdir *fdir = &vf->fdir;
2305	enum virtchnl_status_code v_ret;
2306	struct ice_fdir_fltr *input;
2307	enum ice_fltr_ptype flow;
2308	struct device *dev;
2309	struct ice_pf *pf;
2310	int is_tun = 0;
2311	int len = 0;
2312	int ret;
2313
2314	pf = vf->pf;
2315	dev = ice_pf_to_dev(pf);
2316	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
2317	if (ret) {
2318		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2319		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
2320		goto err_exit;
2321	}
2322
2323	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
2324	if (!stat) {
2325		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2326		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
2327		goto err_exit;
2328	}
2329
2330	len = sizeof(*stat);
2331
2332	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
2333	if (!conf) {
2334		v_ret = VIRTCHNL_STATUS_SUCCESS;
2335		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
2336		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
2337			vf->vf_id, fltr->flow_id);
2338		goto err_exit;
2339	}
2340
2341	/* Just return failure when ctrl_vsi idx is invalid */
2342	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
2343		v_ret = VIRTCHNL_STATUS_SUCCESS;
2344		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2345		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
2346		goto err_exit;
2347	}
2348
2349	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
2350	if (ret) {
2351		v_ret = VIRTCHNL_STATUS_SUCCESS;
2352		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2353		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
2354		goto err_exit;
2355	}
2356
2357	/* For raw FDIR filters created by the parser */
2358	if (conf->parser_ena) {
2359		ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
2360		if (ret)
2361			goto err_del_tmr;
2362		goto exit;
2363	}
2364
2365	is_tun = ice_fdir_is_tunnel(conf->ttype);
2366	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
2367	if (ret) {
2368		v_ret = VIRTCHNL_STATUS_SUCCESS;
2369		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2370		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
2371			vf->vf_id, ret);
2372		goto err_del_tmr;
2373	}
2374
2375	/* Remove unused profiles to avoid unexpected behaviors */
2376	input = &conf->input;
2377	flow = input->flow_type;
2378	if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
2379		ice_vc_fdir_rem_prof(vf, flow, is_tun);
2380
2381exit:
2382	kfree(stat);
2383
2384	return ret;
2385
2386err_del_tmr:
2387	ice_vc_fdir_clear_irq_ctx(vf);
2388err_exit:
2389	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
2390				    (u8 *)stat, len);
2391	kfree(stat);
2392	return ret;
2393}
2394
2395/**
2396 * ice_vf_fdir_init - init FDIR resource for VF
2397 * @vf: pointer to the VF info
2398 */
2399void ice_vf_fdir_init(struct ice_vf *vf)
2400{
2401	struct ice_vf_fdir *fdir = &vf->fdir;
2402
2403	idr_init(&fdir->fdir_rule_idr);
2404	INIT_LIST_HEAD(&fdir->fdir_rule_list);
2405
2406	spin_lock_init(&fdir->ctx_lock);
2407	fdir->ctx_irq.flags = 0;
2408	fdir->ctx_done.flags = 0;
2409	ice_vc_fdir_reset_cnt_all(fdir);
2410}
2411
2412/**
2413 * ice_vf_fdir_exit - destroy FDIR resource for VF
2414 * @vf: pointer to the VF info
2415 */
2416void ice_vf_fdir_exit(struct ice_vf *vf)
2417{
2418	ice_vc_fdir_flush_entry(vf);
2419	idr_destroy(&vf->fdir.fdir_rule_idr);
2420	ice_vc_fdir_rem_prof_all(vf);
2421	ice_vc_fdir_free_prof_all(vf);
2422}
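
/*
 * Teardown mirrors ice_vf_fdir_init(): flush all rule entries before
 * destroying the IDR they are indexed by, then remove the FDIR profiles
 * from hardware before freeing the profile memory.
 */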