   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2021-2023, Intel Corporation. */
   3
   4#include "ice.h"
   5#include "ice_base.h"
   6#include "ice_lib.h"
   7#include "ice_flow.h"
   8#include "ice_vf_lib_private.h"
   9
  10#define to_fltr_conf_from_desc(p) \
  11	container_of(p, struct virtchnl_fdir_fltr_conf, input)
  12
  13#define GTPU_TEID_OFFSET 4
  14#define GTPU_EH_QFI_OFFSET 1
  15#define GTPU_EH_QFI_MASK 0x3F
  16#define PFCP_S_OFFSET 0
  17#define PFCP_S_MASK 0x1
  18#define PFCP_PORT_NR 8805
  19
  20#define FDIR_INSET_FLAG_ESP_S 0
  21#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
  22#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
  23#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
  24
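/*
 * Note on the ESP input-set flags above: bit 0 of conf->inset_flag encodes
 * how an ESP SPI is carried. FDIR_INSET_FLAG_ESP_UDP sets the bit (NAT-T,
 * ESP encapsulated in UDP) and FDIR_INSET_FLAG_ESP_IPSEC leaves it clear
 * (plain IPsec ESP); FDIR_INSET_FLAG_ESP_M is the mask used to test it.
 */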
  25enum ice_fdir_tunnel_type {
  26	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
  27	ICE_FDIR_TUNNEL_TYPE_GTPU,
  28	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
  29};
  30
  31struct virtchnl_fdir_fltr_conf {
  32	struct ice_fdir_fltr input;
  33	enum ice_fdir_tunnel_type ttype;
  34	u64 inset_flag;
  35	u32 flow_id;
  36};
  37
  38struct virtchnl_fdir_inset_map {
  39	enum virtchnl_proto_hdr_field field;
  40	enum ice_flow_field fld;
  41	u64 flag;
  42	u64 mask;
  43};
  44
  45static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
  46	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
  47	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
  48	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
  49	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
  50	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
  51	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
  52	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
  53	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
  54	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
  55	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
  56	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
  57	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
  58	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
  59	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
  60	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
  61	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
  62	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
  63	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
  64	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
  65	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
  66		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
  67	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
  68		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
  69	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
  70	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
  71	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
  72};
  73
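/*
 * The map above is scanned linearly by ice_vc_fdir_parse_flow_fld() below.
 * VIRTCHNL_PROTO_HDR_ESP_SPI appears twice on purpose: an entry is taken
 * only if its flag matches conf->inset_flag under its mask, so the same
 * virtchnl field resolves to ICE_FLOW_FIELD_IDX_ESP_SPI for plain IPsec
 * and to ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI for UDP-encapsulated ESP.
 */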
  74/**
  75 * ice_vc_fdir_param_check
  76 * @vf: pointer to the VF structure
  77 * @vsi_id: VF relative VSI ID
  78 *
  79 * Validate the VSI ID and check the PF's and VF's states
  80 *
  81 * Return: 0 on success, and -EINVAL on error.
  82 */
  83static int
  84ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
  85{
  86	struct ice_pf *pf = vf->pf;
  87
  88	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
  89		return -EINVAL;
  90
  91	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
  92		return -EINVAL;
  93
  94	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
  95		return -EINVAL;
  96
  97	if (vsi_id != vf->lan_vsi_num)
  98		return -EINVAL;
  99
 100	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
 101		return -EINVAL;
 102
 103	if (!ice_get_vf_vsi(vf))
 104		return -EINVAL;
 105
 106	return 0;
 107}
 108
 109/**
 110 * ice_vf_start_ctrl_vsi
 111 * @vf: pointer to the VF structure
 112 *
 113 * Allocate the control VSI on first use and open its port for the VF
 114 *
 115 * Return: 0 on success, and other on error.
 116 */
 117static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
 118{
 119	struct ice_pf *pf = vf->pf;
 120	struct ice_vsi *ctrl_vsi;
 121	struct device *dev;
 122	int err;
 123
 124	dev = ice_pf_to_dev(pf);
 125	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 126		return -EEXIST;
 127
 128	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
 129	if (!ctrl_vsi) {
 130		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
 131			vf->vf_id);
 132		return -ENOMEM;
 133	}
 134
 135	err = ice_vsi_open_ctrl(ctrl_vsi);
 136	if (err) {
 137		dev_dbg(dev, "Could not open control VSI for VF %d\n",
 138			vf->vf_id);
 139		goto err_vsi_open;
 140	}
 141
 142	return 0;
 143
 144err_vsi_open:
 145	ice_vsi_release(ctrl_vsi);
 146	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
 147		pf->vsi[vf->ctrl_vsi_idx] = NULL;
 148		vf->ctrl_vsi_idx = ICE_NO_VSI;
 149	}
 150	return err;
 151}
 152
 153/**
 154 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 155 * @vf: pointer to the VF structure
 156 * @flow: filter flow type
 157 *
 158 * Return: 0 on success, and other on error.
 159 */
 160static int
 161ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
 162{
 163	struct ice_vf_fdir *fdir = &vf->fdir;
 164
 165	if (!fdir->fdir_prof) {
 166		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
 167					       ICE_FLTR_PTYPE_MAX,
 168					       sizeof(*fdir->fdir_prof),
 169					       GFP_KERNEL);
 170		if (!fdir->fdir_prof)
 171			return -ENOMEM;
 172	}
 173
 174	if (!fdir->fdir_prof[flow]) {
 175		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
 176						     sizeof(**fdir->fdir_prof),
 177						     GFP_KERNEL);
 178		if (!fdir->fdir_prof[flow])
 179			return -ENOMEM;
 180	}
 181
 182	return 0;
 183}
 184
 185/**
 186 * ice_vc_fdir_free_prof - free profile for this filter flow type
 187 * @vf: pointer to the VF structure
 188 * @flow: filter flow type
 189 */
 190static void
 191ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
 192{
 193	struct ice_vf_fdir *fdir = &vf->fdir;
 194
 195	if (!fdir->fdir_prof)
 196		return;
 197
 198	if (!fdir->fdir_prof[flow])
 199		return;
 200
 201	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
 202	fdir->fdir_prof[flow] = NULL;
 203}
 204
 205/**
 206 * ice_vc_fdir_free_prof_all - free all the profile for this VF
 207 * @vf: pointer to the VF structure
 208 */
 209static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
 210{
 211	struct ice_vf_fdir *fdir = &vf->fdir;
 212	enum ice_fltr_ptype flow;
 213
 214	if (!fdir->fdir_prof)
 215		return;
 216
 217	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
 218		ice_vc_fdir_free_prof(vf, flow);
 219
 220	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
 221	fdir->fdir_prof = NULL;
 222}
 223
 224/**
 225 * ice_vc_fdir_parse_flow_fld
 226 * @proto_hdr: virtual channel protocol filter header
 227 * @conf: FDIR configuration for each filter
 228 * @fld: field type array
 229 * @fld_cnt: field counter
 230 *
 231 * Parse the virtual channel protocol header and store the matched fields in @fld
 232 *
 233 * Return: 0 on success, and other on error.
 234 */
 235static int
 236ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
 237			   struct virtchnl_fdir_fltr_conf *conf,
 238			   enum ice_flow_field *fld, int *fld_cnt)
 239{
 240	struct virtchnl_proto_hdr hdr;
 241	u32 i;
 242
 243	memcpy(&hdr, proto_hdr, sizeof(hdr));
 244
 245	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
 246	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
 247		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
 248			if (fdir_inset_map[i].mask &&
 249			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
 250			     fdir_inset_map[i].flag))
 251				continue;
 252
 253			fld[*fld_cnt] = fdir_inset_map[i].fld;
 254			*fld_cnt += 1;
 255			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
 256				return -EINVAL;
 257			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
 258						     fdir_inset_map[i].field);
 259		}
 260
 261	return 0;
 262}
 263
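/*
 * Illustrative walk-through (not from the original source): for a VF rule
 * matching IPv4 source and destination addresses, the header's
 * field_selector arrives with the IPV4_SRC and IPV4_DST bits set. The loop
 * above matches each bit against fdir_inset_map[], appends
 * ICE_FLOW_FIELD_IDX_IPV4_SA and ICE_FLOW_FIELD_IDX_IPV4_DA to @fld, and
 * clears the consumed bit from the local copy, so the scan stops as soon
 * as the selector is empty.
 */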
 264/**
 265 * ice_vc_fdir_set_flow_fld
 266 * @vf: pointer to the VF structure
 267 * @fltr: virtual channel add cmd buffer
 268 * @conf: FDIR configuration for each filter
 269 * @seg: array of one or more packet segments that describe the flow
 270 *
 271 * Parse the virtual channel add msg buffer's field vector and store it in the
 272 * flow's packet segment fields
 273 *
 274 * Return: 0 on success, and other on error.
 275 */
 276static int
 277ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 278			 struct virtchnl_fdir_fltr_conf *conf,
 279			 struct ice_flow_seg_info *seg)
 280{
 281	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
 282	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
 283	struct device *dev = ice_pf_to_dev(vf->pf);
 284	struct virtchnl_proto_hdrs *proto;
 285	int fld_cnt = 0;
 286	int i;
 287
 288	proto = &rule->proto_hdrs;
 289	for (i = 0; i < proto->count; i++) {
 290		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
 291		int ret;
 292
 293		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
 294		if (ret)
 295			return ret;
 296	}
 297
 298	if (fld_cnt == 0) {
 299		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
 300		return -EINVAL;
 301	}
 302
 303	for (i = 0; i < fld_cnt; i++)
 304		ice_flow_set_fld(seg, fld[i],
 305				 ICE_FLOW_FLD_OFF_INVAL,
 306				 ICE_FLOW_FLD_OFF_INVAL,
 307				 ICE_FLOW_FLD_OFF_INVAL, false);
 308
 309	return 0;
 310}
 311
 312/**
 313 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 314 * @vf: pointer to the VF structure
 315 * @conf: FDIR configuration for each filter
 316 * @seg: array of one or more packet segments that describe the flow
 317 *
 318 * Return: 0 on success, and other on error.
 319 */
 320static int
 321ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
 322			 struct virtchnl_fdir_fltr_conf *conf,
 323			 struct ice_flow_seg_info *seg)
 324{
 325	enum ice_fltr_ptype flow = conf->input.flow_type;
 326	enum ice_fdir_tunnel_type ttype = conf->ttype;
 327	struct device *dev = ice_pf_to_dev(vf->pf);
 328
 329	switch (flow) {
 330	case ICE_FLTR_PTYPE_NON_IP_L2:
 331		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
 332		break;
 333	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
 334		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
 335				  ICE_FLOW_SEG_HDR_IPV4 |
 336				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 337		break;
 338	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
 339		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
 340				  ICE_FLOW_SEG_HDR_IPV4 |
 341				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 342		break;
 343	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
 344		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
 345				  ICE_FLOW_SEG_HDR_IPV4 |
 346				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 347		break;
 348	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
 349		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
 350				  ICE_FLOW_SEG_HDR_IPV4 |
 351				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 352		break;
 353	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
 354		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
 355				  ICE_FLOW_SEG_HDR_IPV4 |
 356				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 357		break;
 358	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
 359		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
 360				  ICE_FLOW_SEG_HDR_IPV4 |
 361				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 362		break;
 363	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 364		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
 365				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 366		break;
 367	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
 368		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
 369				  ICE_FLOW_SEG_HDR_IPV4 |
 370				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 371		break;
 372	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
 373		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 374				  ICE_FLOW_SEG_HDR_IPV4 |
 375				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 376		break;
 377	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
 378	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
 379	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
 380	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
 381		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
 382			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
 383					  ICE_FLOW_SEG_HDR_IPV4 |
 384					  ICE_FLOW_SEG_HDR_IPV_OTHER);
 385		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
 386			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
 387					  ICE_FLOW_SEG_HDR_GTPU_IP |
 388					  ICE_FLOW_SEG_HDR_IPV4 |
 389					  ICE_FLOW_SEG_HDR_IPV_OTHER);
 390		} else {
 391			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
 392				ttype, vf->vf_id);
 393			return -EINVAL;
 394		}
 395		break;
 396	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
 397		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
 398				  ICE_FLOW_SEG_HDR_IPV4 |
 399				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 400		break;
 401	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
 402		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
 403				  ICE_FLOW_SEG_HDR_IPV6 |
 404				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 405		break;
 406	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
 407		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
 408				  ICE_FLOW_SEG_HDR_IPV6 |
 409				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 410		break;
 411	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
 412		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
 413				  ICE_FLOW_SEG_HDR_IPV6 |
 414				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 415		break;
 416	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
 417		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
 418				  ICE_FLOW_SEG_HDR_IPV6 |
 419				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 420		break;
 421	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
 422		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
 423				  ICE_FLOW_SEG_HDR_IPV6 |
 424				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 425		break;
 426	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
 427		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
 428				  ICE_FLOW_SEG_HDR_IPV6 |
 429				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 430		break;
 431	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
 432		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
 433				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 434		break;
 435	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
 436		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
 437				  ICE_FLOW_SEG_HDR_IPV6 |
 438				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 439		break;
 440	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
 441		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 442				  ICE_FLOW_SEG_HDR_IPV6 |
 443				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 444		break;
 445	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
 446		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
 447				  ICE_FLOW_SEG_HDR_IPV6 |
 448				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 449		break;
 450	default:
 451		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
 452			flow, vf->vf_id);
 453		return -EINVAL;
 454	}
 455
 456	return 0;
 457}
 458
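/*
 * For the GTP-U cases above, the tunnel type chooses the header set: a
 * plain GTP-U tunnel matches on ICE_FLOW_SEG_HDR_GTPU_IP only, while a
 * GTP-U extension header rule adds ICE_FLOW_SEG_HDR_GTPU_EH so the QFI
 * field becomes matchable. The @seg filled in here is later handed to
 * ice_flow_add_prof() by ice_vc_fdir_write_flow_prof().
 */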
 459/**
 460 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 461 * @vf: pointer to the VF structure
 462 * @flow: filter flow type
 463 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 464 */
 465static void
 466ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
 467{
 468	struct ice_vf_fdir *fdir = &vf->fdir;
 469	struct ice_fd_hw_prof *vf_prof;
 470	struct ice_pf *pf = vf->pf;
 471	struct ice_vsi *vf_vsi;
 472	struct device *dev;
 473	struct ice_hw *hw;
 474	u64 prof_id;
 475	int i;
 476
 477	dev = ice_pf_to_dev(pf);
 478	hw = &pf->hw;
 479	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
 480		return;
 481
 482	vf_prof = fdir->fdir_prof[flow];
 483	prof_id = vf_prof->prof_id[tun];
 484
 485	vf_vsi = ice_get_vf_vsi(vf);
 486	if (!vf_vsi) {
 487		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
 488		return;
 489	}
 490
 491	if (!fdir->prof_entry_cnt[flow][tun])
 492		return;
 493
 494	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
 495		if (vf_prof->entry_h[i][tun]) {
 496			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
 497
 498			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
 499			ice_flow_rem_entry(hw, ICE_BLK_FD,
 500					   vf_prof->entry_h[i][tun]);
 501			vf_prof->entry_h[i][tun] = 0;
 502		}
 503
 504	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 505	devm_kfree(dev, vf_prof->fdir_seg[tun]);
 506	vf_prof->fdir_seg[tun] = NULL;
 507
 508	for (i = 0; i < vf_prof->cnt; i++)
 509		vf_prof->vsi_h[i] = 0;
 510
 511	fdir->prof_entry_cnt[flow][tun] = 0;
 512}
 513
 514/**
 515 * ice_vc_fdir_rem_prof_all - remove profile for this VF
 516 * @vf: pointer to the VF structure
 517 */
 518static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
 519{
 520	enum ice_fltr_ptype flow;
 521
 522	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
 523	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
 524		ice_vc_fdir_rem_prof(vf, flow, 0);
 525		ice_vc_fdir_rem_prof(vf, flow, 1);
 526	}
 527}
 528
 529/**
 530 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 531 * @fdir: pointer to the VF FDIR structure
 532 */
 533static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
 534{
 535	enum ice_fltr_ptype flow;
 536
 537	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
 538	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
 539		fdir->fdir_fltr_cnt[flow][0] = 0;
 540		fdir->fdir_fltr_cnt[flow][1] = 0;
 541	}
 542}
 543
 544/**
 545 * ice_vc_fdir_has_prof_conflict
 546 * @vf: pointer to the VF structure
 547 * @conf: FDIR configuration for each filter
 548 *
 549 * Check if @conf conflicts with an existing filter's profile
 550 *
 551 * Return: true if a profile conflict exists, false otherwise.
 552 */
 553static bool
 554ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
 555			      struct virtchnl_fdir_fltr_conf *conf)
 556{
 557	struct ice_fdir_fltr *desc;
 558
 559	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
 560		struct virtchnl_fdir_fltr_conf *existing_conf;
 561		enum ice_fltr_ptype flow_type_a, flow_type_b;
 562		struct ice_fdir_fltr *a, *b;
 563
 564		existing_conf = to_fltr_conf_from_desc(desc);
 565		a = &existing_conf->input;
 566		b = &conf->input;
 567		flow_type_a = a->flow_type;
 568		flow_type_b = b->flow_type;
 569
 570		/* No need to compare two rules with different tunnel types or
 571		 * with the same protocol type.
 572		 */
 573		if (existing_conf->ttype != conf->ttype ||
 574		    flow_type_a == flow_type_b)
 575			continue;
 576
 577		switch (flow_type_a) {
 578		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
 579		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
 580		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
 581			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
 582				return true;
 583			break;
 584		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 585			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
 586			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
 587			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
 588				return true;
 589			break;
 590		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
 591		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
 592		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
 593			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
 594				return true;
 595			break;
 596		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
 597			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
 598			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
 599			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
 600				return true;
 601			break;
 602		default:
 603			break;
 604		}
 605	}
 606
 607	return false;
 608}
 609
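/*
 * A plausible rationale, inferred from the checks above: an IPV4_OTHER /
 * IPV6_OTHER profile also covers TCP, UDP and SCTP packets of the same IP
 * version, so letting it coexist with an L4-specific profile of that
 * version would make flow director lookups ambiguous; such combinations
 * are therefore rejected as conflicts.
 */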
 610/**
 611 * ice_vc_fdir_write_flow_prof
 612 * @vf: pointer to the VF structure
 613 * @flow: filter flow type
 614 * @seg: array of one or more packet segments that describe the flow
 615 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 616 *
 617 * Write the flow's profile config and packet segment into the hardware
 618 *
 619 * Return: 0 on success, and other on error.
 620 */
 621static int
 622ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
 623			    struct ice_flow_seg_info *seg, int tun)
 624{
 625	struct ice_vf_fdir *fdir = &vf->fdir;
 626	struct ice_vsi *vf_vsi, *ctrl_vsi;
 627	struct ice_flow_seg_info *old_seg;
 628	struct ice_flow_prof *prof = NULL;
 629	struct ice_fd_hw_prof *vf_prof;
 630	struct device *dev;
 631	struct ice_pf *pf;
 632	struct ice_hw *hw;
 633	u64 entry1_h = 0;
 634	u64 entry2_h = 0;
 635	int ret;
 636
 637	pf = vf->pf;
 638	dev = ice_pf_to_dev(pf);
 639	hw = &pf->hw;
 640	vf_vsi = ice_get_vf_vsi(vf);
 641	if (!vf_vsi)
 642		return -EINVAL;
 643
 644	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
 645	if (!ctrl_vsi)
 646		return -EINVAL;
 647
 648	vf_prof = fdir->fdir_prof[flow];
 649	old_seg = vf_prof->fdir_seg[tun];
 650	if (old_seg) {
 651		if (!memcmp(old_seg, seg, sizeof(*seg))) {
 652			dev_dbg(dev, "Duplicated profile for VF %d!\n",
 653				vf->vf_id);
 654			return -EEXIST;
 655		}
 656
 657		if (fdir->fdir_fltr_cnt[flow][tun]) {
 658			ret = -EINVAL;
 659			dev_dbg(dev, "Input set conflicts for VF %d\n",
 660				vf->vf_id);
 661			goto err_exit;
 662		}
 663
 664		/* remove previously allocated profile */
 665		ice_vc_fdir_rem_prof(vf, flow, tun);
 666	}
 667
 668	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
 669				tun + 1, false, &prof);
 670	if (ret) {
 671		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
 672			flow, vf->vf_id);
 673		goto err_exit;
 674	}
 675
 676	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
 677				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 678				 seg, &entry1_h);
 679	if (ret) {
 680		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
 681			flow, vf->vf_id);
 682		goto err_prof;
 683	}
 684
 685	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
 686				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 687				 seg, &entry2_h);
 688	if (ret) {
 689		dev_dbg(dev,
 690			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
 691			flow, vf->vf_id);
 692		goto err_entry_1;
 693	}
 694
 695	vf_prof->fdir_seg[tun] = seg;
 696	vf_prof->cnt = 0;
 697	fdir->prof_entry_cnt[flow][tun] = 0;
 698
 699	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
 700	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
 701	vf_prof->cnt++;
 702	fdir->prof_entry_cnt[flow][tun]++;
 703
 704	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
 705	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
 706	vf_prof->cnt++;
 707	fdir->prof_entry_cnt[flow][tun]++;
 708
 709	vf_prof->prof_id[tun] = prof->id;
 710
 711	return 0;
 712
 713err_entry_1:
 714	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 715			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
 716	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 717err_prof:
 718	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
 719err_exit:
 720	return ret;
 721}
 722
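/*
 * Each profile written above gets two flow entries: entry1_h binds the
 * profile to the VF's own VSI (the data path), and entry2_h binds it to
 * the VF's control VSI, the VSI through which FDIR programming
 * descriptors are submitted (see ice_vc_fdir_write_fltr()). Both handles
 * are recorded in vf_prof so ice_vc_fdir_rem_prof() can tear them down.
 */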
 723/**
 724 * ice_vc_fdir_config_input_set
 725 * @vf: pointer to the VF structure
 726 * @fltr: virtual channel add cmd buffer
 727 * @conf: FDIR configuration for each filter
 728 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 729 *
 730 * Configure the input set type and value from the virtual channel add msg buffer
 731 *
 732 * Return: 0 on success, and other on error.
 733 */
 734static int
 735ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 736			     struct virtchnl_fdir_fltr_conf *conf, int tun)
 737{
 738	struct ice_fdir_fltr *input = &conf->input;
 739	struct device *dev = ice_pf_to_dev(vf->pf);
 740	struct ice_flow_seg_info *seg;
 741	enum ice_fltr_ptype flow;
 742	int ret;
 743
 744	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
 745	if (ret) {
 746		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
 747			vf->vf_id);
 748		return ret;
 749	}
 750
 751	flow = input->flow_type;
 752	ret = ice_vc_fdir_alloc_prof(vf, flow);
 753	if (ret) {
 754		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
 755		return ret;
 756	}
 757
 758	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
 759	if (!seg)
 760		return -ENOMEM;
 761
 762	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
 763	if (ret) {
 764		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
 765		goto err_exit;
 766	}
 767
 768	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
 769	if (ret) {
 770		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
 771		goto err_exit;
 772	}
 773
 774	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
 775	if (ret == -EEXIST) {
 776		devm_kfree(dev, seg);
 777	} else if (ret) {
 778		dev_dbg(dev, "Write flow profile for VF %d failed\n",
 779			vf->vf_id);
 780		goto err_exit;
 781	}
 782
 783	return 0;
 784
 785err_exit:
 786	devm_kfree(dev, seg);
 787	return ret;
 788}
 789
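/*
 * Ownership note for @seg above: on success the buffer is stored in
 * vf_prof->fdir_seg[tun] by ice_vc_fdir_write_flow_prof() and freed later
 * by ice_vc_fdir_rem_prof(); on -EEXIST (duplicated profile) it is freed
 * immediately and the function still returns 0; any other error frees it
 * via err_exit, so the caller never owns it.
 */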
 790/**
 791 * ice_vc_fdir_parse_pattern
 792 * @vf: pointer to the VF info
 793 * @fltr: virtual channel add cmd buffer
 794 * @conf: FDIR configuration for each filter
 795 *
 796 * Parse the virtual channel filter's pattern and store it in @conf
 797 *
 798 * Return: 0 on success, and other on error.
 799 */
 800static int
 801ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 802			  struct virtchnl_fdir_fltr_conf *conf)
 803{
 804	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
 805	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
 806	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
 807	struct device *dev = ice_pf_to_dev(vf->pf);
 808	struct ice_fdir_fltr *input = &conf->input;
 809	int i;
 810
 811	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
 812		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
 813			proto->count, vf->vf_id);
 814		return -EINVAL;
 815	}
 816
 817	for (i = 0; i < proto->count; i++) {
 818		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
 819		struct ip_esp_hdr *esph;
 820		struct ip_auth_hdr *ah;
 821		struct sctphdr *sctph;
 822		struct ipv6hdr *ip6h;
 823		struct udphdr *udph;
 824		struct tcphdr *tcph;
 825		struct ethhdr *eth;
 826		struct iphdr *iph;
 827		u8 s_field;
 828		u8 *rawh;
 829
 830		switch (hdr->type) {
 831		case VIRTCHNL_PROTO_HDR_ETH:
 832			eth = (struct ethhdr *)hdr->buffer;
 833			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
 834
 835			if (hdr->field_selector)
 836				input->ext_data.ether_type = eth->h_proto;
 837			break;
 838		case VIRTCHNL_PROTO_HDR_IPV4:
 839			iph = (struct iphdr *)hdr->buffer;
 840			l3 = VIRTCHNL_PROTO_HDR_IPV4;
 841			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
 842
 843			if (hdr->field_selector) {
 844				input->ip.v4.src_ip = iph->saddr;
 845				input->ip.v4.dst_ip = iph->daddr;
 846				input->ip.v4.tos = iph->tos;
 847				input->ip.v4.proto = iph->protocol;
 848			}
 849			break;
 850		case VIRTCHNL_PROTO_HDR_IPV6:
 851			ip6h = (struct ipv6hdr *)hdr->buffer;
 852			l3 = VIRTCHNL_PROTO_HDR_IPV6;
 853			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
 854
 855			if (hdr->field_selector) {
 856				memcpy(input->ip.v6.src_ip,
 857				       ip6h->saddr.in6_u.u6_addr8,
 858				       sizeof(ip6h->saddr));
 859				memcpy(input->ip.v6.dst_ip,
 860				       ip6h->daddr.in6_u.u6_addr8,
 861				       sizeof(ip6h->daddr));
 862				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
 863						  (ip6h->flow_lbl[0] >> 4);
 864				input->ip.v6.proto = ip6h->nexthdr;
 865			}
 866			break;
 867		case VIRTCHNL_PROTO_HDR_TCP:
 868			tcph = (struct tcphdr *)hdr->buffer;
 869			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 870				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
 871			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 872				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
 873
 874			if (hdr->field_selector) {
 875				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
 876					input->ip.v4.src_port = tcph->source;
 877					input->ip.v4.dst_port = tcph->dest;
 878				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
 879					input->ip.v6.src_port = tcph->source;
 880					input->ip.v6.dst_port = tcph->dest;
 881				}
 882			}
 883			break;
 884		case VIRTCHNL_PROTO_HDR_UDP:
 885			udph = (struct udphdr *)hdr->buffer;
 886			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 887				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
 888			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 889				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
 890
 891			if (hdr->field_selector) {
 892				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
 893					input->ip.v4.src_port = udph->source;
 894					input->ip.v4.dst_port = udph->dest;
 895				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
 896					input->ip.v6.src_port = udph->source;
 897					input->ip.v6.dst_port = udph->dest;
 898				}
 899			}
 900			break;
 901		case VIRTCHNL_PROTO_HDR_SCTP:
 902			sctph = (struct sctphdr *)hdr->buffer;
 903			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 904				input->flow_type =
 905					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
 906			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 907				input->flow_type =
 908					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
 909
 910			if (hdr->field_selector) {
 911				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
 912					input->ip.v4.src_port = sctph->source;
 913					input->ip.v4.dst_port = sctph->dest;
 914				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
 915					input->ip.v6.src_port = sctph->source;
 916					input->ip.v6.dst_port = sctph->dest;
 917				}
 918			}
 919			break;
 920		case VIRTCHNL_PROTO_HDR_L2TPV3:
 921			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 922				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
 923			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 924				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
 925
 926			if (hdr->field_selector)
 927				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
 928			break;
 929		case VIRTCHNL_PROTO_HDR_ESP:
 930			esph = (struct ip_esp_hdr *)hdr->buffer;
 931			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
 932			    l4 == VIRTCHNL_PROTO_HDR_UDP)
 933				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
 934			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
 935				 l4 == VIRTCHNL_PROTO_HDR_UDP)
 936				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
 937			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
 938				 l4 == VIRTCHNL_PROTO_HDR_NONE)
 939				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
 940			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
 941				 l4 == VIRTCHNL_PROTO_HDR_NONE)
 942				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
 943
 944			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
 945				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
 946			else
 947				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
 948
 949			if (hdr->field_selector) {
 950				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 951					input->ip.v4.sec_parm_idx = esph->spi;
 952				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 953					input->ip.v6.sec_parm_idx = esph->spi;
 954			}
 955			break;
 956		case VIRTCHNL_PROTO_HDR_AH:
 957			ah = (struct ip_auth_hdr *)hdr->buffer;
 958			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 959				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
 960			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 961				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
 962
 963			if (hdr->field_selector) {
 964				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 965					input->ip.v4.sec_parm_idx = ah->spi;
 966				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 967					input->ip.v6.sec_parm_idx = ah->spi;
 968			}
 969			break;
 970		case VIRTCHNL_PROTO_HDR_PFCP:
 971			rawh = (u8 *)hdr->buffer;
 972			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
 973			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
 974				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
 975			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
 976				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
 977			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
 978				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
 979			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
 980				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
 981
 982			if (hdr->field_selector) {
 983				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
 984					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
 985				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
 986					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
 987			}
 988			break;
 989		case VIRTCHNL_PROTO_HDR_GTPU_IP:
 990			rawh = (u8 *)hdr->buffer;
 991			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
 992
 993			if (hdr->field_selector)
 994				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
 995			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
 996			break;
 997		case VIRTCHNL_PROTO_HDR_GTPU_EH:
 998			rawh = (u8 *)hdr->buffer;
 999
1000			if (hdr->field_selector)
1001				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1002			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1003			break;
1004		default:
1005			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
1006				hdr->type, vf->vf_id);
1007			return -EINVAL;
1008		}
1009	}
1010
1011	return 0;
1012}
1013
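/*
 * A minimal sketch of the message this parser expects (illustrative
 * values, not from the original source): an IPv4/UDP rule arrives as
 * proto_hdrs.count = 3 with proto_hdr[] = { ETH, IPV4, UDP }; the IPV4
 * header's field_selector picks the source/destination addresses and the
 * UDP header's field_selector picks the source/destination ports,
 * yielding flow_type ICE_FLTR_PTYPE_NONF_IPV4_UDP.
 */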
1014/**
1015 * ice_vc_fdir_parse_action
1016 * @vf: pointer to the VF info
1017 * @fltr: virtual channel add cmd buffer
1018 * @conf: FDIR configuration for each filter
1019 *
1020 * Parse the virtual channel filter's action set and store it in @conf
1021 *
1022 * Return: 0 on success, and other on error.
1023 */
1024static int
1025ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1026			 struct virtchnl_fdir_fltr_conf *conf)
1027{
1028	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1029	struct device *dev = ice_pf_to_dev(vf->pf);
1030	struct ice_fdir_fltr *input = &conf->input;
1031	u32 dest_num = 0;
1032	u32 mark_num = 0;
1033	int i;
1034
1035	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1036		dev_dbg(dev, "Invalid action count:0x%x for VF %d\n",
1037			as->count, vf->vf_id);
1038		return -EINVAL;
1039	}
1040
1041	for (i = 0; i < as->count; i++) {
1042		struct virtchnl_filter_action *action = &as->actions[i];
1043
1044		switch (action->type) {
1045		case VIRTCHNL_ACTION_PASSTHRU:
1046			dest_num++;
1047			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1048			break;
1049		case VIRTCHNL_ACTION_DROP:
1050			dest_num++;
1051			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1052			break;
1053		case VIRTCHNL_ACTION_QUEUE:
1054			dest_num++;
1055			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1056			input->q_index = action->act_conf.queue.index;
1057			break;
1058		case VIRTCHNL_ACTION_Q_REGION:
1059			dest_num++;
1060			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1061			input->q_index = action->act_conf.queue.index;
1062			input->q_region = action->act_conf.queue.region;
1063			break;
1064		case VIRTCHNL_ACTION_MARK:
1065			mark_num++;
1066			input->fltr_id = action->act_conf.mark_id;
1067			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1068			break;
1069		default:
1070			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1071				action->type, vf->vf_id);
1072			return -EINVAL;
1073		}
1074	}
1075
1076	if (dest_num == 0 || dest_num >= 2) {
1077		dev_dbg(dev, "Invalid destination action for VF %d\n",
1078			vf->vf_id);
1079		return -EINVAL;
1080	}
1081
1082	if (mark_num >= 2) {
1083		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1084		return -EINVAL;
1085	}
1086
1087	return 0;
1088}
1089
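/*
 * A valid action set therefore contains exactly one destination action
 * (PASSTHRU, DROP, QUEUE or Q_REGION) and at most one MARK action; e.g.
 * { QUEUE(index = 3), MARK(id = 0x1234) } directs matching packets to VF
 * queue 3 and reports the mark ID (FDID) in received descriptors
 * (illustrative example, not from the original source).
 */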
1090/**
1091 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1092 * @vf: pointer to the VF info
1093 * @fltr: virtual channel add cmd buffer
1094 * @conf: FDIR configuration for each filter
1095 *
1096 * Return: 0 on success, and other on error.
1097 */
1098static int
1099ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1100			  struct virtchnl_fdir_fltr_conf *conf)
1101{
1102	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1103	int ret;
1104
1105	if (!ice_vc_validate_pattern(vf, proto))
1106		return -EINVAL;
1107
1108	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1109	if (ret)
1110		return ret;
1111
1112	return ice_vc_fdir_parse_action(vf, fltr, conf);
1113}
1114
1115/**
1116 * ice_vc_fdir_comp_rules - check if two filter rules have the same value
1117 * @conf_a: FDIR configuration for filter a
1118 * @conf_b: FDIR configuration for filter b
1119 *
1120 * Return: true if the two rules have the same value, false otherwise.
1121 */
1122static bool
1123ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1124		       struct virtchnl_fdir_fltr_conf *conf_b)
1125{
1126	struct ice_fdir_fltr *a = &conf_a->input;
1127	struct ice_fdir_fltr *b = &conf_b->input;
1128
1129	if (conf_a->ttype != conf_b->ttype)
1130		return false;
1131	if (a->flow_type != b->flow_type)
1132		return false;
1133	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1134		return false;
1135	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1136		return false;
1137	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1138		return false;
1139	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1140		return false;
1141	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1142		return false;
1143	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1144		return false;
1145	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1146		return false;
1147	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1148		return false;
1149
1150	return true;
1151}
1152
1153/**
1154 * ice_vc_fdir_is_dup_fltr
1155 * @vf: pointer to the VF info
1156 * @conf: FDIR configuration for each filter
1157 *
1158 * Check if a rule with the same value as @conf already exists
1159 *
1160 * Return: true if a duplicate rule exists, false otherwise.
1161 */
1162static bool
1163ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1164{
1165	struct ice_fdir_fltr *desc;
1166	bool ret;
1167
1168	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1169		struct virtchnl_fdir_fltr_conf *node =
1170				to_fltr_conf_from_desc(desc);
1171
1172		ret = ice_vc_fdir_comp_rules(node, conf);
1173		if (ret)
1174			return true;
1175	}
1176
1177	return false;
1178}
1179
1180/**
1181 * ice_vc_fdir_insert_entry
1182 * @vf: pointer to the VF info
1183 * @conf: FDIR configuration for each filter
1184 * @id: pointer to ID value allocated by driver
1185 *
1186 * Insert FDIR conf entry into list and allocate ID for this filter
1187 *
1188 * Return: 0 on success, and other on error.
1189 */
1190static int
1191ice_vc_fdir_insert_entry(struct ice_vf *vf,
1192			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1193{
1194	struct ice_fdir_fltr *input = &conf->input;
1195	int i;
1196
1197	/* alloc ID corresponding to conf */
1198	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1199		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1200	if (i < 0)
1201		return -EINVAL;
1202	*id = i;
1203
1204	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1205	return 0;
1206}
1207
1208/**
1209 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1210 * @vf: pointer to the VF info
1211 * @conf: FDIR configuration for each filter
1212 * @id: filter rule's ID
1213 */
1214static void
1215ice_vc_fdir_remove_entry(struct ice_vf *vf,
1216			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1217{
1218	struct ice_fdir_fltr *input = &conf->input;
1219
1220	idr_remove(&vf->fdir.fdir_rule_idr, id);
1221	list_del(&input->fltr_node);
1222}
1223
1224/**
1225 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1226 * @vf: pointer to the VF info
1227 * @id: filter rule's ID
1228 *
1229 * Return: pointer to the filter configuration on success, NULL otherwise.
1230 */
1231static struct virtchnl_fdir_fltr_conf *
1232ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1233{
1234	return idr_find(&vf->fdir.fdir_rule_idr, id);
1235}
1236
1237/**
1238 * ice_vc_fdir_flush_entry - remove all FDIR conf entry
1239 * @vf: pointer to the VF info
1240 */
1241static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1242{
1243	struct virtchnl_fdir_fltr_conf *conf;
1244	struct ice_fdir_fltr *desc, *temp;
1245
1246	list_for_each_entry_safe(desc, temp,
1247				 &vf->fdir.fdir_rule_list, fltr_node) {
1248		conf = to_fltr_conf_from_desc(desc);
1249		list_del(&desc->fltr_node);
1250		devm_kfree(ice_pf_to_dev(vf->pf), conf);
1251	}
1252}
1253
1254/**
1255 * ice_vc_fdir_write_fltr - write filter rule into hardware
1256 * @vf: pointer to the VF info
1257 * @conf: FDIR configuration for each filter
1258 * @add: true implies add rule, false implies del rule
1259 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1260 *
1261 * Return: 0 on success, and other on error.
1262 */
1263static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1264				  struct virtchnl_fdir_fltr_conf *conf,
1265				  bool add, bool is_tun)
1266{
1267	struct ice_fdir_fltr *input = &conf->input;
1268	struct ice_vsi *vsi, *ctrl_vsi;
1269	struct ice_fltr_desc desc;
1270	struct device *dev;
1271	struct ice_pf *pf;
1272	struct ice_hw *hw;
1273	int ret;
1274	u8 *pkt;
1275
1276	pf = vf->pf;
1277	dev = ice_pf_to_dev(pf);
1278	hw = &pf->hw;
1279	vsi = ice_get_vf_vsi(vf);
1280	if (!vsi) {
1281		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1282		return -EINVAL;
1283	}
1284
1285	input->dest_vsi = vsi->idx;
1286	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1287
1288	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1289	if (!ctrl_vsi) {
1290		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1291		return -EINVAL;
1292	}
1293
1294	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1295	if (!pkt)
1296		return -ENOMEM;
1297
1298	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1299	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1300	if (ret) {
1301		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1302			vf->vf_id, input->flow_type);
1303		goto err_free_pkt;
1304	}
1305
1306	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1307	if (ret)
1308		goto err_free_pkt;
1309
1310	return 0;
1311
1312err_free_pkt:
1313	devm_kfree(dev, pkt);
1314	return ret;
1315}
1316
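/*
 * Note on @pkt above: on success the training packet is handed to
 * ice_prgm_fdir_fltr() together with the programming descriptor and is
 * presumably released once the control VSI's Tx descriptor completes;
 * only the error paths free it here.
 */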
1317/**
1318 * ice_vf_fdir_timer - FDIR programming timeout timer handler
1319 * @t: pointer to timer_list
1320 */
1321static void ice_vf_fdir_timer(struct timer_list *t)
1322{
1323	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1324	struct ice_vf_fdir_ctx *ctx_done;
1325	struct ice_vf_fdir *fdir;
1326	unsigned long flags;
1327	struct ice_vf *vf;
1328	struct ice_pf *pf;
1329
1330	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1331	vf = container_of(fdir, struct ice_vf, fdir);
1332	ctx_done = &fdir->ctx_done;
1333	pf = vf->pf;
1334	spin_lock_irqsave(&fdir->ctx_lock, flags);
1335	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1336		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1337		WARN_ON_ONCE(1);
1338		return;
1339	}
1340
1341	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1342
1343	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1344	ctx_done->conf = ctx_irq->conf;
1345	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1346	ctx_done->v_opcode = ctx_irq->v_opcode;
1347	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1348
1349	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1350	ice_service_task_schedule(pf);
1351}
1352
1353/**
1354 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1355 * @ctrl_vsi: pointer to a VF's CTRL VSI
1356 * @rx_desc: pointer to FDIR Rx queue descriptor
1357 */
1358void
1359ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1360			union ice_32b_rx_flex_desc *rx_desc)
1361{
1362	struct ice_pf *pf = ctrl_vsi->back;
1363	struct ice_vf *vf = ctrl_vsi->vf;
1364	struct ice_vf_fdir_ctx *ctx_done;
1365	struct ice_vf_fdir_ctx *ctx_irq;
1366	struct ice_vf_fdir *fdir;
1367	unsigned long flags;
1368	struct device *dev;
1369	int ret;
1370
1371	if (WARN_ON(!vf))
1372		return;
1373
1374	fdir = &vf->fdir;
1375	ctx_done = &fdir->ctx_done;
1376	ctx_irq = &fdir->ctx_irq;
1377	dev = ice_pf_to_dev(pf);
1378	spin_lock_irqsave(&fdir->ctx_lock, flags);
1379	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1380		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1381		WARN_ON_ONCE(1);
1382		return;
1383	}
1384
1385	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1386
1387	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1388	ctx_done->conf = ctx_irq->conf;
1389	ctx_done->stat = ICE_FDIR_CTX_IRQ;
1390	ctx_done->v_opcode = ctx_irq->v_opcode;
1391	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1392	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1393
1394	ret = del_timer(&ctx_irq->rx_tmr);
1395	if (!ret)
1396		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1397
1398	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1399	ice_service_task_schedule(pf);
1400}
1401
1402/**
1403 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1404 * @vf: pointer to the VF info
1405 */
1406static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1407{
1408	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
1409	struct ice_vsi *vf_vsi;
1410	struct device *dev;
1411	struct ice_pf *pf;
1412	struct ice_hw *hw;
1413	u16 vsi_num;
1414
1415	pf = vf->pf;
1416	hw = &pf->hw;
1417	dev = ice_pf_to_dev(pf);
1418	vf_vsi = ice_get_vf_vsi(vf);
1419	if (!vf_vsi) {
1420		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1421		return;
1422	}
1423
1424	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1425
1426	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1427	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1428	switch (hw->mac_type) {
1429	case ICE_MAC_E830:
1430		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1431		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1432		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1433		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1434		break;
1435	case ICE_MAC_E810:
1436	default:
1437		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1438		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1439		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1440		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1441	}
1442
1443	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1444		vf->vf_id, fd_size_g, fd_size_b);
1445	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1446		vf->vf_id, fd_cnt_g, fd_cnt_b);
1447}
1448
1449/**
1450 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1451 * @vf: pointer to the VF info
1452 * @ctx: FDIR context info for post processing
1453 * @status: virtchnl FDIR program status
1454 *
1455 * Return: 0 on success, and other on error.
1456 */
1457static int
1458ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1459		      enum virtchnl_fdir_prgm_status *status)
1460{
1461	struct device *dev = ice_pf_to_dev(vf->pf);
1462	u32 stat_err, error, prog_id;
1463	int ret;
1464
1465	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1466	if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
1467	    ICE_FXD_FLTR_WB_QW1_DD_YES) {
1468		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1469		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1470		ret = -EINVAL;
1471		goto err_exit;
1472	}
1473
1474	prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
1475	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1476	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1477		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
1478			vf->vf_id);
1479		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1480		ret = -EINVAL;
1481		goto err_exit;
1482	}
1483
1484	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1485	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1486		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
1487			vf->vf_id);
1488		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1489		ret = -EINVAL;
1490		goto err_exit;
1491	}
1492
1493	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
1494	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1495		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1496			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
1497				vf->vf_id);
1498			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1499		} else {
1500			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
1501				vf->vf_id);
1502			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1503		}
1504		ret = -EINVAL;
1505		goto err_exit;
1506	}
1507
1508	error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
1509	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1510		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
1511		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1512		ret = -EINVAL;
1513		goto err_exit;
1514	}
1515
1516	*status = VIRTCHNL_FDIR_SUCCESS;
1517
1518	return 0;
1519
1520err_exit:
1521	ice_vf_fdir_dump_info(vf);
1522	return ret;
1523}
1524
1525/**
1526 * ice_vc_add_fdir_fltr_post
1527 * @vf: pointer to the VF structure
1528 * @ctx: FDIR context info for post processing
1529 * @status: virtchnl FDIR program status
1530 * @success: true implies success, false implies failure
1531 *
1532 * Post process for the flow director add command. On success, do the post
1533 * processing and send back a success msg by virtchnl. Otherwise, revert the
1534 * context and send back a failure msg by virtchnl.
1535 *
1536 * Return: 0 on success, and other on error.
1537 */
1538static int
1539ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1540			  enum virtchnl_fdir_prgm_status status,
1541			  bool success)
1542{
1543	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1544	struct device *dev = ice_pf_to_dev(vf->pf);
1545	enum virtchnl_status_code v_ret;
1546	struct virtchnl_fdir_add *resp;
1547	int ret, len, is_tun;
1548
1549	v_ret = VIRTCHNL_STATUS_SUCCESS;
1550	len = sizeof(*resp);
1551	resp = kzalloc(len, GFP_KERNEL);
1552	if (!resp) {
1553		len = 0;
1554		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1555		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1556		goto err_exit;
1557	}
1558
1559	if (!success)
1560		goto err_exit;
1561
1562	is_tun = 0;
1563	resp->status = status;
1564	resp->flow_id = conf->flow_id;
1565	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1566
1567	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1568				    (u8 *)resp, len);
1569	kfree(resp);
1570
1571	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1572		vf->vf_id, conf->flow_id,
1573		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1574		"add" : "del");
1575	return ret;
1576
1577err_exit:
1578	if (resp)
1579		resp->status = status;
1580	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1581	devm_kfree(dev, conf);
1582
1583	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1584				    (u8 *)resp, len);
1585	kfree(resp);
1586	return ret;
1587}
1588
1589/**
1590 * ice_vc_del_fdir_fltr_post
1591 * @vf: pointer to the VF structure
1592 * @ctx: FDIR context info for post processing
1593 * @status: virtchnl FDIR program status
1594 * @success: true implies success, false implies failure
1595 *
1596 * Post process for the flow director del command. On success, do the post
1597 * processing and send back a success msg by virtchnl. Otherwise, revert the
1598 * context and send back a failure msg by virtchnl.
1599 *
1600 * Return: 0 on success, and other on error.
1601 */
1602static int
1603ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1604			  enum virtchnl_fdir_prgm_status status,
1605			  bool success)
1606{
1607	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1608	struct device *dev = ice_pf_to_dev(vf->pf);
1609	enum virtchnl_status_code v_ret;
1610	struct virtchnl_fdir_del *resp;
1611	int ret, len, is_tun;
1612
1613	v_ret = VIRTCHNL_STATUS_SUCCESS;
1614	len = sizeof(*resp);
1615	resp = kzalloc(len, GFP_KERNEL);
1616	if (!resp) {
1617		len = 0;
1618		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1619		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1620		goto err_exit;
1621	}
1622
1623	if (!success)
1624		goto err_exit;
1625
1626	is_tun = 0;
1627	resp->status = status;
1628	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1629	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1630
1631	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1632				    (u8 *)resp, len);
1633	kfree(resp);
1634
1635	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1636		vf->vf_id, conf->flow_id,
1637		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1638		"add" : "del");
1639	devm_kfree(dev, conf);
1640	return ret;
1641
1642err_exit:
1643	if (resp)
1644		resp->status = status;
1645	if (success)
1646		devm_kfree(dev, conf);
1647
1648	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1649				    (u8 *)resp, len);
1650	kfree(resp);
1651	return ret;
1652}
1653
1654/**
1655 * ice_flush_fdir_ctx
1656 * @pf: pointer to the PF structure
1657 *
1658 * Flush all pending events on the ctx_done list and process them.
1659 */
1660void ice_flush_fdir_ctx(struct ice_pf *pf)
1661{
1662	struct ice_vf *vf;
1663	unsigned int bkt;
1664
1665	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1666		return;
1667
1668	mutex_lock(&pf->vfs.table_lock);
1669	ice_for_each_vf(pf, bkt, vf) {
1670		struct device *dev = ice_pf_to_dev(pf);
1671		enum virtchnl_fdir_prgm_status status;
1672		struct ice_vf_fdir_ctx *ctx;
1673		unsigned long flags;
1674		int ret;
1675
1676		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1677			continue;
1678
1679		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1680			continue;
1681
1682		ctx = &vf->fdir.ctx_done;
1683		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1684		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1685			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1686			continue;
1687		}
1688		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1689
1690		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1691		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1692			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1693			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1694				vf->vf_id);
1695			goto err_exit;
1696		}
1697
1698		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1699		if (ret)
1700			goto err_exit;
1701
1702		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1703			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1704		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1705			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1706		else
1707			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1708
1709		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1710		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1711		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1712		continue;
1713err_exit:
1714		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1715			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1716		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1717			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1718		else
1719			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1720
1721		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1722		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1723		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1724	}
1725	mutex_unlock(&pf->vfs.table_lock);
1726}
1727
1728/**
1729 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
1730 * @vf: pointer to the VF structure
1731 * @conf: FDIR configuration for each filter
1732 * @v_opcode: virtual channel operation code
1733 *
1734 * Return: 0 on success, and other on error.
1735 */
1736static int
1737ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
1738			enum virtchnl_ops v_opcode)
1739{
1740	struct device *dev = ice_pf_to_dev(vf->pf);
1741	struct ice_vf_fdir_ctx *ctx;
1742	unsigned long flags;
1743
1744	ctx = &vf->fdir.ctx_irq;
1745	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1746	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
1747	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
1748		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1749		dev_dbg(dev, "VF %d: Last request is still in progress\n",
1750			vf->vf_id);
1751		return -EBUSY;
1752	}
1753	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
1754	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1755
1756	ctx->conf = conf;
1757	ctx->v_opcode = v_opcode;
1758	ctx->stat = ICE_FDIR_CTX_READY;
1759	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
1760
1761	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1762
1763	return 0;
1764}
1765
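/*
 * The 10 ms timer armed above is the failure path: either
 * ice_vc_fdir_irq_handler() fires first and deletes it, or
 * ice_vf_fdir_timer() expires and marks the context ICE_FDIR_CTX_TIMEOUT.
 * Both paths move the context from ctx_irq to ctx_done and schedule the
 * service task, which processes it in ice_flush_fdir_ctx().
 */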
1766/**
1767 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
1768 * @vf: pointer to the VF structure
1771 */
1772static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
1773{
1774	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
1775	unsigned long flags;
1776
1777	del_timer(&ctx->rx_tmr);
1778	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1779	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1780	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1781}
1782
1783/**
1784 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
1785 * @vf: pointer to the VF info
1786 * @msg: pointer to the msg buffer
1787 *
1788 * Return: 0 on success, and other on error.
1789 */
1790int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
1791{
1792	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
1793	struct virtchnl_fdir_add *stat = NULL;
1794	struct virtchnl_fdir_fltr_conf *conf;
1795	enum virtchnl_status_code v_ret;
1796	struct device *dev;
1797	struct ice_pf *pf;
1798	int is_tun = 0;
1799	int len = 0;
1800	int ret;
1801
1802	pf = vf->pf;
1803	dev = ice_pf_to_dev(pf);
1804	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1805	if (ret) {
1806		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1807		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1808		goto err_exit;
1809	}
1810
1811	ret = ice_vf_start_ctrl_vsi(vf);
1812	if (ret && (ret != -EEXIST)) {
1813		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1814		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
1815			vf->vf_id, ret);
1816		goto err_exit;
1817	}
1818
1819	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1820	if (!stat) {
1821		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1822		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1823		goto err_exit;
1824	}
1825
1826	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
1827	if (!conf) {
1828		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1829		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
1830		goto err_exit;
1831	}
1832
1833	len = sizeof(*stat);
1834	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
1835	if (ret) {
1836		v_ret = VIRTCHNL_STATUS_SUCCESS;
1837		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1838		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
1839		goto err_free_conf;
1840	}
1841
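    	/* validate_only requests are acknowledged without touching the
    	 * hardware; the parsed conf is discarded.
    	 */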
1842	if (fltr->validate_only) {
1843		v_ret = VIRTCHNL_STATUS_SUCCESS;
1844		stat->status = VIRTCHNL_FDIR_SUCCESS;
1845		devm_kfree(dev, conf);
1846		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
1847					    v_ret, (u8 *)stat, len);
1848		goto exit;
1849	}
1850
1851	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
1852	if (ret) {
1853		v_ret = VIRTCHNL_STATUS_SUCCESS;
1854		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
1855		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
1856			vf->vf_id, ret);
1857		goto err_free_conf;
1858	}
1859
1860	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
1861	if (ret) {
1862		v_ret = VIRTCHNL_STATUS_SUCCESS;
1863		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
1864		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
1865			vf->vf_id);
1866		goto err_free_conf;
1867	}
1868
1869	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
1870	if (ret) {
1871		v_ret = VIRTCHNL_STATUS_SUCCESS;
1872		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1873		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
1874		goto err_free_conf;
1875	}
1876
1877	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
1878	if (ret) {
1879		v_ret = VIRTCHNL_STATUS_SUCCESS;
1880		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1881		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1882		goto err_rem_entry;
1883	}
1884
1885	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
1886	if (ret) {
1887		v_ret = VIRTCHNL_STATUS_SUCCESS;
1888		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1889		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1890			vf->vf_id, ret);
1891		goto err_clr_irq;
1892	}
1893
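    	/* On success the reply to the VF is deferred; it is sent from
    	 * ice_vc_add_fdir_fltr_post() once the programming status
    	 * descriptor has been processed.
    	 */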
1894exit:
1895	kfree(stat);
1896	return ret;
1897
1898err_clr_irq:
1899	ice_vc_fdir_clear_irq_ctx(vf);
1900err_rem_entry:
1901	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1902err_free_conf:
1903	devm_kfree(dev, conf);
1904err_exit:
1905	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
1906				    (u8 *)stat, len);
1907	kfree(stat);
1908	return ret;
1909}
1910
1911/**
1912 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
1913 * @vf: pointer to the VF info
1914 * @msg: pointer to the msg buffer
1915 *
1916 * Return: 0 on success, and other on error.
1917 */
1918int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
1919{
1920	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
1921	struct virtchnl_fdir_del *stat = NULL;
1922	struct virtchnl_fdir_fltr_conf *conf;
1923	enum virtchnl_status_code v_ret;
1924	struct device *dev;
1925	struct ice_pf *pf;
1926	int is_tun = 0;
1927	int len = 0;
1928	int ret;
1929
1930	pf = vf->pf;
1931	dev = ice_pf_to_dev(pf);
1932	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1933	if (ret) {
1934		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1935		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1936		goto err_exit;
1937	}
1938
1939	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1940	if (!stat) {
1941		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1942		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1943		goto err_exit;
1944	}
1945
1946	len = sizeof(*stat);
1947
1948	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
1949	if (!conf) {
1950		v_ret = VIRTCHNL_STATUS_SUCCESS;
1951		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1952		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
1953			vf->vf_id, fltr->flow_id);
1954		goto err_exit;
1955	}
1956
1957	/* Just return failure when ctrl_vsi idx is invalid */
1958	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
1959		v_ret = VIRTCHNL_STATUS_SUCCESS;
1960		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1961		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
1962		goto err_exit;
1963	}
1964
1965	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
1966	if (ret) {
1967		v_ret = VIRTCHNL_STATUS_SUCCESS;
1968		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1969		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
1970		goto err_exit;
1971	}
1972
1973	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
1974	if (ret) {
1975		v_ret = VIRTCHNL_STATUS_SUCCESS;
1976		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1977		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1978			vf->vf_id, ret);
1979		goto err_del_tmr;
1980	}
1981
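    	/* As in the add path, the reply to the VF is deferred to
    	 * ice_vc_del_fdir_fltr_post() after the programming status
    	 * descriptor is processed.
    	 */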
1982	kfree(stat);
1983
1984	return ret;
1985
1986err_del_tmr:
1987	ice_vc_fdir_clear_irq_ctx(vf);
1988err_exit:
1989	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
1990				    (u8 *)stat, len);
1991	kfree(stat);
1992	return ret;
1993}
1994
1995/**
1996 * ice_vf_fdir_init - init FDIR resource for VF
1997 * @vf: pointer to the VF info
1998 */
1999void ice_vf_fdir_init(struct ice_vf *vf)
2000{
2001	struct ice_vf_fdir *fdir = &vf->fdir;
2002
2003	idr_init(&fdir->fdir_rule_idr);
2004	INIT_LIST_HEAD(&fdir->fdir_rule_list);
2005
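    	/* ctx_lock serializes the VALID flags shared by the request
    	 * path, the IRQ handler, the timer and the service task.
    	 */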
2006	spin_lock_init(&fdir->ctx_lock);
2007	fdir->ctx_irq.flags = 0;
2008	fdir->ctx_done.flags = 0;
2009	ice_vc_fdir_reset_cnt_all(fdir);
2010}
2011
2012/**
2013 * ice_vf_fdir_exit - destroy FDIR resource for VF
2014 * @vf: pointer to the VF info
2015 */
2016void ice_vf_fdir_exit(struct ice_vf *vf)
2017{
2018	ice_vc_fdir_flush_entry(vf);
2019	idr_destroy(&vf->fdir.fdir_rule_idr);
2020	ice_vc_fdir_rem_prof_all(vf);
2021	ice_vc_fdir_free_prof_all(vf);
2022}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2021, Intel Corporation. */
   3
   4#include "ice.h"
   5#include "ice_base.h"
   6#include "ice_lib.h"
   7#include "ice_flow.h"
   8
   9#define to_fltr_conf_from_desc(p) \
  10	container_of(p, struct virtchnl_fdir_fltr_conf, input)
  11
  12#define ICE_FLOW_PROF_TYPE_S	0
  13#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
  14#define ICE_FLOW_PROF_VSI_S	32
  15#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
  16
  17/* Flow profile ID format:
  18 * [0:31] - flow type, flow + tun_offs
  19 * [32:63] - VSI index
  20 */
  21#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
  22	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
  23	      (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
  24
  25#define GTPU_TEID_OFFSET 4
  26#define GTPU_EH_QFI_OFFSET 1
  27#define GTPU_EH_QFI_MASK 0x3F
  28#define PFCP_S_OFFSET 0
  29#define PFCP_S_MASK 0x1
  30#define PFCP_PORT_NR 8805
  31
  32#define FDIR_INSET_FLAG_ESP_S 0
  33#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
  34#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
  35#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
  36
  37enum ice_fdir_tunnel_type {
  38	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
  39	ICE_FDIR_TUNNEL_TYPE_GTPU,
  40	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
  41};
  42
  43struct virtchnl_fdir_fltr_conf {
  44	struct ice_fdir_fltr input;
  45	enum ice_fdir_tunnel_type ttype;
  46	u64 inset_flag;
  47	u32 flow_id;
  48};
  49
  50static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
  51	VIRTCHNL_PROTO_HDR_ETH,
  52	VIRTCHNL_PROTO_HDR_NONE,
  53};
  54
  55static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
  56	VIRTCHNL_PROTO_HDR_ETH,
  57	VIRTCHNL_PROTO_HDR_IPV4,
  58	VIRTCHNL_PROTO_HDR_NONE,
  59};
  60
  61static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
  62	VIRTCHNL_PROTO_HDR_ETH,
  63	VIRTCHNL_PROTO_HDR_IPV4,
  64	VIRTCHNL_PROTO_HDR_TCP,
  65	VIRTCHNL_PROTO_HDR_NONE,
  66};
  67
  68static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
  69	VIRTCHNL_PROTO_HDR_ETH,
  70	VIRTCHNL_PROTO_HDR_IPV4,
  71	VIRTCHNL_PROTO_HDR_UDP,
  72	VIRTCHNL_PROTO_HDR_NONE,
  73};
  74
  75static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
  76	VIRTCHNL_PROTO_HDR_ETH,
  77	VIRTCHNL_PROTO_HDR_IPV4,
  78	VIRTCHNL_PROTO_HDR_SCTP,
  79	VIRTCHNL_PROTO_HDR_NONE,
  80};
  81
  82static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
  83	VIRTCHNL_PROTO_HDR_ETH,
  84	VIRTCHNL_PROTO_HDR_IPV6,
  85	VIRTCHNL_PROTO_HDR_NONE,
  86};
  87
  88static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
  89	VIRTCHNL_PROTO_HDR_ETH,
  90	VIRTCHNL_PROTO_HDR_IPV6,
  91	VIRTCHNL_PROTO_HDR_TCP,
  92	VIRTCHNL_PROTO_HDR_NONE,
  93};
  94
  95static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
  96	VIRTCHNL_PROTO_HDR_ETH,
  97	VIRTCHNL_PROTO_HDR_IPV6,
  98	VIRTCHNL_PROTO_HDR_UDP,
  99	VIRTCHNL_PROTO_HDR_NONE,
 100};
 101
 102static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
 103	VIRTCHNL_PROTO_HDR_ETH,
 104	VIRTCHNL_PROTO_HDR_IPV6,
 105	VIRTCHNL_PROTO_HDR_SCTP,
 106	VIRTCHNL_PROTO_HDR_NONE,
 107};
 108
 109static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
 110	VIRTCHNL_PROTO_HDR_ETH,
 111	VIRTCHNL_PROTO_HDR_IPV4,
 112	VIRTCHNL_PROTO_HDR_UDP,
 113	VIRTCHNL_PROTO_HDR_GTPU_IP,
 114	VIRTCHNL_PROTO_HDR_NONE,
 115};
 116
 117static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
 118	VIRTCHNL_PROTO_HDR_ETH,
 119	VIRTCHNL_PROTO_HDR_IPV4,
 120	VIRTCHNL_PROTO_HDR_UDP,
 121	VIRTCHNL_PROTO_HDR_GTPU_IP,
 122	VIRTCHNL_PROTO_HDR_GTPU_EH,
 123	VIRTCHNL_PROTO_HDR_NONE,
 124};
 125
 126static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
 127	VIRTCHNL_PROTO_HDR_ETH,
 128	VIRTCHNL_PROTO_HDR_IPV4,
 129	VIRTCHNL_PROTO_HDR_L2TPV3,
 130	VIRTCHNL_PROTO_HDR_NONE,
 131};
 132
 133static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
 134	VIRTCHNL_PROTO_HDR_ETH,
 135	VIRTCHNL_PROTO_HDR_IPV6,
 136	VIRTCHNL_PROTO_HDR_L2TPV3,
 137	VIRTCHNL_PROTO_HDR_NONE,
 138};
 139
 140static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
 141	VIRTCHNL_PROTO_HDR_ETH,
 142	VIRTCHNL_PROTO_HDR_IPV4,
 143	VIRTCHNL_PROTO_HDR_ESP,
 144	VIRTCHNL_PROTO_HDR_NONE,
 145};
 146
 147static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
 148	VIRTCHNL_PROTO_HDR_ETH,
 149	VIRTCHNL_PROTO_HDR_IPV6,
 150	VIRTCHNL_PROTO_HDR_ESP,
 151	VIRTCHNL_PROTO_HDR_NONE,
 152};
 153
 154static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
 155	VIRTCHNL_PROTO_HDR_ETH,
 156	VIRTCHNL_PROTO_HDR_IPV4,
 157	VIRTCHNL_PROTO_HDR_AH,
 158	VIRTCHNL_PROTO_HDR_NONE,
 159};
 160
 161static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
 162	VIRTCHNL_PROTO_HDR_ETH,
 163	VIRTCHNL_PROTO_HDR_IPV6,
 164	VIRTCHNL_PROTO_HDR_AH,
 165	VIRTCHNL_PROTO_HDR_NONE,
 166};
 167
 168static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
 169	VIRTCHNL_PROTO_HDR_ETH,
 170	VIRTCHNL_PROTO_HDR_IPV4,
 171	VIRTCHNL_PROTO_HDR_UDP,
 172	VIRTCHNL_PROTO_HDR_ESP,
 173	VIRTCHNL_PROTO_HDR_NONE,
 174};
 175
 176static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
 177	VIRTCHNL_PROTO_HDR_ETH,
 178	VIRTCHNL_PROTO_HDR_IPV6,
 179	VIRTCHNL_PROTO_HDR_UDP,
 180	VIRTCHNL_PROTO_HDR_ESP,
 181	VIRTCHNL_PROTO_HDR_NONE,
 182};
 183
 184static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
 185	VIRTCHNL_PROTO_HDR_ETH,
 186	VIRTCHNL_PROTO_HDR_IPV4,
 187	VIRTCHNL_PROTO_HDR_UDP,
 188	VIRTCHNL_PROTO_HDR_PFCP,
 189	VIRTCHNL_PROTO_HDR_NONE,
 190};
 191
 192static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
 193	VIRTCHNL_PROTO_HDR_ETH,
 194	VIRTCHNL_PROTO_HDR_IPV6,
 195	VIRTCHNL_PROTO_HDR_UDP,
 196	VIRTCHNL_PROTO_HDR_PFCP,
 197	VIRTCHNL_PROTO_HDR_NONE,
 198};
 199
 200struct virtchnl_fdir_pattern_match_item {
 201	enum virtchnl_proto_hdr_type *list;
 202	u64 input_set;
 203	u64 *meta;
 204};
 205
 206static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
 207	{vc_pattern_ipv4,                     0,         NULL},
 208	{vc_pattern_ipv4_tcp,                 0,         NULL},
 209	{vc_pattern_ipv4_udp,                 0,         NULL},
 210	{vc_pattern_ipv4_sctp,                0,         NULL},
 211	{vc_pattern_ipv6,                     0,         NULL},
 212	{vc_pattern_ipv6_tcp,                 0,         NULL},
 213	{vc_pattern_ipv6_udp,                 0,         NULL},
 214	{vc_pattern_ipv6_sctp,                0,         NULL},
 215};
 216
 217static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
 218	{vc_pattern_ipv4,                     0,         NULL},
 219	{vc_pattern_ipv4_tcp,                 0,         NULL},
 220	{vc_pattern_ipv4_udp,                 0,         NULL},
 221	{vc_pattern_ipv4_sctp,                0,         NULL},
 222	{vc_pattern_ipv6,                     0,         NULL},
 223	{vc_pattern_ipv6_tcp,                 0,         NULL},
 224	{vc_pattern_ipv6_udp,                 0,         NULL},
 225	{vc_pattern_ipv6_sctp,                0,         NULL},
 226	{vc_pattern_ether,                    0,         NULL},
 227	{vc_pattern_ipv4_gtpu,                0,         NULL},
 228	{vc_pattern_ipv4_gtpu_eh,             0,         NULL},
 229	{vc_pattern_ipv4_l2tpv3,              0,         NULL},
 230	{vc_pattern_ipv6_l2tpv3,              0,         NULL},
 231	{vc_pattern_ipv4_esp,                 0,         NULL},
 232	{vc_pattern_ipv6_esp,                 0,         NULL},
 233	{vc_pattern_ipv4_ah,                  0,         NULL},
 234	{vc_pattern_ipv6_ah,                  0,         NULL},
 235	{vc_pattern_ipv4_nat_t_esp,           0,         NULL},
 236	{vc_pattern_ipv6_nat_t_esp,           0,         NULL},
 237	{vc_pattern_ipv4_pfcp,                0,         NULL},
 238	{vc_pattern_ipv6_pfcp,                0,         NULL},
 239};
 240
 241struct virtchnl_fdir_inset_map {
 242	enum virtchnl_proto_hdr_field field;
 243	enum ice_flow_field fld;
 244	u64 flag;
 245	u64 mask;
 246};
 247
 248static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
 249	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
 250	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
 251	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
 252	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
 253	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
 254	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
 255	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
 256	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
 257	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
 258	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
 259	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
 260	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
 261	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
 262	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
 263	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
 264	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
 265	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
 266	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
 267	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
 268	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
 269		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
 270	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
 271		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
 272	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
 273	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
 274	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
 275};
 276
 277/**
 278 * ice_vc_fdir_param_check
 279 * @vf: pointer to the VF structure
 280 * @vsi_id: VF relative VSI ID
 281 *
 282 * Check for a valid VSI ID and that the PF's and VF's states allow FDIR use
 283 *
 284 * Return: 0 on success, and -EINVAL on error.
 285 */
 286static int
 287ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
 288{
 289	struct ice_pf *pf = vf->pf;
 290
 291	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
 292		return -EINVAL;
 293
 294	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
 295		return -EINVAL;
 296
 297	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
 298		return -EINVAL;
 299
 300	if (vsi_id != vf->lan_vsi_num)
 301		return -EINVAL;
 302
 303	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
 304		return -EINVAL;
 305
 306	if (!pf->vsi[vf->lan_vsi_idx])
 307		return -EINVAL;
 308
 309	return 0;
 310}
 311
 312/**
 313 * ice_vf_start_ctrl_vsi
 314 * @vf: pointer to the VF structure
 315 *
 316 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 317 *
 318 * Return: 0 on success, and other on error.
 319 */
 320static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
 321{
 322	struct ice_pf *pf = vf->pf;
 323	struct ice_vsi *ctrl_vsi;
 324	struct device *dev;
 325	int err;
 326
 327	dev = ice_pf_to_dev(pf);
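    	/* -EEXIST is not an error to callers: it means the control VSI
    	 * was already set up by an earlier filter request.
    	 */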
 328	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 329		return -EEXIST;
 330
 331	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
 332	if (!ctrl_vsi) {
 333		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
 334			vf->vf_id);
 335		return -ENOMEM;
 336	}
 337
 338	err = ice_vsi_open_ctrl(ctrl_vsi);
 339	if (err) {
 340		dev_dbg(dev, "Could not open control VSI for VF %d\n",
 341			vf->vf_id);
 342		goto err_vsi_open;
 343	}
 344
 345	return 0;
 346
 347err_vsi_open:
 348	ice_vsi_release(ctrl_vsi);
 349	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
 350		pf->vsi[vf->ctrl_vsi_idx] = NULL;
 351		vf->ctrl_vsi_idx = ICE_NO_VSI;
 352	}
 353	return err;
 354}
 355
 356/**
 357 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 358 * @vf: pointer to the VF structure
 359 * @flow: filter flow type
 360 *
 361 * Return: 0 on success, and other on error.
 362 */
 363static int
 364ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
 365{
 366	struct ice_vf_fdir *fdir = &vf->fdir;
 367
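    	/* Both the profile table and the per-flow profile are
    	 * allocated lazily, on the first filter of a given flow type.
    	 */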
 368	if (!fdir->fdir_prof) {
 369		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
 370					       ICE_FLTR_PTYPE_MAX,
 371					       sizeof(*fdir->fdir_prof),
 372					       GFP_KERNEL);
 373		if (!fdir->fdir_prof)
 374			return -ENOMEM;
 375	}
 376
 377	if (!fdir->fdir_prof[flow]) {
 378		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
 379						     sizeof(**fdir->fdir_prof),
 380						     GFP_KERNEL);
 381		if (!fdir->fdir_prof[flow])
 382			return -ENOMEM;
 383	}
 384
 385	return 0;
 386}
 387
 388/**
 389 * ice_vc_fdir_free_prof - free profile for this filter flow type
 390 * @vf: pointer to the VF structure
 391 * @flow: filter flow type
 392 */
 393static void
 394ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
 395{
 396	struct ice_vf_fdir *fdir = &vf->fdir;
 397
 398	if (!fdir->fdir_prof)
 399		return;
 400
 401	if (!fdir->fdir_prof[flow])
 402		return;
 403
 404	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
 405	fdir->fdir_prof[flow] = NULL;
 406}
 407
 408/**
 409 * ice_vc_fdir_free_prof_all - free all the profile for this VF
 410 * @vf: pointer to the VF structure
 411 */
 412static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
 413{
 414	struct ice_vf_fdir *fdir = &vf->fdir;
 415	enum ice_fltr_ptype flow;
 416
 417	if (!fdir->fdir_prof)
 418		return;
 419
 420	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
 421		ice_vc_fdir_free_prof(vf, flow);
 422
 423	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
 424	fdir->fdir_prof = NULL;
 425}
 426
 427/**
 428 * ice_vc_fdir_parse_flow_fld
 429 * @proto_hdr: virtual channel protocol filter header
 430 * @conf: FDIR configuration for each filter
 431 * @fld: field type array
 432 * @fld_cnt: field counter
 433 *
 434 * Parse the virtual channel filter header and store its fields in the field type array
 435 *
 436 * Return: 0 on success, and other on error.
 437 */
 438static int
 439ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
 440			   struct virtchnl_fdir_fltr_conf *conf,
 441			   enum ice_flow_field *fld, int *fld_cnt)
 442{
 443	struct virtchnl_proto_hdr hdr;
 444	u32 i;
 445
 446	memcpy(&hdr, proto_hdr, sizeof(hdr));
 447
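    	/* Walk the inset map, consuming matching field-selector bits
    	 * from the local header copy until none remain or the map is
    	 * exhausted.
    	 */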
 448	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
 449	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
 450		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
 451			if (fdir_inset_map[i].mask &&
 452			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
 453			     fdir_inset_map[i].flag))
 454				continue;
 455
 456			fld[*fld_cnt] = fdir_inset_map[i].fld;
 457			*fld_cnt += 1;
 458			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
 459				return -EINVAL;
 460			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
 461						     fdir_inset_map[i].field);
 462		}
 463
 464	return 0;
 465}
 466
 467/**
 468 * ice_vc_fdir_set_flow_fld
 469 * @vf: pointer to the VF structure
 470 * @fltr: virtual channel add cmd buffer
 471 * @conf: FDIR configuration for each filter
 472 * @seg: array of one or more packet segments that describe the flow
 473 *
 474 * Parse the virtual channel add msg buffer's field vector and store the matched
 475 * fields in the flow's packet segment info
 476 *
 477 * Return: 0 on success, and other on error.
 478 */
 479static int
 480ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 481			 struct virtchnl_fdir_fltr_conf *conf,
 482			 struct ice_flow_seg_info *seg)
 483{
 484	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
 485	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
 486	struct device *dev = ice_pf_to_dev(vf->pf);
 487	struct virtchnl_proto_hdrs *proto;
 488	int fld_cnt = 0;
 489	int i;
 490
 491	proto = &rule->proto_hdrs;
 492	for (i = 0; i < proto->count; i++) {
 493		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
 494		int ret;
 495
 496		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
 497		if (ret)
 498			return ret;
 499	}
 500
 501	if (fld_cnt == 0) {
 502		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
 503		return -EINVAL;
 504	}
 505
 506	for (i = 0; i < fld_cnt; i++)
 507		ice_flow_set_fld(seg, fld[i],
 508				 ICE_FLOW_FLD_OFF_INVAL,
 509				 ICE_FLOW_FLD_OFF_INVAL,
 510				 ICE_FLOW_FLD_OFF_INVAL, false);
 511
 512	return 0;
 513}
 514
 515/**
 516 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 517 * @vf: pointer to the VF structure
 518 * @conf: FDIR configuration for each filter
 519 * @seg: array of one or more packet segments that describe the flow
 520 *
 521 * Return: 0 on success, and other on error.
 522 */
 523static int
 524ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
 525			 struct virtchnl_fdir_fltr_conf *conf,
 526			 struct ice_flow_seg_info *seg)
 527{
 528	enum ice_fltr_ptype flow = conf->input.flow_type;
 529	enum ice_fdir_tunnel_type ttype = conf->ttype;
 530	struct device *dev = ice_pf_to_dev(vf->pf);
 531
 532	switch (flow) {
 533	case ICE_FLTR_PTYPE_NON_IP_L2:
 534		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
 535		break;
 536	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
 537		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
 538				  ICE_FLOW_SEG_HDR_IPV4 |
 539				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 540		break;
 541	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
 542		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
 543				  ICE_FLOW_SEG_HDR_IPV4 |
 544				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 545		break;
 546	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
 547		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
 548				  ICE_FLOW_SEG_HDR_IPV4 |
 549				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 550		break;
 551	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
 552		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
 553				  ICE_FLOW_SEG_HDR_IPV4 |
 554				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 555		break;
 556	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
 557		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
 558				  ICE_FLOW_SEG_HDR_IPV4 |
 559				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 560		break;
 561	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
 562		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
 563				  ICE_FLOW_SEG_HDR_IPV4 |
 564				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 565		break;
 566	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 567		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
 568				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 569		break;
 570	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
 571		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
 572				  ICE_FLOW_SEG_HDR_IPV4 |
 573				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 574		break;
 575	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
 576		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 577				  ICE_FLOW_SEG_HDR_IPV4 |
 578				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 579		break;
 580	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
 581	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
 582	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
 583	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
 584		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
 585			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
 586					  ICE_FLOW_SEG_HDR_IPV4 |
 587					  ICE_FLOW_SEG_HDR_IPV_OTHER);
 588		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
 589			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
 590					  ICE_FLOW_SEG_HDR_GTPU_IP |
 591					  ICE_FLOW_SEG_HDR_IPV4 |
 592					  ICE_FLOW_SEG_HDR_IPV_OTHER);
 593		} else {
 594			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
 595				flow, vf->vf_id);
 596			return -EINVAL;
 597		}
 598		break;
 599	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
 600		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
 601				  ICE_FLOW_SEG_HDR_IPV4 |
 602				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 603		break;
 604	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
 605		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
 606				  ICE_FLOW_SEG_HDR_IPV6 |
 607				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 608		break;
 609	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
 610		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
 611				  ICE_FLOW_SEG_HDR_IPV6 |
 612				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 613		break;
 614	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
 615		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
 616				  ICE_FLOW_SEG_HDR_IPV6 |
 617				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 618		break;
 619	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
 620		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
 621				  ICE_FLOW_SEG_HDR_IPV6 |
 622				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 623		break;
 624	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
 625		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
 626				  ICE_FLOW_SEG_HDR_IPV6 |
 627				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 628		break;
 629	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
 630		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
 631				  ICE_FLOW_SEG_HDR_IPV6 |
 632				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 633		break;
 634	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
 635		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
 636				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 637		break;
 638	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
 639		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
 640				  ICE_FLOW_SEG_HDR_IPV6 |
 641				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 642		break;
 643	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
 644		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
 645				  ICE_FLOW_SEG_HDR_IPV6 |
 646				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 647		break;
 648	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
 649		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
 650				  ICE_FLOW_SEG_HDR_IPV6 |
 651				  ICE_FLOW_SEG_HDR_IPV_OTHER);
 652		break;
 653	default:
 654		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
 655			flow, vf->vf_id);
 656		return -EINVAL;
 657	}
 658
 659	return 0;
 660}
 661
 662/**
 663 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 664 * @vf: pointer to the VF structure
 665 * @flow: filter flow type
 666 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 667 */
 668static void
 669ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
 670{
 671	struct ice_vf_fdir *fdir = &vf->fdir;
 672	struct ice_fd_hw_prof *vf_prof;
 673	struct ice_pf *pf = vf->pf;
 674	struct ice_vsi *vf_vsi;
 675	struct device *dev;
 676	struct ice_hw *hw;
 677	u64 prof_id;
 678	int i;
 679
 680	dev = ice_pf_to_dev(pf);
 681	hw = &pf->hw;
 682	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
 683		return;
 684
 685	vf_prof = fdir->fdir_prof[flow];
 686
 687	vf_vsi = pf->vsi[vf->lan_vsi_idx];
 688	if (!vf_vsi) {
 689		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
 690		return;
 691	}
 692
 693	if (!fdir->prof_entry_cnt[flow][tun])
 694		return;
 695
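    	/* Rebuild the profile ID exactly as it was composed when the
    	 * profile was added in ice_vc_fdir_write_flow_prof().
    	 */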
 696	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
 697				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
 698
 699	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
 700		if (vf_prof->entry_h[i][tun]) {
 701			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
 702
 703			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
 704			ice_flow_rem_entry(hw, ICE_BLK_FD,
 705					   vf_prof->entry_h[i][tun]);
 706			vf_prof->entry_h[i][tun] = 0;
 707		}
 708
 709	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 710	devm_kfree(dev, vf_prof->fdir_seg[tun]);
 711	vf_prof->fdir_seg[tun] = NULL;
 712
 713	for (i = 0; i < vf_prof->cnt; i++)
 714		vf_prof->vsi_h[i] = 0;
 715
 716	fdir->prof_entry_cnt[flow][tun] = 0;
 717}
 718
 719/**
 720 * ice_vc_fdir_rem_prof_all - remove profile for this VF
 721 * @vf: pointer to the VF structure
 722 */
 723static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
 724{
 725	enum ice_fltr_ptype flow;
 726
 727	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
 728	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
 729		ice_vc_fdir_rem_prof(vf, flow, 0);
 730		ice_vc_fdir_rem_prof(vf, flow, 1);
 731	}
 732}
 733
 734/**
 735 * ice_vc_fdir_write_flow_prof
 736 * @vf: pointer to the VF structure
 737 * @flow: filter flow type
 738 * @seg: array of one or more packet segments that describe the flow
 739 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 740 *
 741 * Write the flow's profile config and packet segment into the hardware
 742 *
 743 * Return: 0 on success, and other on error.
 744 */
 745static int
 746ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
 747			    struct ice_flow_seg_info *seg, int tun)
 748{
 749	struct ice_vf_fdir *fdir = &vf->fdir;
 750	struct ice_vsi *vf_vsi, *ctrl_vsi;
 751	struct ice_flow_seg_info *old_seg;
 752	struct ice_flow_prof *prof = NULL;
 753	struct ice_fd_hw_prof *vf_prof;
 754	enum ice_status status;
 755	struct device *dev;
 756	struct ice_pf *pf;
 757	struct ice_hw *hw;
 758	u64 entry1_h = 0;
 759	u64 entry2_h = 0;
 760	u64 prof_id;
 761	int ret;
 762
 763	pf = vf->pf;
 764	dev = ice_pf_to_dev(pf);
 765	hw = &pf->hw;
 766	vf_vsi = pf->vsi[vf->lan_vsi_idx];
 767	if (!vf_vsi)
 768		return -EINVAL;
 769
 770	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
 771	if (!ctrl_vsi)
 772		return -EINVAL;
 773
 774	vf_prof = fdir->fdir_prof[flow];
 775	old_seg = vf_prof->fdir_seg[tun];
 776	if (old_seg) {
 777		if (!memcmp(old_seg, seg, sizeof(*seg))) {
 778			dev_dbg(dev, "Duplicated profile for VF %d!\n",
 779				vf->vf_id);
 780			return -EEXIST;
 781		}
 782
 783		if (fdir->fdir_fltr_cnt[flow][tun]) {
 784			ret = -EINVAL;
 785			dev_dbg(dev, "Input set conflicts for VF %d\n",
 786				vf->vf_id);
 787			goto err_exit;
 788		}
 789
 790		/* remove previously allocated profile */
 791		ice_vc_fdir_rem_prof(vf, flow, tun);
 792	}
 793
 794	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
 795				   tun ? ICE_FLTR_PTYPE_MAX : 0);
 796
 797	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
 798				   tun + 1, &prof);
 799	ret = ice_status_to_errno(status);
 800	if (ret) {
 801		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
 802			flow, vf->vf_id);
 803		goto err_exit;
 804	}
 805
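    	/* Two entries are added per profile: one for the VF's LAN VSI
    	 * (the traffic target) and one for the control VSI used to
    	 * program filter rules.
    	 */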
 806	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
 807				    vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 808				    seg, &entry1_h);
 809	ret = ice_status_to_errno(status);
 810	if (ret) {
 811		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
 812			flow, vf->vf_id);
 813		goto err_prof;
 814	}
 815
 816	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
 817				    ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 818				    seg, &entry2_h);
 819	ret = ice_status_to_errno(status);
 820	if (ret) {
 821		dev_dbg(dev,
 822			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
 823			flow, vf->vf_id);
 824		goto err_entry_1;
 825	}
 826
 827	vf_prof->fdir_seg[tun] = seg;
 828	vf_prof->cnt = 0;
 829	fdir->prof_entry_cnt[flow][tun] = 0;
 830
 831	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
 832	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
 833	vf_prof->cnt++;
 834	fdir->prof_entry_cnt[flow][tun]++;
 835
 836	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
 837	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
 838	vf_prof->cnt++;
 839	fdir->prof_entry_cnt[flow][tun]++;
 840
 841	return 0;
 842
 843err_entry_1:
 844	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 845			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
 846	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 847err_prof:
 848	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 849err_exit:
 850	return ret;
 851}
 852
 853/**
 854 * ice_vc_fdir_config_input_set
 855 * @vf: pointer to the VF structure
 856 * @fltr: virtual channel add cmd buffer
 857 * @conf: FDIR configuration for each filter
 858 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 859 *
 860 * Config the input set type and value for virtual channel add msg buffer
 861 *
 862 * Return: 0 on success, and other on error.
 863 */
 864static int
 865ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 866			     struct virtchnl_fdir_fltr_conf *conf, int tun)
 867{
 868	struct ice_fdir_fltr *input = &conf->input;
 869	struct device *dev = ice_pf_to_dev(vf->pf);
 870	struct ice_flow_seg_info *seg;
 871	enum ice_fltr_ptype flow;
 872	int ret;
 873
 874	flow = input->flow_type;
 875	ret = ice_vc_fdir_alloc_prof(vf, flow);
 876	if (ret) {
 877		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
 878		return ret;
 879	}
 880
 881	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
 882	if (!seg)
 883		return -ENOMEM;
 884
 885	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
 886	if (ret) {
 887		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
 888		goto err_exit;
 889	}
 890
 891	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
 892	if (ret) {
 893		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
 894		goto err_exit;
 895	}
 896
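    	/* -EEXIST means an identical profile already exists: keep it
    	 * and free the newly built segment instead of failing.
    	 */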
 897	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
 898	if (ret == -EEXIST) {
 899		devm_kfree(dev, seg);
 900	} else if (ret) {
 901		dev_dbg(dev, "Write flow profile for VF %d failed\n",
 902			vf->vf_id);
 903		goto err_exit;
 904	}
 905
 906	return 0;
 907
 908err_exit:
 909	devm_kfree(dev, seg);
 910	return ret;
 911}
 912
 913/**
 914 * ice_vc_fdir_match_pattern
 915 * @fltr: virtual channel add cmd buffer
 916 * @type: virtual channel protocol filter header type
 917 *
 918 * Match the header types by comparing the filter's protocol headers against @type.
 919 *
 920 * Return: true if the pattern matches, false otherwise.
 921 */
 922static bool
 923ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
 924			  enum virtchnl_proto_hdr_type *type)
 925{
 926	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
 927	int i = 0;
 928
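    	/* A match requires both lists to end together: every header in
    	 * the request must match the template, and the template must
    	 * be fully consumed (terminated by VIRTCHNL_PROTO_HDR_NONE).
    	 */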
 929	while ((i < proto->count) &&
 930	       (*type == proto->proto_hdr[i].type) &&
 931	       (*type != VIRTCHNL_PROTO_HDR_NONE)) {
 932		type++;
 933		i++;
 934	}
 935
 936	return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
 937}
 938
 939/**
 940 * ice_vc_fdir_get_pattern - get the allowed pattern list
 941 * @vf: pointer to the VF info
 942 * @len: filter list length
 943 *
 944 * Return: pointer to allowed filter list
 945 */
 946static const struct virtchnl_fdir_pattern_match_item *
 947ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
 948{
 949	const struct virtchnl_fdir_pattern_match_item *item;
 950	struct ice_pf *pf = vf->pf;
 951	struct ice_hw *hw;
 952
 953	hw = &pf->hw;
 954	if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
 955		     sizeof(hw->active_pkg_name))) {
 956		item = vc_fdir_pattern_comms;
 957		*len = ARRAY_SIZE(vc_fdir_pattern_comms);
 958	} else {
 959		item = vc_fdir_pattern_os;
 960		*len = ARRAY_SIZE(vc_fdir_pattern_os);
 961	}
 962
 963	return item;
 964}
 965
 966/**
 967 * ice_vc_fdir_search_pattern
 968 * @vf: pointer to the VF info
 969 * @fltr: virtual channel add cmd buffer
 970 *
 971 * Search the supported pattern list for a pattern matching the request
 972 *
 973 * Return: 0 on success, and other on error.
 974 */
 975static int
 976ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
 977{
 978	const struct virtchnl_fdir_pattern_match_item *pattern;
 979	int len, i;
 980
 981	pattern = ice_vc_fdir_get_pattern(vf, &len);
 982
 983	for (i = 0; i < len; i++)
 984		if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
 985			return 0;
 986
 987	return -EINVAL;
 988}
 989
 990/**
 991 * ice_vc_fdir_parse_pattern
 992 * @vf: pointer to the VF info
 993 * @fltr: virtual channel add cmd buffer
 994 * @conf: FDIR configuration for each filter
 995 *
 996 * Parse the virtual channel filter's pattern and store it into conf
 997 *
 998 * Return: 0 on success, and other on error.
 999 */
1000static int
1001ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1002			  struct virtchnl_fdir_fltr_conf *conf)
1003{
1004	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1005	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
1006	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
1007	struct device *dev = ice_pf_to_dev(vf->pf);
1008	struct ice_fdir_fltr *input = &conf->input;
1009	int i;
1010
1011	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
1012		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
1013			proto->count, vf->vf_id);
1014		return -EINVAL;
1015	}
1016
1017	for (i = 0; i < proto->count; i++) {
1018		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
1019		struct ip_esp_hdr *esph;
1020		struct ip_auth_hdr *ah;
1021		struct sctphdr *sctph;
1022		struct ipv6hdr *ip6h;
1023		struct udphdr *udph;
1024		struct tcphdr *tcph;
1025		struct ethhdr *eth;
1026		struct iphdr *iph;
1027		u8 s_field;
1028		u8 *rawh;
1029
1030		switch (hdr->type) {
1031		case VIRTCHNL_PROTO_HDR_ETH:
1032			eth = (struct ethhdr *)hdr->buffer;
1033			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1034
1035			if (hdr->field_selector)
1036				input->ext_data.ether_type = eth->h_proto;
1037			break;
1038		case VIRTCHNL_PROTO_HDR_IPV4:
1039			iph = (struct iphdr *)hdr->buffer;
1040			l3 = VIRTCHNL_PROTO_HDR_IPV4;
1041			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1042
1043			if (hdr->field_selector) {
1044				input->ip.v4.src_ip = iph->saddr;
1045				input->ip.v4.dst_ip = iph->daddr;
1046				input->ip.v4.tos = iph->tos;
1047				input->ip.v4.proto = iph->protocol;
1048			}
1049			break;
1050		case VIRTCHNL_PROTO_HDR_IPV6:
1051			ip6h = (struct ipv6hdr *)hdr->buffer;
1052			l3 = VIRTCHNL_PROTO_HDR_IPV6;
1053			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1054
1055			if (hdr->field_selector) {
1056				memcpy(input->ip.v6.src_ip,
1057				       ip6h->saddr.in6_u.u6_addr8,
1058				       sizeof(ip6h->saddr));
1059				memcpy(input->ip.v6.dst_ip,
1060				       ip6h->daddr.in6_u.u6_addr8,
1061				       sizeof(ip6h->daddr));
1062				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
1063						  (ip6h->flow_lbl[0] >> 4);
1064				input->ip.v6.proto = ip6h->nexthdr;
1065			}
1066			break;
1067		case VIRTCHNL_PROTO_HDR_TCP:
1068			tcph = (struct tcphdr *)hdr->buffer;
1069			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1070				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1071			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1072				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1073
1074			if (hdr->field_selector) {
1075				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1076					input->ip.v4.src_port = tcph->source;
1077					input->ip.v4.dst_port = tcph->dest;
1078				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1079					input->ip.v6.src_port = tcph->source;
1080					input->ip.v6.dst_port = tcph->dest;
1081				}
1082			}
1083			break;
1084		case VIRTCHNL_PROTO_HDR_UDP:
1085			udph = (struct udphdr *)hdr->buffer;
1086			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1087				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1088			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1089				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1090
1091			if (hdr->field_selector) {
1092				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1093					input->ip.v4.src_port = udph->source;
1094					input->ip.v4.dst_port = udph->dest;
1095				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1096					input->ip.v6.src_port = udph->source;
1097					input->ip.v6.dst_port = udph->dest;
1098				}
1099			}
1100			break;
1101		case VIRTCHNL_PROTO_HDR_SCTP:
1102			sctph = (struct sctphdr *)hdr->buffer;
1103			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1104				input->flow_type =
1105					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1106			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1107				input->flow_type =
1108					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1109
1110			if (hdr->field_selector) {
1111				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1112					input->ip.v4.src_port = sctph->source;
1113					input->ip.v4.dst_port = sctph->dest;
1114				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1115					input->ip.v6.src_port = sctph->source;
1116					input->ip.v6.dst_port = sctph->dest;
1117				}
1118			}
1119			break;
1120		case VIRTCHNL_PROTO_HDR_L2TPV3:
1121			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1122				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
1123			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1124				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
1125
1126			if (hdr->field_selector)
1127				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
1128			break;
1129		case VIRTCHNL_PROTO_HDR_ESP:
1130			esph = (struct ip_esp_hdr *)hdr->buffer;
1131			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
1132			    l4 == VIRTCHNL_PROTO_HDR_UDP)
1133				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
1134			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
1135				 l4 == VIRTCHNL_PROTO_HDR_UDP)
1136				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
1137			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
1138				 l4 == VIRTCHNL_PROTO_HDR_NONE)
1139				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
1140			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
1141				 l4 == VIRTCHNL_PROTO_HDR_NONE)
1142				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
1143
1144			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
1145				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
1146			else
1147				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
1148
1149			if (hdr->field_selector) {
1150				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1151					input->ip.v4.sec_parm_idx = esph->spi;
1152				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1153					input->ip.v6.sec_parm_idx = esph->spi;
1154			}
1155			break;
1156		case VIRTCHNL_PROTO_HDR_AH:
1157			ah = (struct ip_auth_hdr *)hdr->buffer;
1158			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1159				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
1160			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1161				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
1162
1163			if (hdr->field_selector) {
1164				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1165					input->ip.v4.sec_parm_idx = ah->spi;
1166				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1167					input->ip.v6.sec_parm_idx = ah->spi;
1168			}
1169			break;
1170		case VIRTCHNL_PROTO_HDR_PFCP:
1171			rawh = (u8 *)hdr->buffer;
1172			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
1173			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
1174				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
1175			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
1176				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
1177			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
1178				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
1179			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
1180				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
1181
1182			if (hdr->field_selector) {
1183				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1184					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
1185				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1186					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
1187			}
1188			break;
1189		case VIRTCHNL_PROTO_HDR_GTPU_IP:
1190			rawh = (u8 *)hdr->buffer;
1191			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1192
1193			if (hdr->field_selector)
1194				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
1195			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
1196			break;
1197		case VIRTCHNL_PROTO_HDR_GTPU_EH:
1198			rawh = (u8 *)hdr->buffer;
1199
1200			if (hdr->field_selector)
1201				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1202			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1203			break;
1204		default:
1205			dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
1206				hdr->type, vf->vf_id);
1207			return -EINVAL;
1208		}
1209	}
1210
1211	return 0;
1212}
1213
1214/**
1215 * ice_vc_fdir_parse_action
1216 * @vf: pointer to the VF info
1217 * @fltr: virtual channel add cmd buffer
1218 * @conf: FDIR configuration for each filter
1219 *
1220 * Parse the virtual channel filter's action and store it into conf
1221 *
1222 * Return: 0 on success, and other on error.
1223 */
1224static int
1225ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1226			 struct virtchnl_fdir_fltr_conf *conf)
1227{
1228	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1229	struct device *dev = ice_pf_to_dev(vf->pf);
1230	struct ice_fdir_fltr *input = &conf->input;
1231	u32 dest_num = 0;
1232	u32 mark_num = 0;
1233	int i;
1234
1235	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1236		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1237			as->count, vf->vf_id);
1238		return -EINVAL;
1239	}
1240
1241	for (i = 0; i < as->count; i++) {
1242		struct virtchnl_filter_action *action = &as->actions[i];
1243
1244		switch (action->type) {
1245		case VIRTCHNL_ACTION_PASSTHRU:
1246			dest_num++;
1247			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1248			break;
1249		case VIRTCHNL_ACTION_DROP:
1250			dest_num++;
1251			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1252			break;
1253		case VIRTCHNL_ACTION_QUEUE:
1254			dest_num++;
1255			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1256			input->q_index = action->act_conf.queue.index;
1257			break;
1258		case VIRTCHNL_ACTION_Q_REGION:
1259			dest_num++;
1260			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1261			input->q_index = action->act_conf.queue.index;
1262			input->q_region = action->act_conf.queue.region;
1263			break;
1264		case VIRTCHNL_ACTION_MARK:
1265			mark_num++;
1266			input->fltr_id = action->act_conf.mark_id;
1267			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1268			break;
1269		default:
1270			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1271				action->type, vf->vf_id);
1272			return -EINVAL;
1273		}
1274	}
1275
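    	/* Exactly one destination action is required and at most one
    	 * mark action is allowed per rule.
    	 */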
1276	if (dest_num == 0 || dest_num >= 2) {
1277		dev_dbg(dev, "Invalid destination action for VF %d\n",
1278			vf->vf_id);
1279		return -EINVAL;
1280	}
1281
1282	if (mark_num >= 2) {
1283		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1284		return -EINVAL;
1285	}
1286
1287	return 0;
1288}
1289
1290/**
1291 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1292 * @vf: pointer to the VF info
1293 * @fltr: virtual channel add cmd buffer
1294 * @conf: FDIR configuration for each filter
1295 *
1296 * Return: 0 on success, and other on error.
1297 */
1298static int
1299ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1300			  struct virtchnl_fdir_fltr_conf *conf)
1301{
1302	int ret;
1303
1304	ret = ice_vc_fdir_search_pattern(vf, fltr);
1305	if (ret)
1306		return ret;
1307
1308	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1309	if (ret)
1310		return ret;
1311
1312	return ice_vc_fdir_parse_action(vf, fltr, conf);
1313}
1314
1315/**
1316 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
1317 * @conf_a: FDIR configuration for filter a
1318 * @conf_b: FDIR configuration for filter b
1319 *
1320 * Return: true if the two rules have the same value, false otherwise.
1321 */
1322static bool
1323ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1324		       struct virtchnl_fdir_fltr_conf *conf_b)
1325{
1326	struct ice_fdir_fltr *a = &conf_a->input;
1327	struct ice_fdir_fltr *b = &conf_b->input;
1328
1329	if (conf_a->ttype != conf_b->ttype)
1330		return false;
1331	if (a->flow_type != b->flow_type)
1332		return false;
1333	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1334		return false;
1335	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1336		return false;
1337	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1338		return false;
1339	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1340		return false;
1341	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1342		return false;
1343	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1344		return false;
1345	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1346		return false;
1347	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1348		return false;
1349
1350	return true;
1351}
1352
1353/**
1354 * ice_vc_fdir_is_dup_fltr
1355 * @vf: pointer to the VF info
1356 * @conf: FDIR configuration for each filter
1357 *
1358 * Check if a duplicated rule with the same conf value already exists
1359 *
1360 * Return: true if a duplicate exists, false otherwise.
1361 */
1362static bool
1363ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1364{
1365	struct ice_fdir_fltr *desc;
1366	bool ret;
1367
1368	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1369		struct virtchnl_fdir_fltr_conf *node =
1370				to_fltr_conf_from_desc(desc);
1371
1372		ret = ice_vc_fdir_comp_rules(node, conf);
1373		if (ret)
1374			return true;
1375	}
1376
1377	return false;
1378}
1379
1380/**
1381 * ice_vc_fdir_insert_entry
1382 * @vf: pointer to the VF info
1383 * @conf: FDIR configuration for each filter
1384 * @id: pointer to ID value allocated by driver
1385 *
1386 * Insert FDIR conf entry into list and allocate ID for this filter
1387 *
1388 * Return: 0 on success, and other on error.
1389 */
1390static int
1391ice_vc_fdir_insert_entry(struct ice_vf *vf,
1392			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1393{
1394	struct ice_fdir_fltr *input = &conf->input;
1395	int i;
1396
1397	/* alloc ID corresponding with conf */
1398	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1399		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1400	if (i < 0)
1401		return -EINVAL;
1402	*id = i;
1403
1404	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1405	return 0;
1406}
1407
1408/**
1409 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1410 * @vf: pointer to the VF info
1411 * @conf: FDIR configuration for each filter
1412 * @id: filter rule's ID
1413 */
1414static void
1415ice_vc_fdir_remove_entry(struct ice_vf *vf,
1416			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1417{
1418	struct ice_fdir_fltr *input = &conf->input;
1419
1420	idr_remove(&vf->fdir.fdir_rule_idr, id);
1421	list_del(&input->fltr_node);
1422}
1423
1424/**
1425 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1426 * @vf: pointer to the VF info
1427 * @id: filter rule's ID
1428 *
1429 * Return: the filter configuration on success, NULL otherwise.
1430 */
1431static struct virtchnl_fdir_fltr_conf *
1432ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1433{
1434	return idr_find(&vf->fdir.fdir_rule_idr, id);
1435}
1436
1437/**
1438 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
1439 * @vf: pointer to the VF info
1440 */
1441static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1442{
1443	struct virtchnl_fdir_fltr_conf *conf;
1444	struct ice_fdir_fltr *desc, *temp;
1445
1446	list_for_each_entry_safe(desc, temp,
1447				 &vf->fdir.fdir_rule_list, fltr_node) {
1448		conf = to_fltr_conf_from_desc(desc);
1449		list_del(&desc->fltr_node);
1450		devm_kfree(ice_pf_to_dev(vf->pf), conf);
1451	}
1452}
1453
1454/**
1455 * ice_vc_fdir_write_fltr - write filter rule into hardware
1456 * @vf: pointer to the VF info
1457 * @conf: FDIR configuration for each filter
1458 * @add: true implies add rule, false implies delete rule
1459 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1460 *
1461 * Return: 0 on success, and other on error.
1462 */
1463static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1464				  struct virtchnl_fdir_fltr_conf *conf,
1465				  bool add, bool is_tun)
1466{
1467	struct ice_fdir_fltr *input = &conf->input;
1468	struct ice_vsi *vsi, *ctrl_vsi;
1469	struct ice_fltr_desc desc;
1470	enum ice_status status;
1471	struct device *dev;
1472	struct ice_pf *pf;
1473	struct ice_hw *hw;
1474	int ret;
1475	u8 *pkt;
1476
1477	pf = vf->pf;
1478	dev = ice_pf_to_dev(pf);
1479	hw = &pf->hw;
1480	vsi = pf->vsi[vf->lan_vsi_idx];
1481	if (!vsi) {
1482		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1483		return -EINVAL;
1484	}
1485
1486	input->dest_vsi = vsi->idx;
1487	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1488
1489	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1490	if (!ctrl_vsi) {
1491		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1492		return -EINVAL;
1493	}
1494
1495	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1496	if (!pkt)
1497		return -ENOMEM;
1498
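    	/* Build the programming descriptor and a dummy training packet;
    	 * the pair is queued on the control VSI to program the rule.
    	 */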
1499	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1500	status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1501	ret = ice_status_to_errno(status);
1502	if (ret) {
1503		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1504			vf->vf_id, input->flow_type);
1505		goto err_free_pkt;
1506	}
1507
1508	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1509	if (ret)
1510		goto err_free_pkt;
1511
1512	return 0;
1513
1514err_free_pkt:
1515	devm_kfree(dev, pkt);
1516	return ret;
1517}
1518
1519/**
1520 * ice_vf_fdir_timer - FDIR programming timeout timer handler
1521 * @t: pointer to timer_list
1522 */
1523static void ice_vf_fdir_timer(struct timer_list *t)
1524{
1525	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1526	struct ice_vf_fdir_ctx *ctx_done;
1527	struct ice_vf_fdir *fdir;
1528	unsigned long flags;
1529	struct ice_vf *vf;
1530	struct ice_pf *pf;
1531
1532	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1533	vf = container_of(fdir, struct ice_vf, fdir);
1534	ctx_done = &fdir->ctx_done;
1535	pf = vf->pf;
1536	spin_lock_irqsave(&fdir->ctx_lock, flags);
1537	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1538		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1539		WARN_ON_ONCE(1);
1540		return;
1541	}
1542
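	/* Timed out: hand the in-flight context off to ctx_done so the
	 * service task can fail the request with a TIMEOUT status.
	 */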
1543	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1544
1545	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1546	ctx_done->conf = ctx_irq->conf;
1547	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1548	ctx_done->v_opcode = ctx_irq->v_opcode;
1549	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1550
1551	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1552	ice_service_task_schedule(pf);
1553}
1554
1555/**
1556 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1557 * @ctrl_vsi: pointer to a VF's CTRL VSI
1558 * @rx_desc: pointer to FDIR Rx queue descriptor
1559 */
1560void
1561ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1562			union ice_32b_rx_flex_desc *rx_desc)
1563{
1564	struct ice_pf *pf = ctrl_vsi->back;
1565	struct ice_vf_fdir_ctx *ctx_done;
1566	struct ice_vf_fdir_ctx *ctx_irq;
1567	struct ice_vf_fdir *fdir;
1568	unsigned long flags;
1569	struct device *dev;
1570	struct ice_vf *vf;
1571	int ret;
1572
1573	vf = &pf->vf[ctrl_vsi->vf_id];
1574
1575	fdir = &vf->fdir;
1576	ctx_done = &fdir->ctx_done;
1577	ctx_irq = &fdir->ctx_irq;
1578	dev = ice_pf_to_dev(pf);
1579	spin_lock_irqsave(&fdir->ctx_lock, flags);
1580	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1581		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1582		WARN_ON_ONCE(1);
1583		return;
1584	}
1585
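	/* The writeback descriptor arrived in time: pass the context to
	 * ctx_done, along with a copy of the descriptor, so the service
	 * task can verify the programming status.
	 */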
1586	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1587
1588	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1589	ctx_done->conf = ctx_irq->conf;
1590	ctx_done->stat = ICE_FDIR_CTX_IRQ;
1591	ctx_done->v_opcode = ctx_irq->v_opcode;
1592	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1593	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1594
1595	ret = del_timer(&ctx_irq->rx_tmr);
1596	if (!ret)
1597		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1598
1599	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1600	ice_service_task_schedule(pf);
1601}
1602
1603/**
1604 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1605 * @vf: pointer to the VF info
1606 */
1607static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1608{
1609	struct ice_vsi *vf_vsi;
1610	u32 fd_size, fd_cnt;
1611	struct device *dev;
1612	struct ice_pf *pf;
1613	struct ice_hw *hw;
1614	u16 vsi_num;
1615
1616	pf = vf->pf;
1617	hw = &pf->hw;
1618	dev = ice_pf_to_dev(pf);
1619	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return;
	}
1620	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1621
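	/* VSIQF_FD_SIZE reports the guaranteed/best-effort filter space
	 * allocated to this VSI; VSIQF_FD_CNT reports how much is consumed.
	 */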
1622	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1623	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1624	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
1625		vf->vf_id,
1626		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
1627		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
1628		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
1629		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
1630}
1631
1632/**
1633 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1634 * @vf: pointer to the VF info
1635 * @ctx: FDIR context info for post processing
1636 * @status: virtchnl FDIR program status
1637 *
1638 * Return: 0 on success, and other on error.
1639 */
1640static int
1641ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1642		      enum virtchnl_fdir_prgm_status *status)
1643{
1644	struct device *dev = ice_pf_to_dev(vf->pf);
1645	u32 stat_err, error, prog_id;
1646	int ret;
1647
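	/* The hardware reports the programming result in the writeback
	 * descriptor: check Descriptor Done first, then that the reported
	 * programming opcode matches the request, then the failure bits.
	 */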
1648	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1649	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
1650	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
1651		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1652		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1653		ret = -EINVAL;
1654		goto err_exit;
1655	}
1656
1657	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
1658		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
1659	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1660	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1661		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
1662			vf->vf_id);
1663		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1664		ret = -EINVAL;
1665		goto err_exit;
1666	}
1667
1668	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1669	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1670		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
1671			vf->vf_id);
1672		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1673		ret = -EINVAL;
1674		goto err_exit;
1675	}
1676
1677	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
1678		ICE_FXD_FLTR_WB_QW1_FAIL_S;
1679	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1680		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1681			dev_err(dev, "VF %d: Failed to add FDIR rule due to no space in the table\n",
1682				vf->vf_id);
1683			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1684		} else {
1685			dev_err(dev, "VF %d: Failed to remove FDIR rule, attempt to remove non-existent entry\n",
1686				vf->vf_id);
1687			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1688		}
1689		ret = -EINVAL;
1690		goto err_exit;
1691	}
1692
1693	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
1694		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
1695	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1696		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
1697		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1698		ret = -EINVAL;
1699		goto err_exit;
1700	}
1701
1702	*status = VIRTCHNL_FDIR_SUCCESS;
1703
1704	return 0;
1705
1706err_exit:
1707	ice_vf_fdir_dump_info(vf);
1708	return ret;
1709}
1710
1711/**
1712 * ice_vc_add_fdir_fltr_post - post process for the FDIR add command
1713 * @vf: pointer to the VF structure
1714 * @ctx: FDIR context info for post processing
1715 * @status: virtchnl FDIR program status
1716 * @success: true implies success, false implies failure
1717 *
1718 * Post-process the flow director add command. On success, update the filter
1719 * bookkeeping and send a success message back by virtchnl. Otherwise, revert
1720 * the context changes and send a failure message back by virtchnl.
1721 *
1722 * Return: 0 on success, and other on error.
1723 */
1724static int
1725ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1726			  enum virtchnl_fdir_prgm_status status,
1727			  bool success)
1728{
1729	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1730	struct device *dev = ice_pf_to_dev(vf->pf);
1731	enum virtchnl_status_code v_ret;
1732	struct virtchnl_fdir_add *resp;
1733	int ret, len, is_tun;
1734
1735	v_ret = VIRTCHNL_STATUS_SUCCESS;
1736	len = sizeof(*resp);
1737	resp = kzalloc(len, GFP_KERNEL);
1738	if (!resp) {
1739		len = 0;
1740		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1741		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1742		goto err_exit;
1743	}
1744
1745	if (!success)
1746		goto err_exit;
1747
1748	is_tun = 0;
1749	resp->status = status;
1750	resp->flow_id = conf->flow_id;
1751	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1752
1753	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1754				    (u8 *)resp, len);
1755	kfree(resp);
1756
1757	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1758		vf->vf_id, conf->flow_id,
1759		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1760		"add" : "del");
1761	return ret;
1762
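	/* Failure: undo the IDR/list insertion performed at add time and
	 * free the filter configuration before replying to the VF.
	 */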
1763err_exit:
1764	if (resp)
1765		resp->status = status;
1766	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1767	devm_kfree(dev, conf);
1768
1769	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1770				    (u8 *)resp, len);
1771	kfree(resp);
1772	return ret;
1773}
1774
1775/**
1776 * ice_vc_del_fdir_fltr_post - post process for the FDIR del command
1777 * @vf: pointer to the VF structure
1778 * @ctx: FDIR context info for post processing
1779 * @status: virtchnl FDIR program status
1780 * @success: true implies success, false implies failure
1781 *
1782 * Post-process the flow director del command. On success, remove the filter
1783 * from the tracking structures and send a success message back by virtchnl.
1784 * Otherwise, revert the context changes and send a failure message back.
1785 *
1786 * Return: 0 on success, and other on error.
1787 */
1788static int
1789ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1790			  enum virtchnl_fdir_prgm_status status,
1791			  bool success)
1792{
1793	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1794	struct device *dev = ice_pf_to_dev(vf->pf);
1795	enum virtchnl_status_code v_ret;
1796	struct virtchnl_fdir_del *resp;
1797	int ret, len, is_tun;
1798
1799	v_ret = VIRTCHNL_STATUS_SUCCESS;
1800	len = sizeof(*resp);
1801	resp = kzalloc(len, GFP_KERNEL);
1802	if (!resp) {
1803		len = 0;
1804		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1805		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1806		goto err_exit;
1807	}
1808
1809	if (!success)
1810		goto err_exit;
1811
1812	is_tun = 0;
1813	resp->status = status;
1814	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1815	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1816
1817	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1818				    (u8 *)resp, len);
1819	kfree(resp);
1820
1821	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1822		vf->vf_id, conf->flow_id,
1823		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1824		"add" : "del");
1825	devm_kfree(dev, conf);
1826	return ret;
1827
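	/* conf is freed only when the hardware delete actually succeeded;
	 * on a failed delete the filter stays on the VF's tracking list.
	 */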
1828err_exit:
1829	if (resp)
1830		resp->status = status;
1831	if (success)
1832		devm_kfree(dev, conf);
1833
1834	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1835				    (u8 *)resp, len);
1836	kfree(resp);
1837	return ret;
1838}
1839
1840/**
1841 * ice_flush_fdir_ctx - process all pending FDIR completion contexts
1842 * @pf: pointer to the PF structure
1843 *
1844 * Flush all the pending events on the ctx_done list and process them.
1845 */
1846void ice_flush_fdir_ctx(struct ice_pf *pf)
1847{
1848	int i;
1849
1850	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1851		return;
1852
1853	ice_for_each_vf(pf, i) {
1854		struct device *dev = ice_pf_to_dev(pf);
1855		enum virtchnl_fdir_prgm_status status;
1856		struct ice_vf *vf = &pf->vf[i];
1857		struct ice_vf_fdir_ctx *ctx;
1858		unsigned long flags;
1859		int ret;
1860
1861		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1862			continue;
1863
1864		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1865			continue;
1866
1867		ctx = &vf->fdir.ctx_done;
1868		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1869		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1870			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1871			continue;
1872		}
1873		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1874
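		/* ctx->stat was set by either the IRQ handler or the timer;
		 * READY here means neither of them ran, which is a bug.
		 */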
1875		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1876		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1877			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1878			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1879				vf->vf_id);
1880			goto err_exit;
1881		}
1882
1883		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1884		if (ret)
1885			goto err_exit;
1886
1887		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1888			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1889		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1890			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1891		else
1892			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1893
1894		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1895		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1896		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1897		continue;
1898err_exit:
1899		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1900			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1901		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1902			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1903		else
1904			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1905
1906		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1907		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1908		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1909	}
1910}
1911
1912/**
1913 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
1914 * @vf: pointer to the VF structure
1915 * @conf: FDIR configuration for each filter
1916 * @v_opcode: virtual channel operation code
1917 *
1918 * Return: 0 on success, and other on error.
1919 */
1920static int
1921ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
1922			enum virtchnl_ops v_opcode)
1923{
1924	struct device *dev = ice_pf_to_dev(vf->pf);
1925	struct ice_vf_fdir_ctx *ctx;
1926	unsigned long flags;
1927
1928	ctx = &vf->fdir.ctx_irq;
1929	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1930	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
1931	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
1932		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1933		dev_dbg(dev, "VF %d: Last request is still in progress\n",
1934			vf->vf_id);
1935		return -EBUSY;
1936	}
1937	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
1938	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1939
1940	ctx->conf = conf;
1941	ctx->v_opcode = v_opcode;
1942	ctx->stat = ICE_FDIR_CTX_READY;
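	/* Arm a 10 ms watchdog: if no programming writeback arrives in time,
	 * ice_vf_fdir_timer() fails the request with a TIMEOUT status.
	 */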
1943	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
1944
1945	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
1946
1947	return 0;
1948}
1949
1950/**
1951 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
1952 * @vf: pointer to the VF structure
1955 */
1956static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
1957{
1958	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
1959	unsigned long flags;
1960
1961	del_timer(&ctx->rx_tmr);
1962	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1963	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1964	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1965}
1966
1967/**
1968 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
1969 * @vf: pointer to the VF info
1970 * @msg: pointer to the msg buffer
1971 *
1972 * Return: 0 on success, and other on error.
1973 */
1974int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
1975{
1976	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
1977	struct virtchnl_fdir_add *stat = NULL;
1978	struct virtchnl_fdir_fltr_conf *conf;
1979	enum virtchnl_status_code v_ret;
1980	struct device *dev;
1981	struct ice_pf *pf;
1982	int is_tun = 0;
1983	int len = 0;
1984	int ret;
1985
1986	pf = vf->pf;
1987	dev = ice_pf_to_dev(pf);
1988	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1989	if (ret) {
1990		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1991		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1992		goto err_exit;
1993	}
1994
1995	ret = ice_vf_start_ctrl_vsi(vf);
1996	if (ret && (ret != -EEXIST)) {
1997		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1998		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
1999			vf->vf_id, ret);
2000		goto err_exit;
2001	}
2002
2003	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
2004	if (!stat) {
2005		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2006		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
2007		goto err_exit;
2008	}
2009
2010	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
2011	if (!conf) {
2012		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2013		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
2014		goto err_exit;
2015	}
2016
2017	len = sizeof(*stat);
2018	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
2019	if (ret) {
2020		v_ret = VIRTCHNL_STATUS_SUCCESS;
2021		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
2022		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
2023		goto err_free_conf;
2024	}
2025
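	/* A validate-only request is answered immediately: no filter is
	 * programmed and the just-allocated conf is released.
	 */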
2026	if (fltr->validate_only) {
2027		v_ret = VIRTCHNL_STATUS_SUCCESS;
2028		stat->status = VIRTCHNL_FDIR_SUCCESS;
2029		devm_kfree(dev, conf);
2030		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
2031					    v_ret, (u8 *)stat, len);
2032		goto exit;
2033	}
2034
2035	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
2036	if (ret) {
2037		v_ret = VIRTCHNL_STATUS_SUCCESS;
2038		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
2039		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
2040			vf->vf_id, ret);
2041		goto err_free_conf;
2042	}
2043
2044	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
2045	if (ret) {
2046		v_ret = VIRTCHNL_STATUS_SUCCESS;
2047		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
2048		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
2049			vf->vf_id);
2050		goto err_free_conf;
2051	}
2052
2053	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
2054	if (ret) {
2055		v_ret = VIRTCHNL_STATUS_SUCCESS;
2056		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2057		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
2058		goto err_free_conf;
2059	}
2060
2061	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
2062	if (ret) {
2063		v_ret = VIRTCHNL_STATUS_SUCCESS;
2064		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2065		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
2066		goto err_free_conf;
2067	}
2068
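	/* Write the rule to hardware. On success the reply to the VF is
	 * deferred until ice_flush_fdir_ctx() processes the programming
	 * writeback (or the watchdog timeout).
	 */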
2069	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
2070	if (ret) {
2071		v_ret = VIRTCHNL_STATUS_SUCCESS;
2072		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2073		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
2074			vf->vf_id, ret);
2075		goto err_rem_entry;
2076	}
2077
2078exit:
2079	kfree(stat);
2080	return ret;
2081
2082err_rem_entry:
2083	ice_vc_fdir_clear_irq_ctx(vf);
2084	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
2085err_free_conf:
2086	devm_kfree(dev, conf);
2087err_exit:
2088	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
2089				    (u8 *)stat, len);
2090	kfree(stat);
2091	return ret;
2092}
2093
2094/**
2095 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
2096 * @vf: pointer to the VF info
2097 * @msg: pointer to the msg buffer
2098 *
2099 * Return: 0 on success, and other on error.
2100 */
2101int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
2102{
2103	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
2104	struct virtchnl_fdir_del *stat = NULL;
2105	struct virtchnl_fdir_fltr_conf *conf;
2106	enum virtchnl_status_code v_ret;
2107	struct device *dev;
2108	struct ice_pf *pf;
2109	int is_tun = 0;
2110	int len = 0;
2111	int ret;
2112
2113	pf = vf->pf;
2114	dev = ice_pf_to_dev(pf);
2115	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
2116	if (ret) {
2117		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2118		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
2119		goto err_exit;
2120	}
2121
2122	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
2123	if (!stat) {
2124		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2125		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
2126		goto err_exit;
2127	}
2128
2129	len = sizeof(*stat);
2130
2131	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
2132	if (!conf) {
2133		v_ret = VIRTCHNL_STATUS_SUCCESS;
2134		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
2135		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
2136			vf->vf_id, fltr->flow_id);
2137		goto err_exit;
2138	}
2139
2140	/* Just return failure when ctrl_vsi idx is invalid */
2141	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
2142		v_ret = VIRTCHNL_STATUS_SUCCESS;
2143		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2144		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
2145		goto err_exit;
2146	}
2147
2148	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
2149	if (ret) {
2150		v_ret = VIRTCHNL_STATUS_SUCCESS;
2151		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2152		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
2153		goto err_exit;
2154	}
2155
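	/* As with add, the reply to the VF is deferred until
	 * ice_flush_fdir_ctx() processes the writeback or the timeout.
	 */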
2156	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
2157	if (ret) {
2158		v_ret = VIRTCHNL_STATUS_SUCCESS;
2159		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
2160		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
2161			vf->vf_id, ret);
2162		goto err_del_tmr;
2163	}
2164
2165	kfree(stat);
2166
2167	return ret;
2168
2169err_del_tmr:
2170	ice_vc_fdir_clear_irq_ctx(vf);
2171err_exit:
2172	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
2173				    (u8 *)stat, len);
2174	kfree(stat);
2175	return ret;
2176}
2177
2178/**
2179 * ice_vf_fdir_init - init FDIR resource for VF
2180 * @vf: pointer to the VF info
2181 */
2182void ice_vf_fdir_init(struct ice_vf *vf)
2183{
2184	struct ice_vf_fdir *fdir = &vf->fdir;
2185
2186	idr_init(&fdir->fdir_rule_idr);
2187	INIT_LIST_HEAD(&fdir->fdir_rule_list);
2188
2189	spin_lock_init(&fdir->ctx_lock);
2190	fdir->ctx_irq.flags = 0;
2191	fdir->ctx_done.flags = 0;
2192}
2193
2194/**
2195 * ice_vf_fdir_exit - destroy FDIR resource for VF
2196 * @vf: pointer to the VF info
2197 */
2198void ice_vf_fdir_exit(struct ice_vf *vf)
2199{
2200	ice_vc_fdir_flush_entry(vf);
2201	idr_destroy(&vf->fdir.fdir_rule_idr);
2202	ice_vc_fdir_rem_prof_all(vf);
2203	ice_vc_fdir_free_prof_all(vf);
2204}