   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2018-2023, Intel Corporation. */
   3
   4/* flow director ethtool support for ice */
   5
   6#include "ice.h"
   7#include "ice_lib.h"
   8#include "ice_fdir.h"
   9#include "ice_flow.h"
  10
  11static struct in6_addr full_ipv6_addr_mask = {
  12	.in6_u = {
  13		.u6_addr8 = {
  14			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  15			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  16		}
  17	}
  18};
  19
  20static struct in6_addr zero_ipv6_addr_mask = {
  21	.in6_u = {
  22		.u6_addr8 = {
  23			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  24			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  25		}
  26	}
  27};
  28
  29/* calls to ice_flow_add_prof require the number of segments in the array
  30 * for segs_cnt. In this code that is one more than the index.
  31 */
  32#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
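
/* A quick illustration, assuming the usual enum ice_fd_hw_seg values of
 * ICE_FD_HW_SEG_NON_TUN == 0 and ICE_FD_HW_SEG_TUN == 1:
 *
 *	TNL_SEG_CNT(ICE_FD_HW_SEG_NON_TUN) == 1	outer headers only
 *	TNL_SEG_CNT(ICE_FD_HW_SEG_TUN)     == 2	outer + inner headers
 */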
  33
  34/**
  35 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
  36 * flow type values
  37 * @flow: filter type to be converted
  38 *
  39 * Returns the corresponding ethtool flow type.
  40 */
  41static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
  42{
  43	switch (flow) {
  44	case ICE_FLTR_PTYPE_NONF_ETH:
  45		return ETHER_FLOW;
  46	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
  47		return TCP_V4_FLOW;
  48	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
  49		return UDP_V4_FLOW;
  50	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
  51		return SCTP_V4_FLOW;
  52	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
  53		return IPV4_USER_FLOW;
  54	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
  55		return TCP_V6_FLOW;
  56	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
  57		return UDP_V6_FLOW;
  58	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
  59		return SCTP_V6_FLOW;
  60	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
  61		return IPV6_USER_FLOW;
  62	default:
  63		/* 0 is undefined ethtool flow */
  64		return 0;
  65	}
  66}
  67
  68/**
  69 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
  70 * @eth: Ethtool flow type to be converted
  71 *
   72 * Returns the corresponding ice_fltr_ptype enum
  73 */
  74static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
  75{
  76	switch (eth) {
  77	case ETHER_FLOW:
  78		return ICE_FLTR_PTYPE_NONF_ETH;
  79	case TCP_V4_FLOW:
  80		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
  81	case UDP_V4_FLOW:
  82		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
  83	case SCTP_V4_FLOW:
  84		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
  85	case IPV4_USER_FLOW:
  86		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
  87	case TCP_V6_FLOW:
  88		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
  89	case UDP_V6_FLOW:
  90		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
  91	case SCTP_V6_FLOW:
  92		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
  93	case IPV6_USER_FLOW:
  94		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
  95	default:
  96		return ICE_FLTR_PTYPE_NONF_NONE;
  97	}
  98}
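
/* The two converters above are inverses of each other for the supported
 * types, e.g.:
 *
 *	ice_ethtool_flow_to_fltr(TCP_V4_FLOW) == ICE_FLTR_PTYPE_NONF_IPV4_TCP
 *	ice_fltr_to_ethtool_flow(ICE_FLTR_PTYPE_NONF_IPV4_TCP) == TCP_V4_FLOW
 *
 * Unsupported inputs collapse to the sentinels 0 and
 * ICE_FLTR_PTYPE_NONF_NONE, which callers must treat as "no mapping" rather
 * than as valid flow types.
 */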
  99
 100/**
 101 * ice_is_mask_valid - check mask field set
 102 * @mask: full mask to check
 103 * @field: field for which mask should be valid
 104 *
  105 * If the mask covers every bit of @field, return true. Otherwise return
  106 * false.
 107 */
 108static bool ice_is_mask_valid(u64 mask, u64 field)
 109{
 110	return (mask & field) == field;
 111}
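
/* Example: ice_is_mask_valid(0xFFFFFFFF, GENMASK_ULL(31, 16)) is true, since
 * every bit of the field is covered by the mask; ice_is_mask_valid(0xFFFF,
 * GENMASK_ULL(31, 16)) is false, since the field's bits are not masked at
 * all.
 */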
 112
 113/**
 114 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 115 * @hw: hardware structure that contains filter list
 116 * @cmd: ethtool command data structure to receive the filter data
 117 *
 118 * Returns 0 on success and -EINVAL on failure
 119 */
 120int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
 121{
 122	struct ethtool_rx_flow_spec *fsp;
 123	struct ice_fdir_fltr *rule;
 124	int ret = 0;
 125	u16 idx;
 126
 127	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
 128
 129	mutex_lock(&hw->fdir_fltr_lock);
 130
 131	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);
 132
 133	if (!rule || fsp->location != rule->fltr_id) {
 134		ret = -EINVAL;
 135		goto release_lock;
 136	}
 137
 138	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);
 139
 140	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
 141	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
 142
 143	switch (fsp->flow_type) {
 144	case ETHER_FLOW:
 145		fsp->h_u.ether_spec = rule->eth;
 146		fsp->m_u.ether_spec = rule->eth_mask;
 147		break;
 148	case IPV4_USER_FLOW:
 149		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 150		fsp->h_u.usr_ip4_spec.proto = 0;
 151		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
 152		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
 153		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
 154		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
 155		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
 156		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
 157		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
 158		fsp->m_u.usr_ip4_spec.proto = 0;
 159		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
 160		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
 161		break;
 162	case TCP_V4_FLOW:
 163	case UDP_V4_FLOW:
 164	case SCTP_V4_FLOW:
 165		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
 166		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
 167		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
 168		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
 169		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
 170		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
 171		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
 172		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
 173		break;
 174	case IPV6_USER_FLOW:
 175		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
 176		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
 177		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
 178		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
 179		       sizeof(struct in6_addr));
 180		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
 181		       sizeof(struct in6_addr));
 182		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
 183		       sizeof(struct in6_addr));
 184		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
 185		       sizeof(struct in6_addr));
 186		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
 187		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
 188		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
 189		break;
 190	case TCP_V6_FLOW:
 191	case UDP_V6_FLOW:
 192	case SCTP_V6_FLOW:
 193		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
 194		       sizeof(struct in6_addr));
 195		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
 196		       sizeof(struct in6_addr));
 197		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
 198		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
 199		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
 200		       rule->mask.v6.src_ip,
 201		       sizeof(struct in6_addr));
 202		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
 203		       rule->mask.v6.dst_ip,
 204		       sizeof(struct in6_addr));
 205		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
 206		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
 207		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
 208		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
 209		break;
 210	default:
 211		break;
 212	}
 213
 214	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
 215		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 216	else
 217		fsp->ring_cookie = rule->orig_q_index;
 218
 219	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
 220	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
 221		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
 222			rule->flow_type);
 223		ret = -EINVAL;
 224	}
 225
 226release_lock:
 227	mutex_unlock(&hw->fdir_fltr_lock);
 228	return ret;
 229}
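
/* Userspace reaches this function through the ETHTOOL_GRXCLSRULE ioctl; the
 * companion ETHTOOL_GRXCLSRLALL request is served by ice_get_fdir_fltr_ids()
 * below. A sketch with the ethtool utility (device name and rule location
 * are illustrative):
 *
 *	ethtool -n eth0 rule 5		# dump the rule stored at location 5
 *	ethtool -n eth0			# list the locations of all rules
 */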
 230
 231/**
 232 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 233 * @hw: hardware structure containing the filter list
 234 * @cmd: ethtool command data structure
 235 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 236 *
  237 * Returns 0, as ethtool expects on success
 238 */
 239int
 240ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
 241		      u32 *rule_locs)
 242{
 243	struct ice_fdir_fltr *f_rule;
 244	unsigned int cnt = 0;
 245	int val = 0;
 246
 247	/* report total rule count */
 248	cmd->data = ice_get_fdir_cnt_all(hw);
 249
 250	mutex_lock(&hw->fdir_fltr_lock);
 251
 252	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
 253		if (cnt == cmd->rule_cnt) {
 254			val = -EMSGSIZE;
 255			goto release_lock;
 256		}
 257		rule_locs[cnt] = f_rule->fltr_id;
 258		cnt++;
 259	}
 260
 261release_lock:
 262	mutex_unlock(&hw->fdir_fltr_lock);
 263	if (!val)
 264		cmd->rule_cnt = cnt;
 265	return val;
 266}
 267
 268/**
 269 * ice_fdir_remap_entries - update the FDir entries in profile
 270 * @prof: FDir structure pointer
 271 * @tun: tunneled or non-tunneled packet
 272 * @idx: FDir entry index
 273 */
 274static void
 275ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
 276{
 277	if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
 278		int i;
 279
 280		for (i = idx; i < (prof->cnt - 1); i++) {
 281			u64 old_entry_h;
 282
 283			old_entry_h = prof->entry_h[i + 1][tun];
 284			prof->entry_h[i][tun] = old_entry_h;
 285			prof->vsi_h[i] = prof->vsi_h[i + 1];
 286		}
 287
 288		prof->entry_h[i][tun] = 0;
 289		prof->vsi_h[i] = 0;
 290	}
 291}
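
/* A sketch of the shift performed above for one tunnel segment: with
 * prof->cnt == 4 and idx == 1 (entry 1 just freed), entries 2 and 3 slide
 * down and the tail is cleared:
 *
 *	before: entry_h[] = { A, -, C, D }   vsi_h[] = { v0, -, v2, v3 }
 *	after:  entry_h[] = { A, C, D, 0 }   vsi_h[] = { v0, v2, v3, 0 }
 *
 * The caller is expected to decrement prof->cnt afterwards, as
 * ice_fdir_rem_adq_chnl() below does.
 */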
 292
 293/**
 294 * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
 295 * @hw: hardware structure containing filter list
 296 * @vsi_idx: VSI handle
 297 */
 298void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
 299{
 300	int status, flow;
 301
 302	if (!hw->fdir_prof)
 303		return;
 304
 305	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
 306		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
 307		int tun, i;
 308
 309		if (!prof || !prof->cnt)
 310			continue;
 311
 312		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 313			u64 prof_id = prof->prof_id[tun];
 314
 315			for (i = 0; i < prof->cnt; i++) {
 316				if (prof->vsi_h[i] != vsi_idx)
 317					continue;
 318
 319				prof->entry_h[i][tun] = 0;
 320				prof->vsi_h[i] = 0;
 321				break;
 322			}
 323
  324			/* after clearing FDir entries, update the remaining ones */
 325			ice_fdir_remap_entries(prof, tun, i);
 326
 327			/* find flow profile corresponding to prof_id and clear
 328			 * vsi_idx from bitmap.
 329			 */
 330			status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
 331			if (status) {
 332				dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
 333					status);
 334			}
 335		}
 336		prof->cnt--;
 337	}
 338}
 339
 340/**
  341 * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
 342 * @hw: hardware structure containing the filter list
 343 * @blk: hardware block
 344 * @flow: FDir flow type to release
 345 */
 346static struct ice_fd_hw_prof *
 347ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
 348{
 349	if (blk == ICE_BLK_FD && hw->fdir_prof)
 350		return hw->fdir_prof[flow];
 351
 352	return NULL;
 353}
 354
 355/**
 356 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 357 * @hw: hardware structure containing the filter list
 358 * @blk: hardware block
 359 * @flow: FDir flow type to release
 360 */
 361static void
 362ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
 363{
 364	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
 365	int tun;
 366
 367	if (!prof)
 368		return;
 369
 370	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 371		u64 prof_id = prof->prof_id[tun];
 372		int j;
 373
 374		for (j = 0; j < prof->cnt; j++) {
 375			u16 vsi_num;
 376
 377			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
 378				continue;
 379			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
 380			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
 381			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
 382			prof->entry_h[j][tun] = 0;
 383		}
 384		ice_flow_rem_prof(hw, blk, prof_id);
 385	}
 386}
 387
 388/**
 389 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 390 * @hw: hardware structure containing the filter list
 391 * @blk: hardware block
 392 * @flow_type: FDir flow type to release
 393 */
 394static void
 395ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
 396		  enum ice_fltr_ptype flow_type)
 397{
 398	int flow = (int)flow_type & ~FLOW_EXT;
 399	struct ice_fd_hw_prof *prof;
 400	int tun, i;
 401
 402	prof = ice_fdir_get_hw_prof(hw, blk, flow);
 403	if (!prof)
 404		return;
 405
 406	ice_fdir_erase_flow_from_hw(hw, blk, flow);
 407	for (i = 0; i < prof->cnt; i++)
 408		prof->vsi_h[i] = 0;
 409	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 410		if (!prof->fdir_seg[tun])
 411			continue;
 412		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
 413		prof->fdir_seg[tun] = NULL;
 414	}
 415	prof->cnt = 0;
 416}
 417
 418/**
 419 * ice_fdir_release_flows - release all flows in use for later replay
 420 * @hw: pointer to HW instance
 421 */
 422void ice_fdir_release_flows(struct ice_hw *hw)
 423{
 424	int flow;
 425
 426	/* release Flow Director HW table entries */
 427	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
 428		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
 429}
 430
 431/**
 432 * ice_fdir_replay_flows - replay HW Flow Director filter info
 433 * @hw: pointer to HW instance
 434 */
 435void ice_fdir_replay_flows(struct ice_hw *hw)
 436{
 437	int flow;
 438
 439	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
 440		int tun;
 441
 442		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
 443			continue;
 444		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 445			struct ice_flow_prof *hw_prof;
 446			struct ice_fd_hw_prof *prof;
 447			int j;
 448
 449			prof = hw->fdir_prof[flow];
 450			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
 451					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
 452					  false, &hw_prof);
 453			for (j = 0; j < prof->cnt; j++) {
 454				enum ice_flow_priority prio;
 455				u64 entry_h = 0;
 456				int err;
 457
 458				prio = ICE_FLOW_PRIO_NORMAL;
 459				err = ice_flow_add_entry(hw, ICE_BLK_FD,
 460							 hw_prof->id,
 461							 prof->vsi_h[0],
 462							 prof->vsi_h[j],
 463							 prio, prof->fdir_seg,
 464							 &entry_h);
 465				if (err) {
 466					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
 467						flow);
 468					continue;
 469				}
 470				prof->prof_id[tun] = hw_prof->id;
 471				prof->entry_h[j][tun] = entry_h;
 472			}
 473		}
 474	}
 475}
 476
 477/**
 478 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 479 * @fsp: pointer to ethtool Rx flow specification
 480 * @data: pointer to userdef data structure for storage
 481 *
 482 * Returns 0 on success, negative error value on failure
 483 */
 484static int
 485ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 486			    struct ice_rx_flow_userdef *data)
 487{
 488	u64 value, mask;
 489
 490	memset(data, 0, sizeof(*data));
 491	if (!(fsp->flow_type & FLOW_EXT))
 492		return 0;
 493
 494	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
 495	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
 496	if (!mask)
 497		return 0;
 498
 499#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
 500#define ICE_USERDEF_FLEX_OFFS_S	16
 501#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
 502#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)
 503
 504	/* 0x1fe is the maximum value for offsets stored in the internal
 505	 * filtering tables.
 506	 */
 507#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
 508
 509	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
 510	    value > ICE_USERDEF_FLEX_FLTR_M)
 511		return -EINVAL;
 512
 513	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
 514	data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value);
 515	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
 516		return -EINVAL;
 517
 518	data->flex_fltr = true;
 519
 520	return 0;
 521}
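
/* A worked example of the user-def layout above (the value is illustrative):
 * if the 64-bit user-def field decodes to 0x41234 with an all-ones mask over
 * bits 31:0, then
 *
 *	data->flex_word   = 0x1234	(bits 15:0)
 *	data->flex_offset = 0x4		(bits 31:16, <= 0x1fe so accepted)
 *
 * and data->flex_fltr is set. With the ethtool utility this is typically
 * entered as "user-def 0x41234" on an ntuple rule, though the exact syntax
 * depends on the ethtool version.
 */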
 522
 523/**
 524 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 525 * @hw: pointer to hardware structure
 526 * @vsi: software VSI structure
 527 *
  528 * There are two filter pools: guaranteed and best effort (shared). Each VSI
  529 * can use filters from either pool. The guaranteed pool is divided between
  530 * VSIs. The best effort filter pool is common to all VSIs and is a device
  531 * shared resource pool. The number of filters available to this VSI is the
  532 * sum of the VSI's guaranteed filter pool and the globally available best
  533 * effort filter pool.
 534 *
 535 * Returns the number of available flow director filters to this VSI
 536 */
 537int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
 538{
 539	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
 540	u16 num_guar;
 541	u16 num_be;
 542
 543	/* total guaranteed filters assigned to this VSI */
 544	num_guar = vsi->num_gfltr;
  545
 546	/* total global best effort filters */
 547	num_be = hw->func_caps.fd_fltr_best_effort;
 548
 549	/* Subtract the number of programmed filters from the global values */
 550	switch (hw->mac_type) {
 551	case ICE_MAC_E830:
 552		num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
 553				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
 554		num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
 555				    rd32(hw, GLQF_FD_CNT));
 556		break;
 557	case ICE_MAC_E810:
 558	default:
 559		num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
 560				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
 561		num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
 562				    rd32(hw, GLQF_FD_CNT));
 563	}
 564
 565	return num_guar + num_be;
 566}
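
/* Illustrative arithmetic for the computation above (the capability numbers
 * are made up, not taken from any particular device): with num_gfltr == 64,
 * fd_fltr_best_effort == 2048, 10 guaranteed and 100 best effort filters
 * already programmed, the counters read back as 10 and 100, giving
 *
 *	num_guar = 64 - 10    = 54
 *	num_be   = 2048 - 100 = 1948
 *	return     54 + 1948  = 2002 filters still available to this VSI
 */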
 567
 568/**
 569 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 570 * @hw: HW structure containing the FDir flow profile structure(s)
 571 * @flow: flow type to allocate the flow profile for
 572 *
 573 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 574 * on success and negative on error.
 575 */
 576static int
 577ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
 578{
 579	if (!hw)
 580		return -EINVAL;
 581
 582	if (!hw->fdir_prof) {
 583		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
 584					     ICE_FLTR_PTYPE_MAX,
 585					     sizeof(*hw->fdir_prof),
 586					     GFP_KERNEL);
 587		if (!hw->fdir_prof)
 588			return -ENOMEM;
 589	}
 590
 591	if (!hw->fdir_prof[flow]) {
 592		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
 593						   sizeof(**hw->fdir_prof),
 594						   GFP_KERNEL);
 595		if (!hw->fdir_prof[flow])
 596			return -ENOMEM;
 597	}
 598
 599	return 0;
 600}
 601
 602/**
 603 * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
 604 * @prof: pointer to flow director HW profile
 605 * @vsi_idx: vsi_idx to locate
 606 *
  607 * Return the index of the vsi_idx. If vsi_idx is not found, insert it
  608 * into the vsi_h table.
 609 */
 610static u16
 611ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
 612{
 613	u16 idx = 0;
 614
 615	for (idx = 0; idx < prof->cnt; idx++)
 616		if (prof->vsi_h[idx] == vsi_idx)
 617			return idx;
 618
 619	if (idx == prof->cnt)
 620		prof->vsi_h[prof->cnt++] = vsi_idx;
 621	return idx;
 622}
 623
 624/**
 625 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 626 * @pf: pointer to the PF structure
 627 * @seg: protocol header description pointer
 628 * @flow: filter enum
 629 * @tun: FDir segment to program
 630 */
 631static int
 632ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
 633			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
 634{
 635	struct device *dev = ice_pf_to_dev(pf);
 636	struct ice_vsi *main_vsi, *ctrl_vsi;
 637	struct ice_flow_seg_info *old_seg;
 638	struct ice_flow_prof *prof = NULL;
 639	struct ice_fd_hw_prof *hw_prof;
 640	struct ice_hw *hw = &pf->hw;
 641	u64 entry1_h = 0;
 642	u64 entry2_h = 0;
 643	bool del_last;
 644	int err;
 645	int idx;
 646
 647	main_vsi = ice_get_main_vsi(pf);
 648	if (!main_vsi)
 649		return -EINVAL;
 650
 651	ctrl_vsi = ice_get_ctrl_vsi(pf);
 652	if (!ctrl_vsi)
 653		return -EINVAL;
 654
 655	err = ice_fdir_alloc_flow_prof(hw, flow);
 656	if (err)
 657		return err;
 658
 659	hw_prof = hw->fdir_prof[flow];
 660	old_seg = hw_prof->fdir_seg[tun];
 661	if (old_seg) {
  662		/* This flow_type already has a changed input set.
  663		 * If it matches the requested input set then we are
  664		 * done. If it differs, that is an error.
  665		 */
 666		if (!memcmp(old_seg, seg, sizeof(*seg)))
 667			return -EEXIST;
 668
 669		/* if there are FDir filters using this flow,
 670		 * then return error.
 671		 */
 672		if (hw->fdir_fltr_cnt[flow]) {
 673			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
 674			return -EINVAL;
 675		}
 676
 677		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
 678			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
 679				flow);
 680			return -EINVAL;
 681		}
 682
 683		/* remove HW filter definition */
 684		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
 685	}
 686
  687	/* Adding a profile, but only one header is supported. That is,
  688	 * the final parameters are one header (segment), no actions
  689	 * (NULL), and an action count of zero.
  690	 */
 691	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
 692				TNL_SEG_CNT(tun), false, &prof);
 693	if (err)
 694		return err;
 695	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
 696				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 697				 seg, &entry1_h);
 698	if (err)
 699		goto err_prof;
 700	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
 701				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 702				 seg, &entry2_h);
 703	if (err)
 704		goto err_entry;
 705
 706	hw_prof->fdir_seg[tun] = seg;
 707	hw_prof->prof_id[tun] = prof->id;
 708	hw_prof->entry_h[0][tun] = entry1_h;
 709	hw_prof->entry_h[1][tun] = entry2_h;
 710	hw_prof->vsi_h[0] = main_vsi->idx;
 711	hw_prof->vsi_h[1] = ctrl_vsi->idx;
 712	if (!hw_prof->cnt)
 713		hw_prof->cnt = 2;
 714
 715	for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
 716		u16 vsi_idx;
 717		u16 vsi_h;
 718
 719		if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
 720			continue;
 721
 722		entry1_h = 0;
 723		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
 724		err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
 725					 main_vsi->idx, vsi_h,
 726					 ICE_FLOW_PRIO_NORMAL, seg,
 727					 &entry1_h);
 728		if (err) {
 729			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
 730				idx);
 731			goto err_unroll;
 732		}
 733
 734		vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
 735						main_vsi->tc_map_vsi[idx]->idx);
 736		hw_prof->entry_h[vsi_idx][tun] = entry1_h;
 737	}
 738
 739	return 0;
 740
 741err_unroll:
 742	entry1_h = 0;
 743	hw_prof->fdir_seg[tun] = NULL;
 744
 745	/* The variable del_last will be used to determine when to clean up
 746	 * the VSI group data. The VSI data is not needed if there are no
 747	 * segments.
 748	 */
 749	del_last = true;
 750	for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
 751		if (hw_prof->fdir_seg[idx]) {
 752			del_last = false;
 753			break;
 754		}
 755
 756	for (idx = 0; idx < hw_prof->cnt; idx++) {
 757		u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);
 758
 759		if (!hw_prof->entry_h[idx][tun])
 760			continue;
 761		ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
 762		ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
 763		hw_prof->entry_h[idx][tun] = 0;
 764		if (del_last)
 765			hw_prof->vsi_h[idx] = 0;
 766	}
 767	if (del_last)
 768		hw_prof->cnt = 0;
 769err_entry:
 770	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 771			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
 772	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 773err_prof:
 774	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
 775	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
 776
 777	return err;
 778}
 779
 780/**
 781 * ice_set_init_fdir_seg
 782 * @seg: flow segment for programming
 783 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 784 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 785 *
 786 * Set the configuration for perfect filters to the provided flow segment for
 787 * programming the HW filter. This is to be called only when initializing
  788 * filters, as this function assumes no filters exist.
 789 */
 790static int
 791ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
 792		      enum ice_flow_seg_hdr l3_proto,
 793		      enum ice_flow_seg_hdr l4_proto)
 794{
 795	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
 796
 797	if (!seg)
 798		return -EINVAL;
 799
 800	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
 801		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
 802		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
 803	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
 804		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
 805		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
 806	} else {
 807		return -EINVAL;
 808	}
 809
 810	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 811		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 812		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 813	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 814		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 815		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 816	} else {
 817		return -EINVAL;
 818	}
 819
 820	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
 821
 822	/* IP source address */
 823	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
 824			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 825
 826	/* IP destination address */
 827	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
 828			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 829
 830	/* Layer 4 source port */
 831	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 832			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 833
 834	/* Layer 4 destination port */
 835	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 836			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 837
 838	return 0;
 839}
 840
 841/**
 842 * ice_create_init_fdir_rule
 843 * @pf: PF structure
 844 * @flow: filter enum
 845 *
 846 * Return error value or 0 on success.
 847 */
 848static int
 849ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
 850{
 851	struct ice_flow_seg_info *seg, *tun_seg;
 852	struct device *dev = ice_pf_to_dev(pf);
 853	struct ice_hw *hw = &pf->hw;
 854	int ret;
 855
  856	/* if there is already a filter rule for this kind, return -EINVAL */
 857	if (hw->fdir_prof && hw->fdir_prof[flow] &&
 858	    hw->fdir_prof[flow]->fdir_seg[0])
 859		return -EINVAL;
 860
 861	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
 862	if (!seg)
 863		return -ENOMEM;
 864
 865	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
 866			       GFP_KERNEL);
 867	if (!tun_seg) {
 868		devm_kfree(dev, seg);
 869		return -ENOMEM;
 870	}
 871
 872	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
 873		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
 874					    ICE_FLOW_SEG_HDR_TCP);
 875	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
 876		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
 877					    ICE_FLOW_SEG_HDR_UDP);
 878	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
 879		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
 880					    ICE_FLOW_SEG_HDR_TCP);
 881	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
 882		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
 883					    ICE_FLOW_SEG_HDR_UDP);
 884	else
 885		ret = -EINVAL;
 886	if (ret)
 887		goto err_exit;
 888
 889	/* add filter for outer headers */
 890	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
 891	if (ret)
 892		/* could not write filter, free memory */
 893		goto err_exit;
 894
 895	/* make tunneled filter HW entries if possible */
 896	memcpy(&tun_seg[1], seg, sizeof(*seg));
 897	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
 898	if (ret)
 899		/* could not write tunnel filter, but outer header filter
 900		 * exists
 901		 */
 902		devm_kfree(dev, tun_seg);
 903
 904	set_bit(flow, hw->fdir_perfect_fltr);
 905	return ret;
 906err_exit:
 907	devm_kfree(dev, tun_seg);
 908	devm_kfree(dev, seg);
 909
 910	return -EOPNOTSUPP;
 911}
 912
 913/**
 914 * ice_set_fdir_ip4_seg
 915 * @seg: flow segment for programming
 916 * @tcp_ip4_spec: mask data from ethtool
 917 * @l4_proto: Layer 4 protocol to program
 918 * @perfect_fltr: only valid on success; returns true if perfect filter,
 919 *		  false if not
 920 *
 921 * Set the mask data into the flow segment to be used to program HW
 922 * table based on provided L4 protocol for IPv4
 923 */
 924static int
 925ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
 926		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
 927		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
 928{
 929	enum ice_flow_field src_port, dst_port;
 930
 931	/* make sure we don't have any empty rule */
 932	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
 933	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
 934		return -EINVAL;
 935
 936	/* filtering on TOS not supported */
 937	if (tcp_ip4_spec->tos)
 938		return -EOPNOTSUPP;
 939
 940	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 941		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 942		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 943	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 944		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 945		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 946	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
 947		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
 948		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
 949	} else {
 950		return -EOPNOTSUPP;
 951	}
 952
 953	*perfect_fltr = true;
 954	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);
 955
 956	/* IP source address */
 957	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
 958		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 959				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 960				 ICE_FLOW_FLD_OFF_INVAL, false);
 961	else if (!tcp_ip4_spec->ip4src)
 962		*perfect_fltr = false;
 963	else
 964		return -EOPNOTSUPP;
 965
 966	/* IP destination address */
 967	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
 968		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 969				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 970				 ICE_FLOW_FLD_OFF_INVAL, false);
 971	else if (!tcp_ip4_spec->ip4dst)
 972		*perfect_fltr = false;
 973	else
 974		return -EOPNOTSUPP;
 975
 976	/* Layer 4 source port */
 977	if (tcp_ip4_spec->psrc == htons(0xFFFF))
 978		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 979				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 980				 false);
 981	else if (!tcp_ip4_spec->psrc)
 982		*perfect_fltr = false;
 983	else
 984		return -EOPNOTSUPP;
 985
 986	/* Layer 4 destination port */
 987	if (tcp_ip4_spec->pdst == htons(0xFFFF))
 988		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 989				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 990				 false);
 991	else if (!tcp_ip4_spec->pdst)
 992		*perfect_fltr = false;
 993	else
 994		return -EOPNOTSUPP;
 995
 996	return 0;
 997}
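
/* The helper above accepts only all-or-nothing field masks: each of ip4src,
 * ip4dst, psrc and pdst is either fully set (matched exactly) or zero
 * (wildcarded, which makes the filter non-perfect). A sketch of the outcome
 * for a TCP/IPv4 rule, with masks listed as {ip4src, ip4dst, psrc, pdst}:
 *
 *	{~0, ~0, 0xFFFF, 0xFFFF}  4-tuple match, *perfect_fltr == true
 *	{~0, ~0, 0,      0     }  address-only match, *perfect_fltr == false
 *	{~0, 0x00FF0000, .., ..}  partial mask, rejected with -EOPNOTSUPP
 */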
 998
 999/**
1000 * ice_set_fdir_ip4_usr_seg
1001 * @seg: flow segment for programming
1002 * @usr_ip4_spec: ethtool userdef packet offset
1003 * @perfect_fltr: only valid on success; returns true if perfect filter,
1004 *		  false if not
1005 *
1006 * Set the offset data into the flow segment to be used to program HW
1007 * table for IPv4
1008 */
1009static int
1010ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
1011			 struct ethtool_usrip4_spec *usr_ip4_spec,
1012			 bool *perfect_fltr)
1013{
1014	/* first 4 bytes of Layer 4 header */
1015	if (usr_ip4_spec->l4_4_bytes)
1016		return -EINVAL;
1017	if (usr_ip4_spec->tos)
1018		return -EINVAL;
1019	if (usr_ip4_spec->ip_ver)
1020		return -EINVAL;
1021	/* Filtering on Layer 4 protocol not supported */
1022	if (usr_ip4_spec->proto)
1023		return -EOPNOTSUPP;
1024	/* empty rules are not valid */
1025	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
1026		return -EINVAL;
1027
1028	*perfect_fltr = true;
1029	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
1030
1031	/* IP source address */
1032	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
1033		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
1034				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1035				 ICE_FLOW_FLD_OFF_INVAL, false);
1036	else if (!usr_ip4_spec->ip4src)
1037		*perfect_fltr = false;
1038	else
1039		return -EOPNOTSUPP;
1040
1041	/* IP destination address */
1042	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
1043		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
1044				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1045				 ICE_FLOW_FLD_OFF_INVAL, false);
1046	else if (!usr_ip4_spec->ip4dst)
1047		*perfect_fltr = false;
1048	else
1049		return -EOPNOTSUPP;
1050
1051	return 0;
1052}
1053
1054/**
1055 * ice_set_fdir_ip6_seg
1056 * @seg: flow segment for programming
1057 * @tcp_ip6_spec: mask data from ethtool
1058 * @l4_proto: Layer 4 protocol to program
1059 * @perfect_fltr: only valid on success; returns true if perfect filter,
1060 *		  false if not
1061 *
1062 * Set the mask data into the flow segment to be used to program HW
1063 * table based on provided L4 protocol for IPv6
1064 */
1065static int
1066ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
1067		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
1068		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
1069{
1070	enum ice_flow_field src_port, dst_port;
1071
1072	/* make sure we don't have any empty rule */
1073	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1074		    sizeof(struct in6_addr)) &&
1075	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1076		    sizeof(struct in6_addr)) &&
1077	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
1078		return -EINVAL;
1079
1080	/* filtering on TC not supported */
1081	if (tcp_ip6_spec->tclass)
1082		return -EOPNOTSUPP;
1083
1084	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
1085		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
1086		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
1087	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
1088		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
1089		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
1090	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
1091		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
1092		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
1093	} else {
1094		return -EINVAL;
1095	}
1096
1097	*perfect_fltr = true;
1098	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
1099
1100	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
1101		    sizeof(struct in6_addr)))
1102		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1103				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1104				 ICE_FLOW_FLD_OFF_INVAL, false);
1105	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1106			 sizeof(struct in6_addr)))
1107		*perfect_fltr = false;
1108	else
1109		return -EOPNOTSUPP;
1110
1111	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1112		    sizeof(struct in6_addr)))
1113		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1114				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1115				 ICE_FLOW_FLD_OFF_INVAL, false);
1116	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1117			 sizeof(struct in6_addr)))
1118		*perfect_fltr = false;
1119	else
1120		return -EOPNOTSUPP;
1121
1122	/* Layer 4 source port */
1123	if (tcp_ip6_spec->psrc == htons(0xFFFF))
1124		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
1125				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1126				 false);
1127	else if (!tcp_ip6_spec->psrc)
1128		*perfect_fltr = false;
1129	else
1130		return -EOPNOTSUPP;
1131
1132	/* Layer 4 destination port */
1133	if (tcp_ip6_spec->pdst == htons(0xFFFF))
1134		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
1135				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1136				 false);
1137	else if (!tcp_ip6_spec->pdst)
1138		*perfect_fltr = false;
1139	else
1140		return -EOPNOTSUPP;
1141
1142	return 0;
1143}
1144
1145/**
1146 * ice_set_fdir_ip6_usr_seg
1147 * @seg: flow segment for programming
1148 * @usr_ip6_spec: ethtool userdef packet offset
1149 * @perfect_fltr: only valid on success; returns true if perfect filter,
1150 *		  false if not
1151 *
1152 * Set the offset data into the flow segment to be used to program HW
1153 * table for IPv6
1154 */
1155static int
1156ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
1157			 struct ethtool_usrip6_spec *usr_ip6_spec,
1158			 bool *perfect_fltr)
1159{
1160	/* filtering on Layer 4 bytes not supported */
1161	if (usr_ip6_spec->l4_4_bytes)
1162		return -EOPNOTSUPP;
1163	/* filtering on TC not supported */
1164	if (usr_ip6_spec->tclass)
1165		return -EOPNOTSUPP;
1166	/* filtering on Layer 4 protocol not supported */
1167	if (usr_ip6_spec->l4_proto)
1168		return -EOPNOTSUPP;
1169	/* empty rules are not valid */
1170	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1171		    sizeof(struct in6_addr)) &&
1172	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1173		    sizeof(struct in6_addr)))
1174		return -EINVAL;
1175
1176	*perfect_fltr = true;
1177	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
1178
1179	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
1180		    sizeof(struct in6_addr)))
1181		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1182				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1183				 ICE_FLOW_FLD_OFF_INVAL, false);
1184	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1185			 sizeof(struct in6_addr)))
1186		*perfect_fltr = false;
1187	else
1188		return -EOPNOTSUPP;
1189
1190	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1191		    sizeof(struct in6_addr)))
1192		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1193				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1194				 ICE_FLOW_FLD_OFF_INVAL, false);
1195	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1196			 sizeof(struct in6_addr)))
1197		*perfect_fltr = false;
1198	else
1199		return -EOPNOTSUPP;
1200
1201	return 0;
1202}
1203
1204/**
1205 * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule
1206 * @dev: network interface device structure
1207 * @fsp: pointer to ethtool Rx flow specification
1208 *
1209 * Return: true if vlan data is valid, false otherwise
1210 */
1211static bool ice_fdir_vlan_valid(struct device *dev,
1212				struct ethtool_rx_flow_spec *fsp)
1213{
1214	if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype))
1215		return false;
1216
1217	if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
1218		return false;
1219
1220	/* proto and vlan must have vlan-etype defined */
1221	if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
1222	    !fsp->m_ext.vlan_etype) {
 1223		dev_warn(dev, "Filters with proto and vlan also require vlan-etype");
1224		return false;
1225	}
1226
1227	return true;
1228}
1229
1230/**
1231 * ice_set_ether_flow_seg - set address and protocol segments for ether flow
1232 * @dev: network interface device structure
1233 * @seg: flow segment for programming
1234 * @eth_spec: mask data from ethtool
1235 *
1236 * Return: 0 on success and errno in case of error.
1237 */
1238static int ice_set_ether_flow_seg(struct device *dev,
1239				  struct ice_flow_seg_info *seg,
1240				  struct ethhdr *eth_spec)
1241{
1242	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH);
1243
1244	/* empty rules are not valid */
1245	if (is_zero_ether_addr(eth_spec->h_source) &&
1246	    is_zero_ether_addr(eth_spec->h_dest) &&
1247	    !eth_spec->h_proto)
1248		return -EINVAL;
1249
1250	/* Ethertype */
1251	if (eth_spec->h_proto == htons(0xFFFF)) {
1252		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE,
1253				 ICE_FLOW_FLD_OFF_INVAL,
1254				 ICE_FLOW_FLD_OFF_INVAL,
1255				 ICE_FLOW_FLD_OFF_INVAL, false);
1256	} else if (eth_spec->h_proto) {
1257		dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether");
1258		return -EOPNOTSUPP;
1259	}
1260
1261	/* Source MAC address */
1262	if (is_broadcast_ether_addr(eth_spec->h_source))
1263		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
1264				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1265				 ICE_FLOW_FLD_OFF_INVAL, false);
1266	else if (!is_zero_ether_addr(eth_spec->h_source))
1267		goto err_mask;
1268
1269	/* Destination MAC address */
1270	if (is_broadcast_ether_addr(eth_spec->h_dest))
1271		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
1272				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1273				 ICE_FLOW_FLD_OFF_INVAL, false);
1274	else if (!is_zero_ether_addr(eth_spec->h_dest))
1275		goto err_mask;
1276
1277	return 0;
1278
1279err_mask:
1280	dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether");
1281	return -EOPNOTSUPP;
1282}
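
/* An example userspace invocation that exercises this path (device, MAC and
 * rule location are illustrative): an exact destination-MAC drop rule via
 *
 *	ethtool -N eth0 flow-type ether dst 00:11:22:33:44:55 action -1 loc 1
 *
 * ethtool passes a broadcast mask for the fully specified dst field, which
 * the checks above accept; a partial MAC or EtherType mask is rejected with
 * -EOPNOTSUPP.
 */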
1283
1284/**
1285 * ice_set_fdir_vlan_seg - set vlan segments for ether flow
1286 * @seg: flow segment for programming
1287 * @ext_masks: masks for additional RX flow fields
1288 *
1289 * Return: 0 on success and errno in case of error.
1290 */
1291static int
1292ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg,
1293		      struct ethtool_flow_ext *ext_masks)
1294{
1295	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN);
1296
1297	if (ext_masks->vlan_etype) {
1298		if (ext_masks->vlan_etype != htons(0xFFFF))
1299			return -EOPNOTSUPP;
1300
1301		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN,
1302				 ICE_FLOW_FLD_OFF_INVAL,
1303				 ICE_FLOW_FLD_OFF_INVAL,
1304				 ICE_FLOW_FLD_OFF_INVAL, false);
1305	}
1306
1307	if (ext_masks->vlan_tci) {
1308		if (ext_masks->vlan_tci != htons(0xFFFF))
1309			return -EOPNOTSUPP;
1310
1311		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN,
1312				 ICE_FLOW_FLD_OFF_INVAL,
1313				 ICE_FLOW_FLD_OFF_INVAL,
1314				 ICE_FLOW_FLD_OFF_INVAL, false);
1315	}
1316
1317	return 0;
1318}
1319
1320/**
1321 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
1322 * @pf: PF structure
1323 * @fsp: pointer to ethtool Rx flow specification
1324 * @user: user defined data from flow specification
1325 *
1326 * Returns 0 on success.
1327 */
1328static int
1329ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
1330		       struct ice_rx_flow_userdef *user)
1331{
1332	struct ice_flow_seg_info *seg, *tun_seg;
1333	struct device *dev = ice_pf_to_dev(pf);
1334	enum ice_fltr_ptype fltr_idx;
1335	struct ice_hw *hw = &pf->hw;
1336	bool perfect_filter = false;
1337	int ret;
1338
1339	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
1340	if (!seg)
1341		return -ENOMEM;
1342
1343	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
1344			       GFP_KERNEL);
1345	if (!tun_seg) {
1346		devm_kfree(dev, seg);
1347		return -ENOMEM;
1348	}
1349
1350	switch (fsp->flow_type & ~FLOW_EXT) {
1351	case TCP_V4_FLOW:
1352		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1353					   ICE_FLOW_SEG_HDR_TCP,
1354					   &perfect_filter);
1355		break;
1356	case UDP_V4_FLOW:
1357		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1358					   ICE_FLOW_SEG_HDR_UDP,
1359					   &perfect_filter);
1360		break;
1361	case SCTP_V4_FLOW:
1362		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1363					   ICE_FLOW_SEG_HDR_SCTP,
1364					   &perfect_filter);
1365		break;
1366	case IPV4_USER_FLOW:
1367		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
1368					       &perfect_filter);
1369		break;
1370	case TCP_V6_FLOW:
1371		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1372					   ICE_FLOW_SEG_HDR_TCP,
1373					   &perfect_filter);
1374		break;
1375	case UDP_V6_FLOW:
1376		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1377					   ICE_FLOW_SEG_HDR_UDP,
1378					   &perfect_filter);
1379		break;
1380	case SCTP_V6_FLOW:
1381		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1382					   ICE_FLOW_SEG_HDR_SCTP,
1383					   &perfect_filter);
1384		break;
1385	case IPV6_USER_FLOW:
1386		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
1387					       &perfect_filter);
1388		break;
1389	case ETHER_FLOW:
1390		ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec);
1391		if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) {
1392			if (!ice_fdir_vlan_valid(dev, fsp)) {
1393				ret = -EINVAL;
1394				break;
1395			}
1396			ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext);
1397		}
1398		break;
1399	default:
1400		ret = -EINVAL;
1401	}
1402	if (ret)
1403		goto err_exit;
1404
1405	/* tunnel segments are shifted up one. */
1406	memcpy(&tun_seg[1], seg, sizeof(*seg));
1407
1408	if (user && user->flex_fltr) {
1409		perfect_filter = false;
1410		ice_flow_add_fld_raw(seg, user->flex_offset,
1411				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1412				     ICE_FLOW_FLD_OFF_INVAL,
1413				     ICE_FLOW_FLD_OFF_INVAL);
1414		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
1415				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1416				     ICE_FLOW_FLD_OFF_INVAL,
1417				     ICE_FLOW_FLD_OFF_INVAL);
1418	}
1419
1420	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
1421
1422	assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);
1423
1424	/* add filter for outer headers */
1425	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
1426					ICE_FD_HW_SEG_NON_TUN);
1427	if (ret == -EEXIST) {
1428		/* Rule already exists, free memory and count as success */
1429		ret = 0;
1430		goto err_exit;
1431	} else if (ret) {
1432		/* could not write filter, free memory */
1433		goto err_exit;
1434	}
1435
1436	/* make tunneled filter HW entries if possible */
1437	memcpy(&tun_seg[1], seg, sizeof(*seg));
1438	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
1439					ICE_FD_HW_SEG_TUN);
1440	if (ret == -EEXIST) {
1441		/* Rule already exists, free memory and count as success */
1442		devm_kfree(dev, tun_seg);
1443		ret = 0;
1444	} else if (ret) {
1445		/* could not write tunnel filter, but outer filter exists */
1446		devm_kfree(dev, tun_seg);
1447	}
1448
1449	return ret;
1450
1451err_exit:
1452	devm_kfree(dev, tun_seg);
1453	devm_kfree(dev, seg);
1454
1455	return ret;
1456}
1457
1458/**
1459 * ice_update_per_q_fltr
1460 * @vsi: ptr to VSI
1461 * @q_index: queue index
1462 * @inc: true to increment or false to decrement per queue filter count
1463 *
1464 * This function is used to keep track of per queue sideband filters
1465 */
1466static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
1467{
1468	struct ice_rx_ring *rx_ring;
1469
1470	if (!vsi->num_rxq || q_index >= vsi->num_rxq)
1471		return;
1472
1473	rx_ring = vsi->rx_rings[q_index];
1474	if (!rx_ring || !rx_ring->ch)
1475		return;
1476
1477	if (inc)
1478		atomic_inc(&rx_ring->ch->num_sb_fltr);
1479	else
1480		atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
1481}
1482
1483/**
1484 * ice_fdir_write_fltr - send a flow director filter to the hardware
1485 * @pf: PF data structure
1486 * @input: filter structure
 1487 * @add: true adds the filter and false removes it
 1488 * @is_tun: true programs the tunnel (inner) headers, false the outer headers
 1489 *
 1490 * Returns 0 on success and a negative value on error
1491 */
1492int
1493ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
1494		    bool is_tun)
1495{
1496	struct device *dev = ice_pf_to_dev(pf);
1497	struct ice_hw *hw = &pf->hw;
1498	struct ice_fltr_desc desc;
1499	struct ice_vsi *ctrl_vsi;
1500	u8 *pkt, *frag_pkt;
1501	bool has_frag;
1502	int err;
1503
1504	ctrl_vsi = ice_get_ctrl_vsi(pf);
1505	if (!ctrl_vsi)
1506		return -EINVAL;
1507
1508	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1509	if (!pkt)
1510		return -ENOMEM;
1511	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1512	if (!frag_pkt) {
1513		err = -ENOMEM;
1514		goto err_free;
1515	}
1516
1517	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1518	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1519	if (err)
1520		goto err_free_all;
1521	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1522	if (err)
1523		goto err_free_all;
1524
1525	/* repeat for fragment packet */
1526	has_frag = ice_fdir_has_frag(input->flow_type);
1527	if (has_frag) {
1528		/* does not return error */
1529		ice_fdir_get_prgm_desc(hw, input, &desc, add);
1530		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
1531						is_tun);
1532		if (err)
1533			goto err_frag;
1534		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
1535		if (err)
1536			goto err_frag;
1537	} else {
1538		devm_kfree(dev, frag_pkt);
1539	}
1540
1541	return 0;
1542
1543err_free_all:
1544	devm_kfree(dev, frag_pkt);
1545err_free:
1546	devm_kfree(dev, pkt);
1547	return err;
1548
1549err_frag:
1550	devm_kfree(dev, frag_pkt);
1551	return err;
1552}
1553
1554/**
 1555 * ice_fdir_write_all_fltr - write a flow director filter for each tunnel type
1556 * @pf: PF data structure
1557 * @input: filter structure
 1558 * @add: true adds the filter and false removes it
 1559 *
 1560 * Returns 0 on success and a negative value on error
1561 */
1562static int
1563ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
1564			bool add)
1565{
1566	u16 port_num;
1567	int tun;
1568
1569	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
1570		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
1571		int err;
1572
1573		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
1574			continue;
1575		err = ice_fdir_write_fltr(pf, input, add, is_tun);
1576		if (err)
1577			return err;
1578	}
1579	return 0;
1580}
1581
1582/**
1583 * ice_fdir_replay_fltrs - replay filters from the HW filter list
1584 * @pf: board private structure
1585 */
1586void ice_fdir_replay_fltrs(struct ice_pf *pf)
1587{
1588	struct ice_fdir_fltr *f_rule;
1589	struct ice_hw *hw = &pf->hw;
1590
1591	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
1592		int err = ice_fdir_write_all_fltr(pf, f_rule, true);
1593
1594		if (err)
1595			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
1596				err, f_rule->fltr_id);
1597	}
1598}
1599
1600/**
1601 * ice_fdir_create_dflt_rules - create default perfect filters
1602 * @pf: PF data structure
1603 *
 1604 * Returns 0 on success or a negative error code.
1605 */
1606int ice_fdir_create_dflt_rules(struct ice_pf *pf)
1607{
1608	int err;
1609
1610	/* Create perfect TCP and UDP rules in hardware. */
1611	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
1612	if (err)
1613		return err;
1614
1615	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
1616	if (err)
1617		return err;
1618
1619	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
1620	if (err)
1621		return err;
1622
1623	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
1624
1625	return err;
1626}
1627
1628/**
1629 * ice_fdir_del_all_fltrs - Delete all flow director filters
1630 * @vsi: the VSI being changed
1631 *
1632 * This function needs to be called while holding hw->fdir_fltr_lock
1633 */
1634void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
1635{
1636	struct ice_fdir_fltr *f_rule, *tmp;
1637	struct ice_pf *pf = vsi->back;
1638	struct ice_hw *hw = &pf->hw;
1639
1640	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
1641		ice_fdir_write_all_fltr(pf, f_rule, false);
1642		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
1643		list_del(&f_rule->fltr_node);
1644		devm_kfree(ice_pf_to_dev(pf), f_rule);
1645	}
1646}
1647
1648/**
1649 * ice_vsi_manage_fdir - turn on/off flow director
1650 * @vsi: the VSI being changed
1651 * @ena: boolean value indicating if this is an enable or disable request
1652 */
1653void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
1654{
1655	struct ice_pf *pf = vsi->back;
1656	struct ice_hw *hw = &pf->hw;
1657	enum ice_fltr_ptype flow;
1658
1659	if (ena) {
1660		set_bit(ICE_FLAG_FD_ENA, pf->flags);
1661		ice_fdir_create_dflt_rules(pf);
1662		return;
1663	}
1664
1665	mutex_lock(&hw->fdir_fltr_lock);
1666	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
1667		goto release_lock;
1668
1669	ice_fdir_del_all_fltrs(vsi);
1670
1671	if (hw->fdir_prof)
1672		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
1673		     flow++)
1674			if (hw->fdir_prof[flow])
1675				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
1676
1677release_lock:
1678	mutex_unlock(&hw->fdir_fltr_lock);
1679}
1680
1681/**
1682 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
1683 * @pf: PF structure
1684 * @flow_type: FDir flow type to release
1685 */
1686static void
1687ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
1688{
1689	struct ice_hw *hw = &pf->hw;
1690	bool need_perfect = false;
1691
1692	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
1693	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
1694	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
1695	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1696		need_perfect = true;
1697
1698	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
1699		return;
1700
1701	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
1702	if (need_perfect)
1703		ice_create_init_fdir_rule(pf, flow_type);
1704}
1705
1706/**
1707 * ice_fdir_update_list_entry - add or delete a filter from the filter list
1708 * @pf: PF structure
1709 * @input: filter structure
1710 * @fltr_idx: ethtool index of filter to modify
1711 *
1712 * returns 0 on success and negative on errors
1713 */
1714static int
1715ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
1716			   int fltr_idx)
1717{
1718	struct ice_fdir_fltr *old_fltr;
1719	struct ice_hw *hw = &pf->hw;
1720	struct ice_vsi *vsi;
1721	int err = -ENOENT;
1722
1723	/* Do not update filters during reset */
1724	if (ice_is_reset_in_progress(pf->state))
1725		return -EBUSY;
1726
1727	vsi = ice_get_main_vsi(pf);
1728	if (!vsi)
1729		return -EINVAL;
1730
1731	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
1732	if (old_fltr) {
1733		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
1734		if (err)
1735			return err;
1736		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
1737		/* update sb-filters count, specific to ring->channel */
1738		ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
1739		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
1740			/* we just deleted the last filter of flow_type so we
1741			 * should also delete the HW filter info.
1742			 */
1743			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
1744		list_del(&old_fltr->fltr_node);
1745		devm_kfree(ice_hw_to_dev(hw), old_fltr);
1746	}
1747	if (!input)
1748		return err;
1749	ice_fdir_list_add_fltr(hw, input);
1750	/* update sb-filters count, specific to ring->channel */
1751	ice_update_per_q_fltr(vsi, input->orig_q_index, true);
1752	ice_fdir_update_cntrs(hw, input->flow_type, true);
1753	return 0;
1754}
1755
1756/**
1757 * ice_del_fdir_ethtool - delete Flow Director filter
1758 * @vsi: pointer to target VSI
1759 * @cmd: command to add or delete Flow Director filter
1760 *
1761 * Returns 0 on success and negative values for failure
1762 */
1763int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1764{
1765	struct ethtool_rx_flow_spec *fsp =
1766		(struct ethtool_rx_flow_spec *)&cmd->fs;
1767	struct ice_pf *pf = vsi->back;
1768	struct ice_hw *hw = &pf->hw;
1769	int val;
1770
1771	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1772		return -EOPNOTSUPP;
1773
1774	/* Do not delete filters during reset */
1775	if (ice_is_reset_in_progress(pf->state)) {
1776		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
1777		return -EBUSY;
1778	}
1779
1780	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
1781		return -EBUSY;
1782
1783	mutex_lock(&hw->fdir_fltr_lock);
1784	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
1785	mutex_unlock(&hw->fdir_fltr_lock);
1786
1787	return val;
1788}
1789
1790/**
1791 * ice_update_ring_dest_vsi - update dest ring and dest VSI
1792 * @vsi: pointer to target VSI
1793 * @dest_vsi: ptr to dest VSI index
1794 * @ring: ptr to dest ring
1795 *
 1796 * This function updates the destination VSI and queue if the user specifies
 1797 * a target queue that falls in a channel's (aka ADQ) queue region
1798 */
1799static void
1800ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
1801{
1802	struct ice_channel *ch;
1803
1804	list_for_each_entry(ch, &vsi->ch_list, list) {
1805		if (!ch->ch_vsi)
1806			continue;
1807
 1808		/* make sure to locate the corresponding channel based on the
 1809		 * "queue" specified
 1810		 */
1811		if ((*ring < ch->base_q) ||
1812		    (*ring >= (ch->base_q + ch->num_rxq)))
1813			continue;
1814
1815		/* update the dest_vsi based on channel */
1816		*dest_vsi = ch->ch_vsi->idx;
1817
1818		/* update the "ring" to be correct based on channel */
1819		*ring -= ch->base_q;
1820	}
1821}
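
/* A worked example with assumed values: given an ADQ channel with base_q = 8
 * and num_rxq = 4, a user-requested queue of 10 falls inside that region, so
 * *dest_vsi is rewritten to the channel VSI index and *ring becomes
 * 10 - 8 = 2, i.e. the third queue of that channel.
 */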
1822
1823/**
1824 * ice_set_fdir_input_set - Set the input set for Flow Director
1825 * @vsi: pointer to target VSI
1826 * @fsp: pointer to ethtool Rx flow specification
1827 * @input: filter structure
1828 */
1829static int
1830ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
1831		       struct ice_fdir_fltr *input)
1832{
1833	s16 q_index = ICE_FDIR_NO_QUEUE_IDX;
1834	u16 orig_q_index = 0;
1835	struct ice_pf *pf;
1836	struct ice_hw *hw;
1837	int flow_type;
1838	u16 dest_vsi;
1839	u8 dest_ctl;
1840
1841	if (!vsi || !fsp || !input)
1842		return -EINVAL;
1843
1844	pf = vsi->back;
1845	hw = &pf->hw;
1846
1847	dest_vsi = vsi->idx;
1848	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1849		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1850	} else {
1851		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
1852		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
1853
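		/* ring_cookie layout (ETHTOOL_RX_FLOW_SPEC_RING* in
		 * uapi/linux/ethtool.h): bits 0-31 carry the ring index and
		 * bits 32-39 a VF designator (zero meaning the PF itself),
		 * which is why a nonzero vf is rejected below.
		 */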
1854		if (vf) {
1855			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
1856			return -EINVAL;
1857		}
1858
1859		if (ring >= vsi->num_rxq)
1860			return -EINVAL;
1861
1862		orig_q_index = ring;
1863		ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
1864		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1865		q_index = ring;
1866	}
1867
1868	input->fltr_id = fsp->location;
1869	input->q_index = q_index;
1870	flow_type = fsp->flow_type & ~FLOW_EXT;
1871
1872	/* Record the original queue index as specified by the user.
1873	 * With channel configuration, 'q_index' becomes relative
1874	 * to the TC (channel).
1875	 */
1876	input->orig_q_index = orig_q_index;
1877	input->dest_vsi = dest_vsi;
1878	input->dest_ctl = dest_ctl;
1879	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
1880	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
1881	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);
1882
1883	if (fsp->flow_type & FLOW_EXT) {
1884		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
1885		       sizeof(input->ext_data.usr_def));
1886		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
1887		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
1888		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
1889		       sizeof(input->ext_mask.usr_def));
1890		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
1891		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
1892	}
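	/* A hedged CLI sketch for the FLOW_EXT path above (placeholder device
	 * and values): ethtool's "vlan" and "user-def" options populate the
	 * h_ext/m_ext fields copied here, e.g.
	 *
	 *	ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.2 \
	 *		vlan 0x123 user-def 0x0004abcd action 2
	 *
	 * arrives here with vlan_tci 0x123 and the 64-bit user-def value,
	 * which ice_parse_rx_flow_user_data() decodes into a flex word
	 * (0xabcd) and offset (0x4) before this function runs.
	 */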
1893
1894	switch (flow_type) {
1895	case TCP_V4_FLOW:
1896	case UDP_V4_FLOW:
1897	case SCTP_V4_FLOW:
1898		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1899		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1900		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1901		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1902		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1903		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1904		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1905		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1906		break;
1907	case IPV4_USER_FLOW:
1908		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1909		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1910		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1911		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
1912		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
1913		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
1914		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1915		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1916		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1917		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
1918		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
1919		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
1920		break;
1921	case TCP_V6_FLOW:
1922	case UDP_V6_FLOW:
1923	case SCTP_V6_FLOW:
1924		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1925		       sizeof(struct in6_addr));
1926		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1927		       sizeof(struct in6_addr));
1928		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1929		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1930		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
1931		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
1932		       sizeof(struct in6_addr));
1933		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
1934		       sizeof(struct in6_addr));
1935		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1936		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1937		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
1938		break;
1939	case IPV6_USER_FLOW:
1940		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1941		       sizeof(struct in6_addr));
1942		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1943		       sizeof(struct in6_addr));
1944		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1945		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
1946
1947		/* if no protocol requested, use IPPROTO_NONE */
1948		if (!fsp->m_u.usr_ip6_spec.l4_proto)
1949			input->ip.v6.proto = IPPROTO_NONE;
1950		else
1951			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1952
1953		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1954		       sizeof(struct in6_addr));
1955		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1956		       sizeof(struct in6_addr));
1957		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1958		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
1959		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1960		break;
1961	case ETHER_FLOW:
1962		input->eth = fsp->h_u.ether_spec;
1963		input->eth_mask = fsp->m_u.ether_spec;
1964		break;
1965	default:
1966		/* not doing un-parsed flow types */
1967		return -EINVAL;
1968	}
1969
1970	return 0;
1971}
1972
1973/**
1974 * ice_add_fdir_ethtool - Add/Remove Flow Director filter
1975 * @vsi: pointer to target VSI
1976 * @cmd: command to add or delete Flow Director filter
1977 *
1978 * Returns 0 on success and negative values for failure
1979 */
1980int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1981{
1982	struct ice_rx_flow_userdef userdata;
1983	struct ethtool_rx_flow_spec *fsp;
1984	struct ice_fdir_fltr *input;
1985	struct device *dev;
1986	struct ice_pf *pf;
1987	struct ice_hw *hw;
1988	int fltrs_needed;
1989	u32 max_location;
1990	u16 tunnel_port;
1991	int ret;
1992
1993	if (!vsi)
1994		return -EINVAL;
1995
1996	pf = vsi->back;
1997	hw = &pf->hw;
1998	dev = ice_pf_to_dev(pf);
1999
2000	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
2001		return -EOPNOTSUPP;
2002
2003	/* Do not program filters during reset */
2004	if (ice_is_reset_in_progress(pf->state)) {
2005		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
2006		return -EBUSY;
2007	}
2008
2009	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
2010
2011	if (ice_parse_rx_flow_user_data(fsp, &userdata))
2012		return -EINVAL;
2013
2014	if (fsp->flow_type & FLOW_MAC_EXT)
2015		return -EINVAL;
2016
2017	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
2018	if (ret)
2019		return ret;
2020
2021	max_location = ice_get_fdir_cnt_all(hw);
2022	if (fsp->location >= max_location) {
2023		dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceeds the max %d.\n",
2024			max_location);
2025		return -ENOSPC;
2026	}
2027
2028	/* return error if not an update and no available filters */
2029	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
2030	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
2031	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
2032		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
2033		return -ENOSPC;
2034	}
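	/* Two slots per rule are needed above while a tunnel port is open
	 * because ice_fdir_write_all_fltr() programs both an outer-header
	 * rule and a tunneled rule for the same filter.
	 */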
2035
2036	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
2037	if (!input)
2038		return -ENOMEM;
2039
2040	ret = ice_set_fdir_input_set(vsi, fsp, input);
2041	if (ret)
2042		goto free_input;
2043
2044	mutex_lock(&hw->fdir_fltr_lock);
2045	if (ice_fdir_is_dup_fltr(hw, input)) {
2046		ret = -EINVAL;
2047		goto release_lock;
2048	}
2049
2050	if (userdata.flex_fltr) {
2051		input->flex_fltr = true;
2052		input->flex_word = cpu_to_be16(userdata.flex_word);
2053		input->flex_offset = userdata.flex_offset;
2054	}
2055
2056	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
2057	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
2058	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
2059
2060	/* input struct is added to the HW filter list */
2061	ret = ice_fdir_update_list_entry(pf, input, fsp->location);
2062	if (ret)
2063		goto release_lock;
2064
2065	ret = ice_fdir_write_all_fltr(pf, input, true);
2066	if (ret)
2067		goto remove_sw_rule;
2068
2069	goto release_lock;
2070
2071remove_sw_rule:
2072	ice_fdir_update_cntrs(hw, input->flow_type, false);
2073	/* update sb-filters count, specific to ring->channel */
2074	ice_update_per_q_fltr(vsi, input->orig_q_index, false);
2075	list_del(&input->fltr_node);
2076release_lock:
2077	mutex_unlock(&hw->fdir_fltr_lock);
2078free_input:
2079	if (ret)
2080		devm_kfree(dev, input);
2081
2082	return ret;
2083}
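
/* End-to-end CLI sketches for the add path above (device, addresses and
 * locations are placeholders); both commands reach ice_add_fdir_ethtool()
 * via the ETHTOOL_SRXCLSRLINS ioctl:
 *
 *	# steer TCP/IPv4 traffic for 192.168.0.10:5201 to Rx queue 6
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.10 dst-port 5201 \
 *		action 6 loc 3
 *
 *	# drop matching packets instead: action -1 maps to RX_CLS_FLOW_DISC
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.10 dst-port 5201 \
 *		action -1 loc 4
 */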
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2018-2020, Intel Corporation. */
   3
   4/* flow director ethtool support for ice */
   5
   6#include "ice.h"
   7#include "ice_lib.h"
   8#include "ice_fdir.h"
   9#include "ice_flow.h"
  10
  11static struct in6_addr full_ipv6_addr_mask = {
  12	.in6_u = {
  13		.u6_addr8 = {
  14			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  15			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  16		}
  17	}
  18};
  19
  20static struct in6_addr zero_ipv6_addr_mask = {
  21	.in6_u = {
  22		.u6_addr8 = {
  23			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  24			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  25		}
  26	}
  27};
  28
  29/* calls to ice_flow_add_prof require the number of segments in the array
  30 * for segs_cnt. In this code that is one more than the index.
  31 */
  32#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
  33
  34/**
  35 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
  36 * flow type values
  37 * @flow: filter type to be converted
  38 *
  39 * Returns the corresponding ethtool flow type.
  40 */
  41static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
  42{
  43	switch (flow) {
 
 
  44	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
  45		return TCP_V4_FLOW;
  46	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
  47		return UDP_V4_FLOW;
  48	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
  49		return SCTP_V4_FLOW;
  50	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
  51		return IPV4_USER_FLOW;
  52	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
  53		return TCP_V6_FLOW;
  54	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
  55		return UDP_V6_FLOW;
  56	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
  57		return SCTP_V6_FLOW;
  58	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
  59		return IPV6_USER_FLOW;
  60	default:
  61		/* 0 is undefined ethtool flow */
  62		return 0;
  63	}
  64}
  65
  66/**
  67 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
  68 * @eth: Ethtool flow type to be converted
  69 *
  70 * Returns flow enum
  71 */
  72static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
  73{
  74	switch (eth) {
 
 
  75	case TCP_V4_FLOW:
  76		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
  77	case UDP_V4_FLOW:
  78		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
  79	case SCTP_V4_FLOW:
  80		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
  81	case IPV4_USER_FLOW:
  82		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
  83	case TCP_V6_FLOW:
  84		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
  85	case UDP_V6_FLOW:
  86		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
  87	case SCTP_V6_FLOW:
  88		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
  89	case IPV6_USER_FLOW:
  90		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
  91	default:
  92		return ICE_FLTR_PTYPE_NONF_NONE;
  93	}
  94}
  95
  96/**
  97 * ice_is_mask_valid - check mask field set
  98 * @mask: full mask to check
  99 * @field: field for which mask should be valid
 100 *
 101 * If the mask is fully set return true. If it is not valid for field return
 102 * false.
 103 */
 104static bool ice_is_mask_valid(u64 mask, u64 field)
 105{
 106	return (mask & field) == field;
 107}
 108
 109/**
 110 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 111 * @hw: hardware structure that contains filter list
 112 * @cmd: ethtool command data structure to receive the filter data
 113 *
 114 * Returns 0 on success and -EINVAL on failure
 115 */
 116int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
 117{
 118	struct ethtool_rx_flow_spec *fsp;
 119	struct ice_fdir_fltr *rule;
 120	int ret = 0;
 121	u16 idx;
 122
 123	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
 124
 125	mutex_lock(&hw->fdir_fltr_lock);
 126
 127	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);
 128
 129	if (!rule || fsp->location != rule->fltr_id) {
 130		ret = -EINVAL;
 131		goto release_lock;
 132	}
 133
 134	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);
 135
 136	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
 137	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
 138
 139	switch (fsp->flow_type) {
 
 
 
 
 140	case IPV4_USER_FLOW:
 141		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 142		fsp->h_u.usr_ip4_spec.proto = 0;
 143		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
 144		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
 145		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
 146		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
 147		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
 148		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
 149		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
 150		fsp->m_u.usr_ip4_spec.proto = 0;
 151		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
 152		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
 153		break;
 154	case TCP_V4_FLOW:
 155	case UDP_V4_FLOW:
 156	case SCTP_V4_FLOW:
 157		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
 158		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
 159		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
 160		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
 161		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
 162		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
 163		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
 164		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
 165		break;
 166	case IPV6_USER_FLOW:
 167		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
 168		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
 169		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
 170		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
 171		       sizeof(struct in6_addr));
 172		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
 173		       sizeof(struct in6_addr));
 174		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
 175		       sizeof(struct in6_addr));
 176		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
 177		       sizeof(struct in6_addr));
 178		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
 179		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
 180		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
 181		break;
 182	case TCP_V6_FLOW:
 183	case UDP_V6_FLOW:
 184	case SCTP_V6_FLOW:
 185		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
 186		       sizeof(struct in6_addr));
 187		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
 188		       sizeof(struct in6_addr));
 189		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
 190		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
 191		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
 192		       rule->mask.v6.src_ip,
 193		       sizeof(struct in6_addr));
 194		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
 195		       rule->mask.v6.dst_ip,
 196		       sizeof(struct in6_addr));
 197		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
 198		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
 199		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
 200		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
 201		break;
 202	default:
 203		break;
 204	}
 205
 206	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
 207		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 208	else
 209		fsp->ring_cookie = rule->orig_q_index;
 210
 211	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
 212	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
 213		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
 214			rule->flow_type);
 215		ret = -EINVAL;
 216	}
 217
 218release_lock:
 219	mutex_unlock(&hw->fdir_fltr_lock);
 220	return ret;
 221}
 222
 223/**
 224 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 225 * @hw: hardware structure containing the filter list
 226 * @cmd: ethtool command data structure
 227 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 228 *
 229 * Returns 0 as expected for success by ethtool
 230 */
 231int
 232ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
 233		      u32 *rule_locs)
 234{
 235	struct ice_fdir_fltr *f_rule;
 236	unsigned int cnt = 0;
 237	int val = 0;
 238
 239	/* report total rule count */
 240	cmd->data = ice_get_fdir_cnt_all(hw);
 241
 242	mutex_lock(&hw->fdir_fltr_lock);
 243
 244	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
 245		if (cnt == cmd->rule_cnt) {
 246			val = -EMSGSIZE;
 247			goto release_lock;
 248		}
 249		rule_locs[cnt] = f_rule->fltr_id;
 250		cnt++;
 251	}
 252
 253release_lock:
 254	mutex_unlock(&hw->fdir_fltr_lock);
 255	if (!val)
 256		cmd->rule_cnt = cnt;
 257	return val;
 258}
 259
 260/**
 261 * ice_fdir_remap_entries - update the FDir entries in profile
 262 * @prof: FDir structure pointer
 263 * @tun: tunneled or non-tunneled packet
 264 * @idx: FDir entry index
 265 */
 266static void
 267ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
 268{
 269	if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
 270		int i;
 271
 272		for (i = idx; i < (prof->cnt - 1); i++) {
 273			u64 old_entry_h;
 274
 275			old_entry_h = prof->entry_h[i + 1][tun];
 276			prof->entry_h[i][tun] = old_entry_h;
 277			prof->vsi_h[i] = prof->vsi_h[i + 1];
 278		}
 279
 280		prof->entry_h[i][tun] = 0;
 281		prof->vsi_h[i] = 0;
 282	}
 283}
 284
 285/**
 286 * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
 287 * @hw: hardware structure containing filter list
 288 * @vsi_idx: VSI handle
 289 */
 290void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
 291{
 292	int status, flow;
 293
 294	if (!hw->fdir_prof)
 295		return;
 296
 297	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
 298		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
 299		int tun, i;
 300
 301		if (!prof || !prof->cnt)
 302			continue;
 303
 304		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 305			u64 prof_id;
 306
 307			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
 308
 309			for (i = 0; i < prof->cnt; i++) {
 310				if (prof->vsi_h[i] != vsi_idx)
 311					continue;
 312
 313				prof->entry_h[i][tun] = 0;
 314				prof->vsi_h[i] = 0;
 315				break;
 316			}
 317
 318			/* after clearing FDir entries update the remaining */
 319			ice_fdir_remap_entries(prof, tun, i);
 320
 321			/* find flow profile corresponding to prof_id and clear
 322			 * vsi_idx from bitmap.
 323			 */
 324			status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
 325			if (status) {
 326				dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
 327					status);
 328			}
 329		}
 330		prof->cnt--;
 331	}
 332}
 333
 334/**
 335 * ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow
 336 * @hw: hardware structure containing the filter list
 337 * @blk: hardware block
 338 * @flow: FDir flow type to release
 339 */
 340static struct ice_fd_hw_prof *
 341ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
 342{
 343	if (blk == ICE_BLK_FD && hw->fdir_prof)
 344		return hw->fdir_prof[flow];
 345
 346	return NULL;
 347}
 348
 349/**
 350 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 351 * @hw: hardware structure containing the filter list
 352 * @blk: hardware block
 353 * @flow: FDir flow type to release
 354 */
 355static void
 356ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
 357{
 358	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
 359	int tun;
 360
 361	if (!prof)
 362		return;
 363
 364	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 365		u64 prof_id;
 366		int j;
 367
 368		prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
 369		for (j = 0; j < prof->cnt; j++) {
 370			u16 vsi_num;
 371
 372			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
 373				continue;
 374			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
 375			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
 376			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
 377			prof->entry_h[j][tun] = 0;
 378		}
 379		ice_flow_rem_prof(hw, blk, prof_id);
 380	}
 381}
 382
 383/**
 384 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 385 * @hw: hardware structure containing the filter list
 386 * @blk: hardware block
 387 * @flow_type: FDir flow type to release
 388 */
 389static void
 390ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
 391		  enum ice_fltr_ptype flow_type)
 392{
 393	int flow = (int)flow_type & ~FLOW_EXT;
 394	struct ice_fd_hw_prof *prof;
 395	int tun, i;
 396
 397	prof = ice_fdir_get_hw_prof(hw, blk, flow);
 398	if (!prof)
 399		return;
 400
 401	ice_fdir_erase_flow_from_hw(hw, blk, flow);
 402	for (i = 0; i < prof->cnt; i++)
 403		prof->vsi_h[i] = 0;
 404	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 405		if (!prof->fdir_seg[tun])
 406			continue;
 407		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
 408		prof->fdir_seg[tun] = NULL;
 409	}
 410	prof->cnt = 0;
 411}
 412
 413/**
 414 * ice_fdir_release_flows - release all flows in use for later replay
 415 * @hw: pointer to HW instance
 416 */
 417void ice_fdir_release_flows(struct ice_hw *hw)
 418{
 419	int flow;
 420
 421	/* release Flow Director HW table entries */
 422	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
 423		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
 424}
 425
 426/**
 427 * ice_fdir_replay_flows - replay HW Flow Director filter info
 428 * @hw: pointer to HW instance
 429 */
 430void ice_fdir_replay_flows(struct ice_hw *hw)
 431{
 432	int flow;
 433
 434	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
 435		int tun;
 436
 437		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
 438			continue;
 439		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 440			struct ice_flow_prof *hw_prof;
 441			struct ice_fd_hw_prof *prof;
 442			u64 prof_id;
 443			int j;
 444
 445			prof = hw->fdir_prof[flow];
 446			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
 447			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
 448					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
 449					  &hw_prof);
 450			for (j = 0; j < prof->cnt; j++) {
 451				enum ice_flow_priority prio;
 452				u64 entry_h = 0;
 453				int err;
 454
 455				prio = ICE_FLOW_PRIO_NORMAL;
 456				err = ice_flow_add_entry(hw, ICE_BLK_FD,
 457							 prof_id,
 458							 prof->vsi_h[0],
 459							 prof->vsi_h[j],
 460							 prio, prof->fdir_seg,
 461							 &entry_h);
 462				if (err) {
 463					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
 464						flow);
 465					continue;
 466				}
 
 467				prof->entry_h[j][tun] = entry_h;
 468			}
 469		}
 470	}
 471}
 472
 473/**
 474 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 475 * @fsp: pointer to ethtool Rx flow specification
 476 * @data: pointer to userdef data structure for storage
 477 *
 478 * Returns 0 on success, negative error value on failure
 479 */
 480static int
 481ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 482			    struct ice_rx_flow_userdef *data)
 483{
 484	u64 value, mask;
 485
 486	memset(data, 0, sizeof(*data));
 487	if (!(fsp->flow_type & FLOW_EXT))
 488		return 0;
 489
 490	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
 491	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
 492	if (!mask)
 493		return 0;
 494
 495#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
 496#define ICE_USERDEF_FLEX_OFFS_S	16
 497#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
 498#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)
 499
 500	/* 0x1fe is the maximum value for offsets stored in the internal
 501	 * filtering tables.
 502	 */
 503#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
 504
 505	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
 506	    value > ICE_USERDEF_FLEX_FLTR_M)
 507		return -EINVAL;
 508
 509	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
 510	data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
 511			     ICE_USERDEF_FLEX_OFFS_S;
 512	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
 513		return -EINVAL;
 514
 515	data->flex_fltr = true;
 516
 517	return 0;
 518}
 519
 520/**
 521 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 522 * @hw: pointer to hardware structure
 523 * @vsi: software VSI structure
 524 *
 525 * There are 2 filter pools: guaranteed and best effort(shared). Each VSI can
 526 * use filters from either pool. The guaranteed pool is divided between VSIs.
 527 * The best effort filter pool is common to all VSIs and is a device shared
 528 * resource pool. The number of filters available to this VSI is the sum of
 529 * the VSIs guaranteed filter pool and the global available best effort
 530 * filter pool.
 531 *
 532 * Returns the number of available flow director filters to this VSI
 533 */
 534static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
 535{
 536	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
 537	u16 num_guar;
 538	u16 num_be;
 539
 540	/* total guaranteed filters assigned to this VSI */
 541	num_guar = vsi->num_gfltr;
 542
 543	/* minus the guaranteed filters programed by this VSI */
 544	num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
 545		     VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
 546
 547	/* total global best effort filters */
 548	num_be = hw->func_caps.fd_fltr_best_effort;
 549
 550	/* minus the global best effort filters programmed */
 551	num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
 552		   GLQF_FD_CNT_FD_BCNT_S;
 
 
 
 
 
 
 
 
 
 
 
 
 553
 554	return num_guar + num_be;
 555}
 556
 557/**
 558 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 559 * @hw: HW structure containing the FDir flow profile structure(s)
 560 * @flow: flow type to allocate the flow profile for
 561 *
 562 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 563 * on success and negative on error.
 564 */
 565static int
 566ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
 567{
 568	if (!hw)
 569		return -EINVAL;
 570
 571	if (!hw->fdir_prof) {
 572		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
 573					     ICE_FLTR_PTYPE_MAX,
 574					     sizeof(*hw->fdir_prof),
 575					     GFP_KERNEL);
 576		if (!hw->fdir_prof)
 577			return -ENOMEM;
 578	}
 579
 580	if (!hw->fdir_prof[flow]) {
 581		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
 582						   sizeof(**hw->fdir_prof),
 583						   GFP_KERNEL);
 584		if (!hw->fdir_prof[flow])
 585			return -ENOMEM;
 586	}
 587
 588	return 0;
 589}
 590
 591/**
 592 * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
 593 * @prof: pointer to flow director HW profile
 594 * @vsi_idx: vsi_idx to locate
 595 *
 596 * return the index of the vsi_idx. if vsi_idx is not found insert it
 597 * into the vsi_h table.
 598 */
 599static u16
 600ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
 601{
 602	u16 idx = 0;
 603
 604	for (idx = 0; idx < prof->cnt; idx++)
 605		if (prof->vsi_h[idx] == vsi_idx)
 606			return idx;
 607
 608	if (idx == prof->cnt)
 609		prof->vsi_h[prof->cnt++] = vsi_idx;
 610	return idx;
 611}
 612
 613/**
 614 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 615 * @pf: pointer to the PF structure
 616 * @seg: protocol header description pointer
 617 * @flow: filter enum
 618 * @tun: FDir segment to program
 619 */
 620static int
 621ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
 622			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
 623{
 624	struct device *dev = ice_pf_to_dev(pf);
 625	struct ice_vsi *main_vsi, *ctrl_vsi;
 626	struct ice_flow_seg_info *old_seg;
 627	struct ice_flow_prof *prof = NULL;
 628	struct ice_fd_hw_prof *hw_prof;
 629	struct ice_hw *hw = &pf->hw;
 630	u64 entry1_h = 0;
 631	u64 entry2_h = 0;
 632	bool del_last;
 633	u64 prof_id;
 634	int err;
 635	int idx;
 636
 637	main_vsi = ice_get_main_vsi(pf);
 638	if (!main_vsi)
 639		return -EINVAL;
 640
 641	ctrl_vsi = ice_get_ctrl_vsi(pf);
 642	if (!ctrl_vsi)
 643		return -EINVAL;
 644
 645	err = ice_fdir_alloc_flow_prof(hw, flow);
 646	if (err)
 647		return err;
 648
 649	hw_prof = hw->fdir_prof[flow];
 650	old_seg = hw_prof->fdir_seg[tun];
 651	if (old_seg) {
 652		/* This flow_type already has a changed input set.
 653		 * If it matches the requested input set then we are
 654		 * done. Or, if it's different then it's an error.
 655		 */
 656		if (!memcmp(old_seg, seg, sizeof(*seg)))
 657			return -EEXIST;
 658
 659		/* if there are FDir filters using this flow,
 660		 * then return error.
 661		 */
 662		if (hw->fdir_fltr_cnt[flow]) {
 663			dev_err(dev, "Failed to add filter.  Flow director filters on each port must have the same input set.\n");
 664			return -EINVAL;
 665		}
 666
 667		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
 668			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
 669				flow);
 670			return -EINVAL;
 671		}
 672
 673		/* remove HW filter definition */
 674		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
 675	}
 676
 677	/* Adding a profile, but there is only one header supported.
 678	 * That is the final parameters are 1 header (segment), no
 679	 * actions (NULL) and zero actions 0.
 680	 */
 681	prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
 682	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
 683				TNL_SEG_CNT(tun), &prof);
 684	if (err)
 685		return err;
 686	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
 687				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 688				 seg, &entry1_h);
 689	if (err)
 690		goto err_prof;
 691	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
 692				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 693				 seg, &entry2_h);
 694	if (err)
 695		goto err_entry;
 696
 697	hw_prof->fdir_seg[tun] = seg;
 
 698	hw_prof->entry_h[0][tun] = entry1_h;
 699	hw_prof->entry_h[1][tun] = entry2_h;
 700	hw_prof->vsi_h[0] = main_vsi->idx;
 701	hw_prof->vsi_h[1] = ctrl_vsi->idx;
 702	if (!hw_prof->cnt)
 703		hw_prof->cnt = 2;
 704
 705	for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
 706		u16 vsi_idx;
 707		u16 vsi_h;
 708
 709		if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
 710			continue;
 711
 712		entry1_h = 0;
 713		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
 714		err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
 715					 main_vsi->idx, vsi_h,
 716					 ICE_FLOW_PRIO_NORMAL, seg,
 717					 &entry1_h);
 718		if (err) {
 719			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
 720				idx);
 721			goto err_unroll;
 722		}
 723
 724		vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
 725						main_vsi->tc_map_vsi[idx]->idx);
 726		hw_prof->entry_h[vsi_idx][tun] = entry1_h;
 727	}
 728
 729	return 0;
 730
 731err_unroll:
 732	entry1_h = 0;
 733	hw_prof->fdir_seg[tun] = NULL;
 734
 735	/* The variable del_last will be used to determine when to clean up
 736	 * the VSI group data. The VSI data is not needed if there are no
 737	 * segments.
 738	 */
 739	del_last = true;
 740	for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
 741		if (hw_prof->fdir_seg[idx]) {
 742			del_last = false;
 743			break;
 744		}
 745
 746	for (idx = 0; idx < hw_prof->cnt; idx++) {
 747		u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);
 748
 749		if (!hw_prof->entry_h[idx][tun])
 750			continue;
 751		ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
 752		ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
 753		hw_prof->entry_h[idx][tun] = 0;
 754		if (del_last)
 755			hw_prof->vsi_h[idx] = 0;
 756	}
 757	if (del_last)
 758		hw_prof->cnt = 0;
 759err_entry:
 760	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 761			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
 762	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 763err_prof:
 764	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 765	dev_err(dev, "Failed to add filter.  Flow director filters on each port must have the same input set.\n");
 766
 767	return err;
 768}
 769
 770/**
 771 * ice_set_init_fdir_seg
 772 * @seg: flow segment for programming
 773 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 774 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 775 *
 776 * Set the configuration for perfect filters to the provided flow segment for
 777 * programming the HW filter. This is to be called only when initializing
 778 * filters as this function it assumes no filters exist.
 779 */
 780static int
 781ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
 782		      enum ice_flow_seg_hdr l3_proto,
 783		      enum ice_flow_seg_hdr l4_proto)
 784{
 785	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
 786
 787	if (!seg)
 788		return -EINVAL;
 789
 790	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
 791		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
 792		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
 793	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
 794		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
 795		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
 796	} else {
 797		return -EINVAL;
 798	}
 799
 800	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 801		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 802		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 803	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 804		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 805		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 806	} else {
 807		return -EINVAL;
 808	}
 809
 810	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
 811
 812	/* IP source address */
 813	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
 814			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 815
 816	/* IP destination address */
 817	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
 818			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 819
 820	/* Layer 4 source port */
 821	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 822			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 823
 824	/* Layer 4 destination port */
 825	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 826			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 827
 828	return 0;
 829}
 830
 831/**
 832 * ice_create_init_fdir_rule
 833 * @pf: PF structure
 834 * @flow: filter enum
 835 *
 836 * Return error value or 0 on success.
 837 */
 838static int
 839ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
 840{
 841	struct ice_flow_seg_info *seg, *tun_seg;
 842	struct device *dev = ice_pf_to_dev(pf);
 843	struct ice_hw *hw = &pf->hw;
 844	int ret;
 845
 846	/* if there is already a filter rule for kind return -EINVAL */
 847	if (hw->fdir_prof && hw->fdir_prof[flow] &&
 848	    hw->fdir_prof[flow]->fdir_seg[0])
 849		return -EINVAL;
 850
 851	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
 852	if (!seg)
 853		return -ENOMEM;
 854
 855	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
 856			       GFP_KERNEL);
 857	if (!tun_seg) {
 858		devm_kfree(dev, seg);
 859		return -ENOMEM;
 860	}
 861
 862	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
 863		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
 864					    ICE_FLOW_SEG_HDR_TCP);
 865	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
 866		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
 867					    ICE_FLOW_SEG_HDR_UDP);
 868	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
 869		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
 870					    ICE_FLOW_SEG_HDR_TCP);
 871	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
 872		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
 873					    ICE_FLOW_SEG_HDR_UDP);
 874	else
 875		ret = -EINVAL;
 876	if (ret)
 877		goto err_exit;
 878
 879	/* add filter for outer headers */
 880	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
 881	if (ret)
 882		/* could not write filter, free memory */
 883		goto err_exit;
 884
 885	/* make tunneled filter HW entries if possible */
 886	memcpy(&tun_seg[1], seg, sizeof(*seg));
 887	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
 888	if (ret)
 889		/* could not write tunnel filter, but outer header filter
 890		 * exists
 891		 */
 892		devm_kfree(dev, tun_seg);
 893
 894	set_bit(flow, hw->fdir_perfect_fltr);
 895	return ret;
 896err_exit:
 897	devm_kfree(dev, tun_seg);
 898	devm_kfree(dev, seg);
 899
 900	return -EOPNOTSUPP;
 901}
 902
 903/**
 904 * ice_set_fdir_ip4_seg
 905 * @seg: flow segment for programming
 906 * @tcp_ip4_spec: mask data from ethtool
 907 * @l4_proto: Layer 4 protocol to program
 908 * @perfect_fltr: only valid on success; returns true if perfect filter,
 909 *		  false if not
 910 *
 911 * Set the mask data into the flow segment to be used to program HW
 912 * table based on provided L4 protocol for IPv4
 913 */
 914static int
 915ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
 916		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
 917		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
 918{
 919	enum ice_flow_field src_port, dst_port;
 920
 921	/* make sure we don't have any empty rule */
 922	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
 923	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
 924		return -EINVAL;
 925
 926	/* filtering on TOS not supported */
 927	if (tcp_ip4_spec->tos)
 928		return -EOPNOTSUPP;
 929
 930	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 931		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 932		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 933	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 934		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 935		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 936	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
 937		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
 938		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
 939	} else {
 940		return -EOPNOTSUPP;
 941	}
 942
 943	*perfect_fltr = true;
 944	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);
 945
 946	/* IP source address */
 947	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
 948		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 949				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 950				 ICE_FLOW_FLD_OFF_INVAL, false);
 951	else if (!tcp_ip4_spec->ip4src)
 952		*perfect_fltr = false;
 953	else
 954		return -EOPNOTSUPP;
 955
 956	/* IP destination address */
 957	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
 958		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 959				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 960				 ICE_FLOW_FLD_OFF_INVAL, false);
 961	else if (!tcp_ip4_spec->ip4dst)
 962		*perfect_fltr = false;
 963	else
 964		return -EOPNOTSUPP;
 965
 966	/* Layer 4 source port */
 967	if (tcp_ip4_spec->psrc == htons(0xFFFF))
 968		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 969				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 970				 false);
 971	else if (!tcp_ip4_spec->psrc)
 972		*perfect_fltr = false;
 973	else
 974		return -EOPNOTSUPP;
 975
 976	/* Layer 4 destination port */
 977	if (tcp_ip4_spec->pdst == htons(0xFFFF))
 978		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 979				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 980				 false);
 981	else if (!tcp_ip4_spec->pdst)
 982		*perfect_fltr = false;
 983	else
 984		return -EOPNOTSUPP;
 985
 986	return 0;
 987}
 988
 989/**
 990 * ice_set_fdir_ip4_usr_seg
 991 * @seg: flow segment for programming
 992 * @usr_ip4_spec: ethtool userdef packet offset
 993 * @perfect_fltr: only valid on success; returns true if perfect filter,
 994 *		  false if not
 995 *
 996 * Set the offset data into the flow segment to be used to program HW
 997 * table for IPv4
 998 */
 999static int
1000ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
1001			 struct ethtool_usrip4_spec *usr_ip4_spec,
1002			 bool *perfect_fltr)
1003{
1004	/* first 4 bytes of Layer 4 header */
1005	if (usr_ip4_spec->l4_4_bytes)
1006		return -EINVAL;
1007	if (usr_ip4_spec->tos)
1008		return -EINVAL;
1009	if (usr_ip4_spec->ip_ver)
1010		return -EINVAL;
1011	/* Filtering on Layer 4 protocol not supported */
1012	if (usr_ip4_spec->proto)
1013		return -EOPNOTSUPP;
1014	/* empty rules are not valid */
1015	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
1016		return -EINVAL;
1017
1018	*perfect_fltr = true;
1019	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
1020
1021	/* IP source address */
1022	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
1023		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
1024				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1025				 ICE_FLOW_FLD_OFF_INVAL, false);
1026	else if (!usr_ip4_spec->ip4src)
1027		*perfect_fltr = false;
1028	else
1029		return -EOPNOTSUPP;
1030
1031	/* IP destination address */
1032	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
1033		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
1034				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1035				 ICE_FLOW_FLD_OFF_INVAL, false);
1036	else if (!usr_ip4_spec->ip4dst)
1037		*perfect_fltr = false;
1038	else
1039		return -EOPNOTSUPP;
1040
1041	return 0;
1042}
1043
1044/**
1045 * ice_set_fdir_ip6_seg
1046 * @seg: flow segment for programming
1047 * @tcp_ip6_spec: mask data from ethtool
1048 * @l4_proto: Layer 4 protocol to program
1049 * @perfect_fltr: only valid on success; returns true if perfect filter,
1050 *		  false if not
1051 *
1052 * Set the mask data into the flow segment to be used to program HW
1053 * table based on provided L4 protocol for IPv6
1054 */
1055static int
1056ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
1057		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
1058		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
1059{
1060	enum ice_flow_field src_port, dst_port;
1061
1062	/* make sure we don't have any empty rule */
1063	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1064		    sizeof(struct in6_addr)) &&
1065	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1066		    sizeof(struct in6_addr)) &&
1067	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
1068		return -EINVAL;
1069
1070	/* filtering on TC not supported */
1071	if (tcp_ip6_spec->tclass)
1072		return -EOPNOTSUPP;
1073
1074	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
1075		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
1076		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
1077	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
1078		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
1079		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
1080	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
1081		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
1082		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
1083	} else {
1084		return -EINVAL;
1085	}
1086
1087	*perfect_fltr = true;
1088	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
1089
1090	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
1091		    sizeof(struct in6_addr)))
1092		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1093				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1094				 ICE_FLOW_FLD_OFF_INVAL, false);
1095	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1096			 sizeof(struct in6_addr)))
1097		*perfect_fltr = false;
1098	else
1099		return -EOPNOTSUPP;
1100
1101	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1102		    sizeof(struct in6_addr)))
1103		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1104				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1105				 ICE_FLOW_FLD_OFF_INVAL, false);
1106	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1107			 sizeof(struct in6_addr)))
1108		*perfect_fltr = false;
1109	else
1110		return -EOPNOTSUPP;
1111
1112	/* Layer 4 source port */
1113	if (tcp_ip6_spec->psrc == htons(0xFFFF))
1114		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
1115				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1116				 false);
1117	else if (!tcp_ip6_spec->psrc)
1118		*perfect_fltr = false;
1119	else
1120		return -EOPNOTSUPP;
1121
1122	/* Layer 4 destination port */
1123	if (tcp_ip6_spec->pdst == htons(0xFFFF))
1124		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
1125				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1126				 false);
1127	else if (!tcp_ip6_spec->pdst)
1128		*perfect_fltr = false;
1129	else
1130		return -EOPNOTSUPP;
1131
1132	return 0;
1133}
1134
1135/**
1136 * ice_set_fdir_ip6_usr_seg
1137 * @seg: flow segment for programming
1138 * @usr_ip6_spec: ethtool userdef packet offset
1139 * @perfect_fltr: only valid on success; returns true if perfect filter,
1140 *		  false if not
1141 *
1142 * Set the offset data into the flow segment to be used to program HW
1143 * table for IPv6
1144 */
1145static int
1146ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
1147			 struct ethtool_usrip6_spec *usr_ip6_spec,
1148			 bool *perfect_fltr)
1149{
1150	/* filtering on Layer 4 bytes not supported */
1151	if (usr_ip6_spec->l4_4_bytes)
1152		return -EOPNOTSUPP;
1153	/* filtering on TC not supported */
1154	if (usr_ip6_spec->tclass)
1155		return -EOPNOTSUPP;
1156	/* filtering on Layer 4 protocol not supported */
1157	if (usr_ip6_spec->l4_proto)
1158		return -EOPNOTSUPP;
1159	/* empty rules are not valid */
1160	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1161		    sizeof(struct in6_addr)) &&
1162	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1163		    sizeof(struct in6_addr)))
1164		return -EINVAL;
1165
1166	*perfect_fltr = true;
1167	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
1168
1169	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
1170		    sizeof(struct in6_addr)))
1171		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1172				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1173				 ICE_FLOW_FLD_OFF_INVAL, false);
1174	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1175			 sizeof(struct in6_addr)))
1176		*perfect_fltr = false;
1177	else
1178		return -EOPNOTSUPP;
1179
1180	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1181		    sizeof(struct in6_addr)))
1182		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1183				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1184				 ICE_FLOW_FLD_OFF_INVAL, false);
1185	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1186			 sizeof(struct in6_addr)))
1187		*perfect_fltr = false;
1188	else
1189		return -EOPNOTSUPP;
1190
1191	return 0;
1192}
1193
1194/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1195 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
1196 * @pf: PF structure
1197 * @fsp: pointer to ethtool Rx flow specification
1198 * @user: user defined data from flow specification
1199 *
1200 * Returns 0 on success.
1201 */
1202static int
1203ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
1204		       struct ice_rx_flow_userdef *user)
1205{
1206	struct ice_flow_seg_info *seg, *tun_seg;
1207	struct device *dev = ice_pf_to_dev(pf);
1208	enum ice_fltr_ptype fltr_idx;
1209	struct ice_hw *hw = &pf->hw;
1210	bool perfect_filter;
1211	int ret;
1212
1213	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
1214	if (!seg)
1215		return -ENOMEM;
1216
1217	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
1218			       GFP_KERNEL);
1219	if (!tun_seg) {
1220		devm_kfree(dev, seg);
1221		return -ENOMEM;
1222	}
1223
1224	switch (fsp->flow_type & ~FLOW_EXT) {
1225	case TCP_V4_FLOW:
1226		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1227					   ICE_FLOW_SEG_HDR_TCP,
1228					   &perfect_filter);
1229		break;
1230	case UDP_V4_FLOW:
1231		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1232					   ICE_FLOW_SEG_HDR_UDP,
1233					   &perfect_filter);
1234		break;
1235	case SCTP_V4_FLOW:
1236		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1237					   ICE_FLOW_SEG_HDR_SCTP,
1238					   &perfect_filter);
1239		break;
1240	case IPV4_USER_FLOW:
1241		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
1242					       &perfect_filter);
1243		break;
1244	case TCP_V6_FLOW:
1245		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1246					   ICE_FLOW_SEG_HDR_TCP,
1247					   &perfect_filter);
1248		break;
1249	case UDP_V6_FLOW:
1250		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1251					   ICE_FLOW_SEG_HDR_UDP,
1252					   &perfect_filter);
1253		break;
1254	case SCTP_V6_FLOW:
1255		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1256					   ICE_FLOW_SEG_HDR_SCTP,
1257					   &perfect_filter);
1258		break;
1259	case IPV6_USER_FLOW:
1260		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
1261					       &perfect_filter);
1262		break;
 
 
 
 
 
 
 
 
 
 
1263	default:
1264		ret = -EINVAL;
1265	}
1266	if (ret)
1267		goto err_exit;
1268
1269	/* tunnel segments are shifted up one. */
1270	memcpy(&tun_seg[1], seg, sizeof(*seg));
1271
1272	if (user && user->flex_fltr) {
1273		perfect_filter = false;
1274		ice_flow_add_fld_raw(seg, user->flex_offset,
1275				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1276				     ICE_FLOW_FLD_OFF_INVAL,
1277				     ICE_FLOW_FLD_OFF_INVAL);
1278		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
1279				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1280				     ICE_FLOW_FLD_OFF_INVAL,
1281				     ICE_FLOW_FLD_OFF_INVAL);
1282	}
1283
 
 
 
 
1284	/* add filter for outer headers */
1285	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
1286	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
1287					ICE_FD_HW_SEG_NON_TUN);
1288	if (ret == -EEXIST)
1289		/* Rule already exists, free memory and continue */
1290		devm_kfree(dev, seg);
1291	else if (ret)
 
1292		/* could not write filter, free memory */
1293		goto err_exit;
 
1294
1295	/* make tunneled filter HW entries if possible */
1296	memcpy(&tun_seg[1], seg, sizeof(*seg));
1297	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
1298					ICE_FD_HW_SEG_TUN);
1299	if (ret == -EEXIST) {
1300		/* Rule already exists, free memory and count as success */
1301		devm_kfree(dev, tun_seg);
1302		ret = 0;
1303	} else if (ret) {
1304		/* could not write tunnel filter, but outer filter exists */
1305		devm_kfree(dev, tun_seg);
1306	}
1307
1308	if (perfect_filter)
1309		set_bit(fltr_idx, hw->fdir_perfect_fltr);
1310	else
1311		clear_bit(fltr_idx, hw->fdir_perfect_fltr);
1312
1313	return ret;
1314
1315err_exit:
1316	devm_kfree(dev, tun_seg);
1317	devm_kfree(dev, seg);
1318
1319	return -EOPNOTSUPP;
1320}
1321
1322/**
1323 * ice_update_per_q_fltr
1324 * @vsi: ptr to VSI
1325 * @q_index: queue index
1326 * @inc: true to increment or false to decrement per queue filter count
1327 *
1328 * This function is used to keep track of per queue sideband filters
1329 */
1330static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
1331{
1332	struct ice_rx_ring *rx_ring;
1333
1334	if (!vsi->num_rxq || q_index >= vsi->num_rxq)
1335		return;
1336
1337	rx_ring = vsi->rx_rings[q_index];
1338	if (!rx_ring || !rx_ring->ch)
1339		return;
1340
1341	if (inc)
1342		atomic_inc(&rx_ring->ch->num_sb_fltr);
1343	else
1344		atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
1345}
1346
1347/**
1348 * ice_fdir_write_fltr - send a flow director filter to the hardware
1349 * @pf: PF data structure
1350 * @input: filter structure
1351 * @add: true adds filter and false removed filter
1352 * @is_tun: true adds inner filter on tunnel and false outer headers
1353 *
1354 * returns 0 on success and negative value on error
1355 */
1356int
1357ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
1358		    bool is_tun)
1359{
1360	struct device *dev = ice_pf_to_dev(pf);
1361	struct ice_hw *hw = &pf->hw;
1362	struct ice_fltr_desc desc;
1363	struct ice_vsi *ctrl_vsi;
1364	u8 *pkt, *frag_pkt;
1365	bool has_frag;
1366	int err;
1367
1368	ctrl_vsi = ice_get_ctrl_vsi(pf);
1369	if (!ctrl_vsi)
1370		return -EINVAL;
1371
1372	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1373	if (!pkt)
1374		return -ENOMEM;
1375	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1376	if (!frag_pkt) {
1377		err = -ENOMEM;
1378		goto err_free;
1379	}
1380
1381	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1382	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1383	if (err)
1384		goto err_free_all;
1385	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1386	if (err)
1387		goto err_free_all;
1388
1389	/* repeat for fragment packet */
1390	has_frag = ice_fdir_has_frag(input->flow_type);
1391	if (has_frag) {
1392		/* does not return error */
1393		ice_fdir_get_prgm_desc(hw, input, &desc, add);
1394		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
1395						is_tun);
1396		if (err)
1397			goto err_frag;
1398		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
1399		if (err)
1400			goto err_frag;
1401	} else {
1402		devm_kfree(dev, frag_pkt);
1403	}
1404
1405	return 0;
1406
1407err_free_all:
1408	devm_kfree(dev, frag_pkt);
1409err_free:
1410	devm_kfree(dev, pkt);
1411	return err;
1412
1413err_frag:
1414	devm_kfree(dev, frag_pkt);
1415	return err;
1416}
1417
1418/**
1419 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
1420 * @pf: PF data structure
1421 * @input: filter structure
1422 * @add: true adds filter and false removed filter
1423 *
1424 * returns 0 on success and negative value on error
1425 */
1426static int
1427ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
1428			bool add)
1429{
1430	u16 port_num;
1431	int tun;
1432
1433	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
1434		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
1435		int err;
1436
1437		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
1438			continue;
1439		err = ice_fdir_write_fltr(pf, input, add, is_tun);
1440		if (err)
1441			return err;
1442	}
1443	return 0;
1444}
1445
1446/**
1447 * ice_fdir_replay_fltrs - replay filters from the HW filter list
1448 * @pf: board private structure
1449 */
1450void ice_fdir_replay_fltrs(struct ice_pf *pf)
1451{
1452	struct ice_fdir_fltr *f_rule;
1453	struct ice_hw *hw = &pf->hw;
1454
1455	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
1456		int err = ice_fdir_write_all_fltr(pf, f_rule, true);
1457
1458		if (err)
1459			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
1460				err, f_rule->fltr_id);
1461	}
1462}
1463
1464/**
1465 * ice_fdir_create_dflt_rules - create default perfect filters
1466 * @pf: PF data structure
1467 *
1468 * Returns 0 for success or error.
1469 */
1470int ice_fdir_create_dflt_rules(struct ice_pf *pf)
1471{
1472	int err;
1473
1474	/* Create perfect TCP and UDP rules in hardware. */
1475	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
1476	if (err)
1477		return err;
1478
1479	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
1480	if (err)
1481		return err;
1482
1483	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
1484	if (err)
1485		return err;
1486
1487	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
1488
1489	return err;
1490}
1491
1492/**
1493 * ice_fdir_del_all_fltrs - Delete all flow director filters
1494 * @vsi: the VSI being changed
1495 *
1496 * This function needs to be called while holding hw->fdir_fltr_lock
1497 */
1498void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
1499{
1500	struct ice_fdir_fltr *f_rule, *tmp;
1501	struct ice_pf *pf = vsi->back;
1502	struct ice_hw *hw = &pf->hw;
1503
1504	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
1505		ice_fdir_write_all_fltr(pf, f_rule, false);
1506		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
1507		list_del(&f_rule->fltr_node);
1508		devm_kfree(ice_pf_to_dev(pf), f_rule);
1509	}
1510}
1511
1512/**
1513 * ice_vsi_manage_fdir - turn on/off flow director
1514 * @vsi: the VSI being changed
1515 * @ena: boolean value indicating if this is an enable or disable request
1516 */
1517void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
1518{
1519	struct ice_pf *pf = vsi->back;
1520	struct ice_hw *hw = &pf->hw;
1521	enum ice_fltr_ptype flow;
1522
1523	if (ena) {
1524		set_bit(ICE_FLAG_FD_ENA, pf->flags);
1525		ice_fdir_create_dflt_rules(pf);
1526		return;
1527	}
1528
1529	mutex_lock(&hw->fdir_fltr_lock);
1530	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
1531		goto release_lock;
1532
1533	ice_fdir_del_all_fltrs(vsi);
1534
1535	if (hw->fdir_prof)
1536		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
1537		     flow++)
1538			if (hw->fdir_prof[flow])
1539				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
1540
1541release_lock:
1542	mutex_unlock(&hw->fdir_fltr_lock);
1543}
1544
1545/**
1546 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
1547 * @pf: PF structure
1548 * @flow_type: FDir flow type to release
1549 */
1550static void
1551ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
1552{
1553	struct ice_hw *hw = &pf->hw;
1554	bool need_perfect = false;
1555
1556	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
1557	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
1558	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
1559	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1560		need_perfect = true;
1561
1562	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
1563		return;
1564
1565	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
1566	if (need_perfect)
1567		ice_create_init_fdir_rule(pf, flow_type);
1568}
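/* Decision sketch: the HW flow profile is kept as long as a user-defined
 * perfect filter of this type exists (its bit is set in
 * hw->fdir_perfect_fltr); otherwise the profile is removed and, for the
 * four TCP/UDP types above, the default rule is installed again via
 * ice_create_init_fdir_rule().
 */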
1569
1570/**
1571 * ice_fdir_update_list_entry - add or delete a filter from the filter list
1572 * @pf: PF structure
1573 * @input: filter structure
1574 * @fltr_idx: ethtool index of filter to modify
1575 *
1576 * Returns 0 on success and negative on error
1577 */
1578static int
1579ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
1580			   int fltr_idx)
1581{
1582	struct ice_fdir_fltr *old_fltr;
1583	struct ice_hw *hw = &pf->hw;
1584	struct ice_vsi *vsi;
1585	int err = -ENOENT;
1586
1587	/* Do not update filters during reset */
1588	if (ice_is_reset_in_progress(pf->state))
1589		return -EBUSY;
1590
1591	vsi = ice_get_main_vsi(pf);
1592	if (!vsi)
1593		return -EINVAL;
1594
1595	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
1596	if (old_fltr) {
1597		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
1598		if (err)
1599			return err;
1600		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
1601		/* update sb-filters count, specific to ring->channel */
1602		ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
1603		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
1604			/* we just deleted the last filter of flow_type so we
1605			 * should also delete the HW filter info.
1606			 */
1607			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
1608		list_del(&old_fltr->fltr_node);
1609		devm_kfree(ice_hw_to_dev(hw), old_fltr);
1610	}
1611	if (!input)
1612		return err;
1613	ice_fdir_list_add_fltr(hw, input);
1614	/* update sb-filters count, specific to ring->channel */
1615	ice_update_per_q_fltr(vsi, input->orig_q_index, true);
1616	ice_fdir_update_cntrs(hw, input->flow_type, true);
1617	return 0;
1618}
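/* Usage sketch (illustrative location value): with input == NULL this is a
 * pure delete and returns -ENOENT when nothing sits at fltr_idx; with a
 * non-NULL input it replaces whatever filter occupies that ethtool
 * location:
 *
 *	ice_fdir_update_list_entry(pf, NULL, 10);	// delete loc 10
 *	ice_fdir_update_list_entry(pf, input, 10);	// add/replace loc 10
 */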
1619
1620/**
1621 * ice_del_fdir_ethtool - delete Flow Director filter
1622 * @vsi: pointer to target VSI
1623 * @cmd: command to add or delete Flow Director filter
1624 *
1625 * Returns 0 on success and negative values for failure
1626 */
1627int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1628{
1629	struct ethtool_rx_flow_spec *fsp =
1630		(struct ethtool_rx_flow_spec *)&cmd->fs;
1631	struct ice_pf *pf = vsi->back;
1632	struct ice_hw *hw = &pf->hw;
1633	int val;
1634
1635	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1636		return -EOPNOTSUPP;
1637
1638	/* Do not delete filters during reset */
1639	if (ice_is_reset_in_progress(pf->state)) {
1640		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
1641		return -EBUSY;
1642	}
1643
1644	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
1645		return -EBUSY;
1646
1647	mutex_lock(&hw->fdir_fltr_lock);
1648	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
1649	mutex_unlock(&hw->fdir_fltr_lock);
1650
1651	return val;
1652}
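/* Userspace view (device name and location are illustrative):
 *
 *	# ethtool -N eth0 delete 10
 *
 * reaches this handler with fsp->location == 10, the location that was
 * assigned when the rule was added.
 */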
1653
1654/**
1655 * ice_update_ring_dest_vsi - update dest ring and dest VSI
1656 * @vsi: pointer to target VSI
1657 * @dest_vsi: ptr to dest VSI index
1658 * @ring: ptr to dest ring
1659 *
1660 * This function updates the destination VSI and queue if the user-specified
1661 * target queue falls within a channel's (aka ADQ) queue region
1662 */
1663static void
1664ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
1665{
1666	struct ice_channel *ch;
1667
1668	list_for_each_entry(ch, &vsi->ch_list, list) {
1669		if (!ch->ch_vsi)
1670			continue;
1671
1672	/* make sure to locate the corresponding channel based on the
1673	 * specified "queue"
1674	 */
1675		if ((*ring < ch->base_q) ||
1676		    (*ring >= (ch->base_q + ch->num_rxq)))
1677			continue;
1678
1679		/* update the dest_vsi based on channel */
1680		*dest_vsi = ch->ch_vsi->idx;
1681
1682		/* update the "ring" to be correct based on channel */
1683		*ring -= ch->base_q;
1684	}
1685}
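/* Worked example (hypothetical channel layout): for a channel with
 * base_q = 8 and num_rxq = 4, a user-requested ring of 10 falls inside
 * [8, 12), so *dest_vsi becomes ch->ch_vsi->idx and *ring is rewritten to
 * 10 - 8 = 2, i.e. queue 2 relative to that channel.
 */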
1686
1687/**
1688 * ice_set_fdir_input_set - Set the input set for Flow Director
1689 * @vsi: pointer to target VSI
1690 * @fsp: pointer to ethtool Rx flow specification
1691 * @input: filter structure
1692 */
1693static int
1694ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
1695		       struct ice_fdir_fltr *input)
1696{
1697	u16 dest_vsi, q_index = 0;
1698	u16 orig_q_index = 0;
1699	struct ice_pf *pf;
1700	struct ice_hw *hw;
1701	int flow_type;
1702	u8 dest_ctl;
1703
1704	if (!vsi || !fsp || !input)
1705		return -EINVAL;
1706
1707	pf = vsi->back;
1708	hw = &pf->hw;
1709
1710	dest_vsi = vsi->idx;
1711	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1712		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1713	} else {
1714		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
1715		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
1716
1717		if (vf) {
1718			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
1719			return -EINVAL;
1720		}
1721
1722		if (ring >= vsi->num_rxq)
1723			return -EINVAL;
1724
1725		orig_q_index = ring;
1726		ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
1727		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1728		q_index = ring;
1729	}
1730
1731	input->fltr_id = fsp->location;
1732	input->q_index = q_index;
1733	flow_type = fsp->flow_type & ~FLOW_EXT;
1734
1735	/* Record the original queue index as specified by the user;
1736	 * with a channel configuration, 'q_index' becomes relative
1737	 * to the TC (channel).
1738	 */
1739	input->orig_q_index = orig_q_index;
1740	input->dest_vsi = dest_vsi;
1741	input->dest_ctl = dest_ctl;
1742	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
1743	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
1744	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);
1745
1746	if (fsp->flow_type & FLOW_EXT) {
1747		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
1748		       sizeof(input->ext_data.usr_def));
1749		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
1750		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
1751		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
1752		       sizeof(input->ext_mask.usr_def));
1753		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
1754		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
1755	}
1756
1757	switch (flow_type) {
1758	case TCP_V4_FLOW:
1759	case UDP_V4_FLOW:
1760	case SCTP_V4_FLOW:
1761		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1762		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1763		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1764		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1765		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1766		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1767		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1768		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1769		break;
1770	case IPV4_USER_FLOW:
1771		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1772		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1773		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1774		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
1775		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
1776		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
1777		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1778		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1779		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1780		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
1781		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
1782		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
1783		break;
1784	case TCP_V6_FLOW:
1785	case UDP_V6_FLOW:
1786	case SCTP_V6_FLOW:
1787		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1788		       sizeof(struct in6_addr));
1789		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1790		       sizeof(struct in6_addr));
1791		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1792		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1793		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
1794		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
1795		       sizeof(struct in6_addr));
1796		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
1797		       sizeof(struct in6_addr));
1798		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1799		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1800		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
1801		break;
1802	case IPV6_USER_FLOW:
1803		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1804		       sizeof(struct in6_addr));
1805		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1806		       sizeof(struct in6_addr));
1807		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1808		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
1809
1810		/* if no protocol requested, use IPPROTO_NONE */
1811		if (!fsp->m_u.usr_ip6_spec.l4_proto)
1812			input->ip.v6.proto = IPPROTO_NONE;
1813		else
1814			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1815
1816		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1817		       sizeof(struct in6_addr));
1818		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1819		       sizeof(struct in6_addr));
1820		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1821		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
1822		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1823		break;
1824	default:
1825		/* not doing un-parsed flow types */
1826		return -EINVAL;
1827	}
1828
1829	return 0;
1830}
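/* Mapping example (device name and values are illustrative): a request
 * such as
 *
 *	# ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 \
 *		action 4 loc 10
 *
 * arrives as a TCP_V4_FLOW fsp; ip4src and pdst are copied into
 * input->ip.v4 with the matching bits set in input->mask.v4, and
 * "action 4" selects the destination queue resolved above.
 */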
1831
1832/**
1833 * ice_add_fdir_ethtool - Add/Remove Flow Director filter
1834 * @vsi: pointer to target VSI
1835 * @cmd: command to add or delete Flow Director filter
1836 *
1837 * Returns 0 on success and negative values for failure
1838 */
1839int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1840{
1841	struct ice_rx_flow_userdef userdata;
1842	struct ethtool_rx_flow_spec *fsp;
1843	struct ice_fdir_fltr *input;
1844	struct device *dev;
1845	struct ice_pf *pf;
1846	struct ice_hw *hw;
1847	int fltrs_needed;
1848	u16 tunnel_port;
1849	int ret;
1850
1851	if (!vsi)
1852		return -EINVAL;
1853
1854	pf = vsi->back;
1855	hw = &pf->hw;
1856	dev = ice_pf_to_dev(pf);
1857
1858	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1859		return -EOPNOTSUPP;
1860
1861	/* Do not program filters during reset */
1862	if (ice_is_reset_in_progress(pf->state)) {
1863		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
1864		return -EBUSY;
1865	}
1866
1867	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1868
1869	if (ice_parse_rx_flow_user_data(fsp, &userdata))
1870		return -EINVAL;
1871
1872	if (fsp->flow_type & FLOW_MAC_EXT)
1873		return -EINVAL;
1874
1875	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
1876	if (ret)
1877		return ret;
1878
1879	if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
1880		dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
1881		return -ENOSPC;
1882	}
1883
1884	/* return error if not an update and no available filters */
1885	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
1886	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
1887	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
1888		dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
1889		return -ENOSPC;
1890	}
1891
1892	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
1893	if (!input)
1894		return -ENOMEM;
1895
1896	ret = ice_set_fdir_input_set(vsi, fsp, input);
1897	if (ret)
1898		goto free_input;
1899
1900	mutex_lock(&hw->fdir_fltr_lock);
1901	if (ice_fdir_is_dup_fltr(hw, input)) {
1902		ret = -EINVAL;
1903		goto release_lock;
1904	}
1905
1906	if (userdata.flex_fltr) {
1907		input->flex_fltr = true;
1908		input->flex_word = cpu_to_be16(userdata.flex_word);
1909		input->flex_offset = userdata.flex_offset;
1910	}
1911
1912	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1913	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1914	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
1915
1916	/* input struct is added to the HW filter list */
1917	ice_fdir_update_list_entry(pf, input, fsp->location);
1918
1919	ret = ice_fdir_write_all_fltr(pf, input, true);
1920	if (ret)
1921		goto remove_sw_rule;
1922
1923	goto release_lock;
1924
1925remove_sw_rule:
1926	ice_fdir_update_cntrs(hw, input->flow_type, false);
1927	/* update sb-filters count, specific to ring->channel */
1928	ice_update_per_q_fltr(vsi, input->orig_q_index, false);
1929	list_del(&input->fltr_node);
1930release_lock:
1931	mutex_unlock(&hw->fdir_fltr_lock);
1932free_input:
1933	if (ret)
1934		devm_kfree(dev, input);
1935
1936	return ret;
1937}
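/* End-to-end example (illustrative values): dropping IPv4/UDP traffic to
 * port 4789 with a Flow Director rule; "action -1" is RX_CLS_FLOW_DISC,
 * which takes the drop branch in ice_set_fdir_input_set():
 *
 *	# ethtool -N eth0 flow-type udp4 dst-port 4789 action -1 loc 5
 */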