/* (capture residue: site navigation text from a Linux v6.9.4 source browser;
 * not part of the original source file)
 */
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2018-2023, Intel Corporation. */
   3
   4/* flow director ethtool support for ice */
   5
   6#include "ice.h"
   7#include "ice_lib.h"
   8#include "ice_fdir.h"
   9#include "ice_flow.h"
  10
/* all-ones IPv6 address mask; an ethtool mask equal to this means the
 * address must match exactly (a "perfect" match on that field)
 */
static struct in6_addr full_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};
  19
/* all-zeroes IPv6 address mask; an ethtool mask equal to this means the
 * address field is a wildcard (not used for matching)
 */
static struct in6_addr zero_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}
	}
};
  28
/* calls to ice_flow_add_prof require the number of segments in the array
 * for segs_cnt. In this code that is one more than the segment index
 * (e.g. a tunnel segment at index 1 means two segments total).
 */
#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
  33
  34/**
  35 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
  36 * flow type values
  37 * @flow: filter type to be converted
  38 *
  39 * Returns the corresponding ethtool flow type.
  40 */
  41static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
  42{
  43	switch (flow) {
  44	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
  45		return TCP_V4_FLOW;
  46	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
  47		return UDP_V4_FLOW;
  48	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
  49		return SCTP_V4_FLOW;
  50	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
  51		return IPV4_USER_FLOW;
  52	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
  53		return TCP_V6_FLOW;
  54	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
  55		return UDP_V6_FLOW;
  56	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
  57		return SCTP_V6_FLOW;
  58	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
  59		return IPV6_USER_FLOW;
  60	default:
  61		/* 0 is undefined ethtool flow */
  62		return 0;
  63	}
  64}
  65
  66/**
  67 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
  68 * @eth: Ethtool flow type to be converted
  69 *
  70 * Returns flow enum
  71 */
  72static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
  73{
  74	switch (eth) {
  75	case TCP_V4_FLOW:
  76		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
  77	case UDP_V4_FLOW:
  78		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
  79	case SCTP_V4_FLOW:
  80		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
  81	case IPV4_USER_FLOW:
  82		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
  83	case TCP_V6_FLOW:
  84		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
  85	case UDP_V6_FLOW:
  86		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
  87	case SCTP_V6_FLOW:
  88		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
  89	case IPV6_USER_FLOW:
  90		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
  91	default:
  92		return ICE_FLTR_PTYPE_NONF_NONE;
  93	}
  94}
  95
  96/**
  97 * ice_is_mask_valid - check mask field set
  98 * @mask: full mask to check
  99 * @field: field for which mask should be valid
 100 *
 101 * If the mask is fully set return true. If it is not valid for field return
 102 * false.
 103 */
 104static bool ice_is_mask_valid(u64 mask, u64 field)
 105{
 106	return (mask & field) == field;
 107}
 108
 109/**
 110 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 111 * @hw: hardware structure that contains filter list
 112 * @cmd: ethtool command data structure to receive the filter data
 113 *
 114 * Returns 0 on success and -EINVAL on failure
 115 */
 116int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
 117{
 118	struct ethtool_rx_flow_spec *fsp;
 119	struct ice_fdir_fltr *rule;
 120	int ret = 0;
 121	u16 idx;
 122
 123	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
 124
 125	mutex_lock(&hw->fdir_fltr_lock);
 126
 127	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);
 128
 129	if (!rule || fsp->location != rule->fltr_id) {
 130		ret = -EINVAL;
 131		goto release_lock;
 132	}
 133
 134	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);
 135
 136	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
 137	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
 138
 139	switch (fsp->flow_type) {
 140	case IPV4_USER_FLOW:
 141		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 142		fsp->h_u.usr_ip4_spec.proto = 0;
 143		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
 144		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
 145		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
 146		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
 147		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
 148		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
 149		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
 150		fsp->m_u.usr_ip4_spec.proto = 0;
 151		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
 152		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
 153		break;
 154	case TCP_V4_FLOW:
 155	case UDP_V4_FLOW:
 156	case SCTP_V4_FLOW:
 157		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
 158		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
 159		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
 160		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
 161		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
 162		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
 163		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
 164		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
 165		break;
 166	case IPV6_USER_FLOW:
 167		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
 168		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
 169		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
 170		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
 171		       sizeof(struct in6_addr));
 172		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
 173		       sizeof(struct in6_addr));
 174		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
 175		       sizeof(struct in6_addr));
 176		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
 177		       sizeof(struct in6_addr));
 178		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
 179		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
 180		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
 181		break;
 182	case TCP_V6_FLOW:
 183	case UDP_V6_FLOW:
 184	case SCTP_V6_FLOW:
 185		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
 186		       sizeof(struct in6_addr));
 187		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
 188		       sizeof(struct in6_addr));
 189		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
 190		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
 191		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
 192		       rule->mask.v6.src_ip,
 193		       sizeof(struct in6_addr));
 194		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
 195		       rule->mask.v6.dst_ip,
 196		       sizeof(struct in6_addr));
 197		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
 198		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
 199		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
 200		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
 201		break;
 202	default:
 203		break;
 204	}
 205
 206	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
 207		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 208	else
 209		fsp->ring_cookie = rule->orig_q_index;
 210
 211	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
 212	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
 213		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
 214			rule->flow_type);
 215		ret = -EINVAL;
 216	}
 217
 218release_lock:
 219	mutex_unlock(&hw->fdir_fltr_lock);
 220	return ret;
 221}
 222
 223/**
 224 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 225 * @hw: hardware structure containing the filter list
 226 * @cmd: ethtool command data structure
 227 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 228 *
 229 * Returns 0 as expected for success by ethtool
 230 */
 231int
 232ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
 233		      u32 *rule_locs)
 234{
 235	struct ice_fdir_fltr *f_rule;
 236	unsigned int cnt = 0;
 237	int val = 0;
 238
 239	/* report total rule count */
 240	cmd->data = ice_get_fdir_cnt_all(hw);
 241
 242	mutex_lock(&hw->fdir_fltr_lock);
 243
 244	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
 245		if (cnt == cmd->rule_cnt) {
 246			val = -EMSGSIZE;
 247			goto release_lock;
 248		}
 249		rule_locs[cnt] = f_rule->fltr_id;
 250		cnt++;
 251	}
 252
 253release_lock:
 254	mutex_unlock(&hw->fdir_fltr_lock);
 255	if (!val)
 256		cmd->rule_cnt = cnt;
 257	return val;
 258}
 259
 260/**
 261 * ice_fdir_remap_entries - update the FDir entries in profile
 262 * @prof: FDir structure pointer
 263 * @tun: tunneled or non-tunneled packet
 264 * @idx: FDir entry index
 265 */
 266static void
 267ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
 268{
 269	if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
 270		int i;
 271
 272		for (i = idx; i < (prof->cnt - 1); i++) {
 273			u64 old_entry_h;
 274
 275			old_entry_h = prof->entry_h[i + 1][tun];
 276			prof->entry_h[i][tun] = old_entry_h;
 277			prof->vsi_h[i] = prof->vsi_h[i + 1];
 278		}
 279
 280		prof->entry_h[i][tun] = 0;
 281		prof->vsi_h[i] = 0;
 282	}
 283}
 284
 285/**
 286 * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
 287 * @hw: hardware structure containing filter list
 288 * @vsi_idx: VSI handle
 289 */
 290void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
 291{
 292	int status, flow;
 293
 294	if (!hw->fdir_prof)
 295		return;
 296
 297	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
 298		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
 299		int tun, i;
 300
 301		if (!prof || !prof->cnt)
 302			continue;
 303
 304		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 305			u64 prof_id = prof->prof_id[tun];
 306
 307			for (i = 0; i < prof->cnt; i++) {
 308				if (prof->vsi_h[i] != vsi_idx)
 309					continue;
 310
 311				prof->entry_h[i][tun] = 0;
 312				prof->vsi_h[i] = 0;
 313				break;
 314			}
 315
 316			/* after clearing FDir entries update the remaining */
 317			ice_fdir_remap_entries(prof, tun, i);
 318
 319			/* find flow profile corresponding to prof_id and clear
 320			 * vsi_idx from bitmap.
 321			 */
 322			status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
 323			if (status) {
 324				dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
 325					status);
 326			}
 327		}
 328		prof->cnt--;
 329	}
 330}
 331
 332/**
 333 * ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow
 334 * @hw: hardware structure containing the filter list
 335 * @blk: hardware block
 336 * @flow: FDir flow type to release
 337 */
 338static struct ice_fd_hw_prof *
 339ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
 340{
 341	if (blk == ICE_BLK_FD && hw->fdir_prof)
 342		return hw->fdir_prof[flow];
 343
 344	return NULL;
 345}
 346
 347/**
 348 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 349 * @hw: hardware structure containing the filter list
 350 * @blk: hardware block
 351 * @flow: FDir flow type to release
 352 */
 353static void
 354ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
 355{
 356	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
 357	int tun;
 358
 359	if (!prof)
 360		return;
 361
 362	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 363		u64 prof_id = prof->prof_id[tun];
 364		int j;
 365
 
 366		for (j = 0; j < prof->cnt; j++) {
 367			u16 vsi_num;
 368
 369			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
 370				continue;
 371			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
 372			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
 373			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
 374			prof->entry_h[j][tun] = 0;
 375		}
 376		ice_flow_rem_prof(hw, blk, prof_id);
 377	}
 378}
 379
 380/**
 381 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 382 * @hw: hardware structure containing the filter list
 383 * @blk: hardware block
 384 * @flow_type: FDir flow type to release
 385 */
 386static void
 387ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
 388		  enum ice_fltr_ptype flow_type)
 389{
 390	int flow = (int)flow_type & ~FLOW_EXT;
 391	struct ice_fd_hw_prof *prof;
 392	int tun, i;
 393
 394	prof = ice_fdir_get_hw_prof(hw, blk, flow);
 395	if (!prof)
 396		return;
 397
 398	ice_fdir_erase_flow_from_hw(hw, blk, flow);
 399	for (i = 0; i < prof->cnt; i++)
 400		prof->vsi_h[i] = 0;
 401	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 402		if (!prof->fdir_seg[tun])
 403			continue;
 404		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
 405		prof->fdir_seg[tun] = NULL;
 406	}
 407	prof->cnt = 0;
 408}
 409
 410/**
 411 * ice_fdir_release_flows - release all flows in use for later replay
 412 * @hw: pointer to HW instance
 413 */
 414void ice_fdir_release_flows(struct ice_hw *hw)
 415{
 416	int flow;
 417
 418	/* release Flow Director HW table entries */
 419	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
 420		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
 421}
 422
 423/**
 424 * ice_fdir_replay_flows - replay HW Flow Director filter info
 425 * @hw: pointer to HW instance
 426 */
 427void ice_fdir_replay_flows(struct ice_hw *hw)
 428{
 429	int flow;
 430
 431	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
 432		int tun;
 433
 434		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
 435			continue;
 436		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 437			struct ice_flow_prof *hw_prof;
 438			struct ice_fd_hw_prof *prof;
 
 439			int j;
 440
 441			prof = hw->fdir_prof[flow];
 442			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
 
 443					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
 444					  false, &hw_prof);
 445			for (j = 0; j < prof->cnt; j++) {
 446				enum ice_flow_priority prio;
 447				u64 entry_h = 0;
 448				int err;
 449
 450				prio = ICE_FLOW_PRIO_NORMAL;
 451				err = ice_flow_add_entry(hw, ICE_BLK_FD,
 452							 hw_prof->id,
 453							 prof->vsi_h[0],
 454							 prof->vsi_h[j],
 455							 prio, prof->fdir_seg,
 456							 &entry_h);
 457				if (err) {
 458					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
 459						flow);
 460					continue;
 461				}
 462				prof->prof_id[tun] = hw_prof->id;
 463				prof->entry_h[j][tun] = entry_h;
 464			}
 465		}
 466	}
 467}
 468
 469/**
 470 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 471 * @fsp: pointer to ethtool Rx flow specification
 472 * @data: pointer to userdef data structure for storage
 473 *
 474 * Returns 0 on success, negative error value on failure
 475 */
 476static int
 477ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 478			    struct ice_rx_flow_userdef *data)
 479{
 480	u64 value, mask;
 481
 482	memset(data, 0, sizeof(*data));
 483	if (!(fsp->flow_type & FLOW_EXT))
 484		return 0;
 485
 486	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
 487	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
 488	if (!mask)
 489		return 0;
 490
 491#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
 492#define ICE_USERDEF_FLEX_OFFS_S	16
 493#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
 494#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)
 495
 496	/* 0x1fe is the maximum value for offsets stored in the internal
 497	 * filtering tables.
 498	 */
 499#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
 500
 501	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
 502	    value > ICE_USERDEF_FLEX_FLTR_M)
 503		return -EINVAL;
 504
 505	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
 506	data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value);
 
 507	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
 508		return -EINVAL;
 509
 510	data->flex_fltr = true;
 511
 512	return 0;
 513}
 514
 515/**
 516 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 517 * @hw: pointer to hardware structure
 518 * @vsi: software VSI structure
 519 *
 520 * There are 2 filter pools: guaranteed and best effort(shared). Each VSI can
 521 * use filters from either pool. The guaranteed pool is divided between VSIs.
 522 * The best effort filter pool is common to all VSIs and is a device shared
 523 * resource pool. The number of filters available to this VSI is the sum of
 524 * the VSIs guaranteed filter pool and the global available best effort
 525 * filter pool.
 526 *
 527 * Returns the number of available flow director filters to this VSI
 528 */
 529static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
 530{
 531	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
 532	u16 num_guar;
 533	u16 num_be;
 534
 535	/* total guaranteed filters assigned to this VSI */
 536	num_guar = vsi->num_gfltr;
 537
 
 
 
 
 538	/* total global best effort filters */
 539	num_be = hw->func_caps.fd_fltr_best_effort;
 540
 541	/* Subtract the number of programmed filters from the global values */
 542	switch (hw->mac_type) {
 543	case ICE_MAC_E830:
 544		num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
 545				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
 546		num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
 547				    rd32(hw, GLQF_FD_CNT));
 548		break;
 549	case ICE_MAC_E810:
 550	default:
 551		num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
 552				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
 553		num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
 554				    rd32(hw, GLQF_FD_CNT));
 555	}
 556
 557	return num_guar + num_be;
 558}
 559
 560/**
 561 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 562 * @hw: HW structure containing the FDir flow profile structure(s)
 563 * @flow: flow type to allocate the flow profile for
 564 *
 565 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 566 * on success and negative on error.
 567 */
 568static int
 569ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
 570{
 571	if (!hw)
 572		return -EINVAL;
 573
 574	if (!hw->fdir_prof) {
 575		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
 576					     ICE_FLTR_PTYPE_MAX,
 577					     sizeof(*hw->fdir_prof),
 578					     GFP_KERNEL);
 579		if (!hw->fdir_prof)
 580			return -ENOMEM;
 581	}
 582
 583	if (!hw->fdir_prof[flow]) {
 584		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
 585						   sizeof(**hw->fdir_prof),
 586						   GFP_KERNEL);
 587		if (!hw->fdir_prof[flow])
 588			return -ENOMEM;
 589	}
 590
 591	return 0;
 592}
 593
 594/**
 595 * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
 596 * @prof: pointer to flow director HW profile
 597 * @vsi_idx: vsi_idx to locate
 598 *
 599 * return the index of the vsi_idx. if vsi_idx is not found insert it
 600 * into the vsi_h table.
 601 */
 602static u16
 603ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
 604{
 605	u16 idx = 0;
 606
 607	for (idx = 0; idx < prof->cnt; idx++)
 608		if (prof->vsi_h[idx] == vsi_idx)
 609			return idx;
 610
 611	if (idx == prof->cnt)
 612		prof->vsi_h[prof->cnt++] = vsi_idx;
 613	return idx;
 614}
 615
 616/**
 617 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 618 * @pf: pointer to the PF structure
 619 * @seg: protocol header description pointer
 620 * @flow: filter enum
 621 * @tun: FDir segment to program
 622 */
 623static int
 624ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
 625			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
 626{
 627	struct device *dev = ice_pf_to_dev(pf);
 628	struct ice_vsi *main_vsi, *ctrl_vsi;
 629	struct ice_flow_seg_info *old_seg;
 630	struct ice_flow_prof *prof = NULL;
 631	struct ice_fd_hw_prof *hw_prof;
 632	struct ice_hw *hw = &pf->hw;
 
 633	u64 entry1_h = 0;
 634	u64 entry2_h = 0;
 635	bool del_last;
 636	int err;
 637	int idx;
 638
 639	main_vsi = ice_get_main_vsi(pf);
 640	if (!main_vsi)
 641		return -EINVAL;
 642
 643	ctrl_vsi = ice_get_ctrl_vsi(pf);
 644	if (!ctrl_vsi)
 645		return -EINVAL;
 646
 647	err = ice_fdir_alloc_flow_prof(hw, flow);
 648	if (err)
 649		return err;
 650
 651	hw_prof = hw->fdir_prof[flow];
 652	old_seg = hw_prof->fdir_seg[tun];
 653	if (old_seg) {
 654		/* This flow_type already has a changed input set.
 655		 * If it matches the requested input set then we are
 656		 * done. Or, if it's different then it's an error.
 657		 */
 658		if (!memcmp(old_seg, seg, sizeof(*seg)))
 659			return -EEXIST;
 660
 661		/* if there are FDir filters using this flow,
 662		 * then return error.
 663		 */
 664		if (hw->fdir_fltr_cnt[flow]) {
 665			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
 666			return -EINVAL;
 667		}
 668
 669		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
 670			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
 671				flow);
 672			return -EINVAL;
 673		}
 674
 675		/* remove HW filter definition */
 676		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
 677	}
 678
 679	/* Adding a profile, but there is only one header supported.
 680	 * That is the final parameters are 1 header (segment), no
 681	 * actions (NULL) and zero actions 0.
 682	 */
 683	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
 684				TNL_SEG_CNT(tun), false, &prof);
 685	if (err)
 686		return err;
 687	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
 688				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 689				 seg, &entry1_h);
 690	if (err)
 
 
 691		goto err_prof;
 692	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
 693				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 694				 seg, &entry2_h);
 695	if (err)
 
 
 696		goto err_entry;
 
 697
 698	hw_prof->fdir_seg[tun] = seg;
 699	hw_prof->prof_id[tun] = prof->id;
 700	hw_prof->entry_h[0][tun] = entry1_h;
 701	hw_prof->entry_h[1][tun] = entry2_h;
 702	hw_prof->vsi_h[0] = main_vsi->idx;
 703	hw_prof->vsi_h[1] = ctrl_vsi->idx;
 704	if (!hw_prof->cnt)
 705		hw_prof->cnt = 2;
 706
 707	for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
 708		u16 vsi_idx;
 709		u16 vsi_h;
 710
 711		if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
 712			continue;
 713
 714		entry1_h = 0;
 715		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
 716		err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
 717					 main_vsi->idx, vsi_h,
 718					 ICE_FLOW_PRIO_NORMAL, seg,
 719					 &entry1_h);
 720		if (err) {
 721			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
 722				idx);
 723			goto err_unroll;
 724		}
 725
 726		vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
 727						main_vsi->tc_map_vsi[idx]->idx);
 728		hw_prof->entry_h[vsi_idx][tun] = entry1_h;
 729	}
 730
 731	return 0;
 732
 733err_unroll:
 734	entry1_h = 0;
 735	hw_prof->fdir_seg[tun] = NULL;
 736
 737	/* The variable del_last will be used to determine when to clean up
 738	 * the VSI group data. The VSI data is not needed if there are no
 739	 * segments.
 740	 */
 741	del_last = true;
 742	for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
 743		if (hw_prof->fdir_seg[idx]) {
 744			del_last = false;
 745			break;
 746		}
 747
 748	for (idx = 0; idx < hw_prof->cnt; idx++) {
 749		u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);
 750
 751		if (!hw_prof->entry_h[idx][tun])
 752			continue;
 753		ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
 754		ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
 755		hw_prof->entry_h[idx][tun] = 0;
 756		if (del_last)
 757			hw_prof->vsi_h[idx] = 0;
 758	}
 759	if (del_last)
 760		hw_prof->cnt = 0;
 761err_entry:
 762	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 763			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
 764	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 765err_prof:
 766	ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
 767	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
 768
 769	return err;
 770}
 771
 772/**
 773 * ice_set_init_fdir_seg
 774 * @seg: flow segment for programming
 775 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 776 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 777 *
 778 * Set the configuration for perfect filters to the provided flow segment for
 779 * programming the HW filter. This is to be called only when initializing
 780 * filters as this function it assumes no filters exist.
 781 */
 782static int
 783ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
 784		      enum ice_flow_seg_hdr l3_proto,
 785		      enum ice_flow_seg_hdr l4_proto)
 786{
 787	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
 788
 789	if (!seg)
 790		return -EINVAL;
 791
 792	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
 793		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
 794		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
 795	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
 796		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
 797		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
 798	} else {
 799		return -EINVAL;
 800	}
 801
 802	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 803		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 804		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 805	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 806		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 807		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 808	} else {
 809		return -EINVAL;
 810	}
 811
 812	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
 813
 814	/* IP source address */
 815	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
 816			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 817
 818	/* IP destination address */
 819	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
 820			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 821
 822	/* Layer 4 source port */
 823	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 824			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 825
 826	/* Layer 4 destination port */
 827	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 828			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 829
 830	return 0;
 831}
 832
 833/**
 834 * ice_create_init_fdir_rule
 835 * @pf: PF structure
 836 * @flow: filter enum
 837 *
 838 * Return error value or 0 on success.
 839 */
 840static int
 841ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
 842{
 843	struct ice_flow_seg_info *seg, *tun_seg;
 844	struct device *dev = ice_pf_to_dev(pf);
 845	struct ice_hw *hw = &pf->hw;
 846	int ret;
 847
 848	/* if there is already a filter rule for kind return -EINVAL */
 849	if (hw->fdir_prof && hw->fdir_prof[flow] &&
 850	    hw->fdir_prof[flow]->fdir_seg[0])
 851		return -EINVAL;
 852
 853	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
 854	if (!seg)
 855		return -ENOMEM;
 856
 857	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
 858			       GFP_KERNEL);
 859	if (!tun_seg) {
 860		devm_kfree(dev, seg);
 861		return -ENOMEM;
 862	}
 863
 864	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
 865		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
 866					    ICE_FLOW_SEG_HDR_TCP);
 867	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
 868		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
 869					    ICE_FLOW_SEG_HDR_UDP);
 870	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
 871		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
 872					    ICE_FLOW_SEG_HDR_TCP);
 873	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
 874		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
 875					    ICE_FLOW_SEG_HDR_UDP);
 876	else
 877		ret = -EINVAL;
 878	if (ret)
 879		goto err_exit;
 880
 881	/* add filter for outer headers */
 882	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
 883	if (ret)
 884		/* could not write filter, free memory */
 885		goto err_exit;
 886
 887	/* make tunneled filter HW entries if possible */
 888	memcpy(&tun_seg[1], seg, sizeof(*seg));
 889	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
 890	if (ret)
 891		/* could not write tunnel filter, but outer header filter
 892		 * exists
 893		 */
 894		devm_kfree(dev, tun_seg);
 895
 896	set_bit(flow, hw->fdir_perfect_fltr);
 897	return ret;
 898err_exit:
 899	devm_kfree(dev, tun_seg);
 900	devm_kfree(dev, seg);
 901
 902	return -EOPNOTSUPP;
 903}
 904
 905/**
 906 * ice_set_fdir_ip4_seg
 907 * @seg: flow segment for programming
 908 * @tcp_ip4_spec: mask data from ethtool
 909 * @l4_proto: Layer 4 protocol to program
 910 * @perfect_fltr: only valid on success; returns true if perfect filter,
 911 *		  false if not
 912 *
 913 * Set the mask data into the flow segment to be used to program HW
 914 * table based on provided L4 protocol for IPv4
 915 */
 916static int
 917ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
 918		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
 919		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
 920{
 921	enum ice_flow_field src_port, dst_port;
 922
 923	/* make sure we don't have any empty rule */
 924	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
 925	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
 926		return -EINVAL;
 927
 928	/* filtering on TOS not supported */
 929	if (tcp_ip4_spec->tos)
 930		return -EOPNOTSUPP;
 931
 932	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 933		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 934		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 935	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 936		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 937		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 938	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
 939		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
 940		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
 941	} else {
 942		return -EOPNOTSUPP;
 943	}
 944
 945	*perfect_fltr = true;
 946	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);
 947
 948	/* IP source address */
 949	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
 950		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 951				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 952				 ICE_FLOW_FLD_OFF_INVAL, false);
 953	else if (!tcp_ip4_spec->ip4src)
 954		*perfect_fltr = false;
 955	else
 956		return -EOPNOTSUPP;
 957
 958	/* IP destination address */
 959	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
 960		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 961				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 962				 ICE_FLOW_FLD_OFF_INVAL, false);
 963	else if (!tcp_ip4_spec->ip4dst)
 964		*perfect_fltr = false;
 965	else
 966		return -EOPNOTSUPP;
 967
 968	/* Layer 4 source port */
 969	if (tcp_ip4_spec->psrc == htons(0xFFFF))
 970		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 971				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 972				 false);
 973	else if (!tcp_ip4_spec->psrc)
 974		*perfect_fltr = false;
 975	else
 976		return -EOPNOTSUPP;
 977
 978	/* Layer 4 destination port */
 979	if (tcp_ip4_spec->pdst == htons(0xFFFF))
 980		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 981				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 982				 false);
 983	else if (!tcp_ip4_spec->pdst)
 984		*perfect_fltr = false;
 985	else
 986		return -EOPNOTSUPP;
 987
 988	return 0;
 989}
 990
 991/**
 992 * ice_set_fdir_ip4_usr_seg
 993 * @seg: flow segment for programming
 994 * @usr_ip4_spec: ethtool userdef packet offset
 995 * @perfect_fltr: only valid on success; returns true if perfect filter,
 996 *		  false if not
 997 *
 998 * Set the offset data into the flow segment to be used to program HW
 999 * table for IPv4
1000 */
1001static int
1002ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
1003			 struct ethtool_usrip4_spec *usr_ip4_spec,
1004			 bool *perfect_fltr)
1005{
1006	/* first 4 bytes of Layer 4 header */
1007	if (usr_ip4_spec->l4_4_bytes)
1008		return -EINVAL;
1009	if (usr_ip4_spec->tos)
1010		return -EINVAL;
1011	if (usr_ip4_spec->ip_ver)
1012		return -EINVAL;
1013	/* Filtering on Layer 4 protocol not supported */
1014	if (usr_ip4_spec->proto)
1015		return -EOPNOTSUPP;
1016	/* empty rules are not valid */
1017	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
1018		return -EINVAL;
1019
1020	*perfect_fltr = true;
1021	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
1022
1023	/* IP source address */
1024	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
1025		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
1026				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1027				 ICE_FLOW_FLD_OFF_INVAL, false);
1028	else if (!usr_ip4_spec->ip4src)
1029		*perfect_fltr = false;
1030	else
1031		return -EOPNOTSUPP;
1032
1033	/* IP destination address */
1034	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
1035		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
1036				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1037				 ICE_FLOW_FLD_OFF_INVAL, false);
1038	else if (!usr_ip4_spec->ip4dst)
1039		*perfect_fltr = false;
1040	else
1041		return -EOPNOTSUPP;
1042
1043	return 0;
1044}
1045
1046/**
1047 * ice_set_fdir_ip6_seg
1048 * @seg: flow segment for programming
1049 * @tcp_ip6_spec: mask data from ethtool
1050 * @l4_proto: Layer 4 protocol to program
1051 * @perfect_fltr: only valid on success; returns true if perfect filter,
1052 *		  false if not
1053 *
1054 * Set the mask data into the flow segment to be used to program HW
1055 * table based on provided L4 protocol for IPv6
1056 */
1057static int
1058ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
1059		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
1060		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
1061{
1062	enum ice_flow_field src_port, dst_port;
1063
1064	/* make sure we don't have any empty rule */
1065	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1066		    sizeof(struct in6_addr)) &&
1067	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1068		    sizeof(struct in6_addr)) &&
1069	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
1070		return -EINVAL;
1071
1072	/* filtering on TC not supported */
1073	if (tcp_ip6_spec->tclass)
1074		return -EOPNOTSUPP;
1075
1076	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
1077		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
1078		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
1079	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
1080		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
1081		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
1082	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
1083		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
1084		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
1085	} else {
1086		return -EINVAL;
1087	}
1088
1089	*perfect_fltr = true;
1090	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
1091
1092	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
1093		    sizeof(struct in6_addr)))
1094		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1095				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1096				 ICE_FLOW_FLD_OFF_INVAL, false);
1097	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1098			 sizeof(struct in6_addr)))
1099		*perfect_fltr = false;
1100	else
1101		return -EOPNOTSUPP;
1102
1103	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1104		    sizeof(struct in6_addr)))
1105		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1106				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1107				 ICE_FLOW_FLD_OFF_INVAL, false);
1108	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1109			 sizeof(struct in6_addr)))
1110		*perfect_fltr = false;
1111	else
1112		return -EOPNOTSUPP;
1113
1114	/* Layer 4 source port */
1115	if (tcp_ip6_spec->psrc == htons(0xFFFF))
1116		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
1117				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1118				 false);
1119	else if (!tcp_ip6_spec->psrc)
1120		*perfect_fltr = false;
1121	else
1122		return -EOPNOTSUPP;
1123
1124	/* Layer 4 destination port */
1125	if (tcp_ip6_spec->pdst == htons(0xFFFF))
1126		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
1127				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1128				 false);
1129	else if (!tcp_ip6_spec->pdst)
1130		*perfect_fltr = false;
1131	else
1132		return -EOPNOTSUPP;
1133
1134	return 0;
1135}
1136
1137/**
1138 * ice_set_fdir_ip6_usr_seg
1139 * @seg: flow segment for programming
1140 * @usr_ip6_spec: ethtool userdef packet offset
1141 * @perfect_fltr: only valid on success; returns true if perfect filter,
1142 *		  false if not
1143 *
1144 * Set the offset data into the flow segment to be used to program HW
1145 * table for IPv6
1146 */
1147static int
1148ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
1149			 struct ethtool_usrip6_spec *usr_ip6_spec,
1150			 bool *perfect_fltr)
1151{
1152	/* filtering on Layer 4 bytes not supported */
1153	if (usr_ip6_spec->l4_4_bytes)
1154		return -EOPNOTSUPP;
1155	/* filtering on TC not supported */
1156	if (usr_ip6_spec->tclass)
1157		return -EOPNOTSUPP;
1158	/* filtering on Layer 4 protocol not supported */
1159	if (usr_ip6_spec->l4_proto)
1160		return -EOPNOTSUPP;
1161	/* empty rules are not valid */
1162	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1163		    sizeof(struct in6_addr)) &&
1164	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1165		    sizeof(struct in6_addr)))
1166		return -EINVAL;
1167
1168	*perfect_fltr = true;
1169	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
1170
1171	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
1172		    sizeof(struct in6_addr)))
1173		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1174				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1175				 ICE_FLOW_FLD_OFF_INVAL, false);
1176	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1177			 sizeof(struct in6_addr)))
1178		*perfect_fltr = false;
1179	else
1180		return -EOPNOTSUPP;
1181
1182	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1183		    sizeof(struct in6_addr)))
1184		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1185				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1186				 ICE_FLOW_FLD_OFF_INVAL, false);
1187	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1188			 sizeof(struct in6_addr)))
1189		*perfect_fltr = false;
1190	else
1191		return -EOPNOTSUPP;
1192
1193	return 0;
1194}
1195
1196/**
1197 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
1198 * @pf: PF structure
1199 * @fsp: pointer to ethtool Rx flow specification
1200 * @user: user defined data from flow specification
1201 *
1202 * Returns 0 on success.
1203 */
static int
ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
		       struct ice_rx_flow_userdef *user)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_fltr_ptype fltr_idx;
	struct ice_hw *hw = &pf->hw;
	bool perfect_filter;
	int ret;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	/* tun_seg is an array of ICE_FD_HW_SEG_MAX segments; index 0 is the
	 * (unprogrammed) outer/tunnel header, index 1 mirrors seg
	 */
	tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	/* translate the ethtool mask data into flow-engine segment fields */
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV4_USER_FLOW:
		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
					       &perfect_filter);
		break;
	case TCP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV6_USER_FLOW:
		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
					       &perfect_filter);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_exit;

	/* tunnel segments are shifted up one. */
	memcpy(&tun_seg[1], seg, sizeof(*seg));

	/* a user-defined flex word forces a non-perfect (software) match */
	if (user && user->flex_fltr) {
		perfect_filter = false;
		ice_flow_add_fld_raw(seg, user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
	}

	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);

	/* record whether this flow type now has a perfect-match profile */
	assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);

	/* add filter for outer headers.
	 * NOTE(review): on success seg appears to be retained by
	 * ice_fdir_set_hw_fltr_rule() (it is only freed here on error or
	 * -EEXIST) - confirm ownership before changing the free logic.
	 */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
					ICE_FD_HW_SEG_NON_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		ret = 0;
		goto err_exit;
	} else if (ret) {
		/* could not write filter, free memory */
		goto err_exit;
	}

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
					ICE_FD_HW_SEG_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		devm_kfree(dev, tun_seg);
		ret = 0;
	} else if (ret) {
		/* could not write tunnel filter, but outer filter exists */
		devm_kfree(dev, tun_seg);
	}

	return ret;

err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return ret;
}
1323
1324/**
1325 * ice_update_per_q_fltr
1326 * @vsi: ptr to VSI
1327 * @q_index: queue index
1328 * @inc: true to increment or false to decrement per queue filter count
1329 *
1330 * This function is used to keep track of per queue sideband filters
1331 */
1332static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
1333{
1334	struct ice_rx_ring *rx_ring;
1335
1336	if (!vsi->num_rxq || q_index >= vsi->num_rxq)
1337		return;
1338
1339	rx_ring = vsi->rx_rings[q_index];
1340	if (!rx_ring || !rx_ring->ch)
1341		return;
1342
1343	if (inc)
1344		atomic_inc(&rx_ring->ch->num_sb_fltr);
1345	else
1346		atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
1347}
1348
1349/**
1350 * ice_fdir_write_fltr - send a flow director filter to the hardware
1351 * @pf: PF data structure
1352 * @input: filter structure
1353 * @add: true adds filter and false removed filter
1354 * @is_tun: true adds inner filter on tunnel and false outer headers
1355 *
1356 * returns 0 on success and negative value on error
1357 */
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_fltr_desc desc;
	struct ice_vsi *ctrl_vsi;
	u8 *pkt, *frag_pkt;
	bool has_frag;
	int err;

	/* programming descriptors are issued through the control VSI */
	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;
	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!frag_pkt) {
		err = -ENOMEM;
		goto err_free;
	}

	/* build the programming descriptor and its raw training packet */
	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (err)
		goto err_free_all;
	/* NOTE(review): on success pkt appears to be handed to the ctrl VSI
	 * Tx ring (it is not freed on the success paths below) - confirm it
	 * is released when the programming descriptor completes.
	 */
	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (err)
		goto err_free_all;

	/* repeat for fragment packet */
	has_frag = ice_fdir_has_frag(input->flow_type);
	if (has_frag) {
		/* does not return error */
		ice_fdir_get_prgm_desc(hw, input, &desc, add);
		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
						is_tun);
		if (err)
			goto err_frag;
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
		if (err)
			goto err_frag;
	} else {
		/* no fragment rule for this flow type; drop unused buffer */
		devm_kfree(dev, frag_pkt);
	}

	return 0;

err_free_all:
	devm_kfree(dev, frag_pkt);
err_free:
	devm_kfree(dev, pkt);
	return err;

err_frag:
	/* pkt was already handed to HW for the non-fragment rule */
	devm_kfree(dev, frag_pkt);
	return err;
}
1419
1420/**
1421 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
1422 * @pf: PF data structure
1423 * @input: filter structure
1424 * @add: true adds filter and false removed filter
1425 *
1426 * returns 0 on success and negative value on error
1427 */
1428static int
1429ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
1430			bool add)
1431{
1432	u16 port_num;
1433	int tun;
1434
1435	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
1436		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
1437		int err;
1438
1439		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
1440			continue;
1441		err = ice_fdir_write_fltr(pf, input, add, is_tun);
1442		if (err)
1443			return err;
1444	}
1445	return 0;
1446}
1447
1448/**
1449 * ice_fdir_replay_fltrs - replay filters from the HW filter list
1450 * @pf: board private structure
1451 */
1452void ice_fdir_replay_fltrs(struct ice_pf *pf)
1453{
1454	struct ice_fdir_fltr *f_rule;
1455	struct ice_hw *hw = &pf->hw;
1456
1457	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
1458		int err = ice_fdir_write_all_fltr(pf, f_rule, true);
1459
1460		if (err)
1461			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
1462				err, f_rule->fltr_id);
1463	}
1464}
1465
1466/**
1467 * ice_fdir_create_dflt_rules - create default perfect filters
1468 * @pf: PF data structure
1469 *
1470 * Returns 0 for success or error.
1471 */
1472int ice_fdir_create_dflt_rules(struct ice_pf *pf)
1473{
1474	int err;
1475
1476	/* Create perfect TCP and UDP rules in hardware. */
1477	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
1478	if (err)
1479		return err;
1480
1481	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
1482	if (err)
1483		return err;
1484
1485	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
1486	if (err)
1487		return err;
1488
1489	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
1490
1491	return err;
1492}
1493
1494/**
1495 * ice_fdir_del_all_fltrs - Delete all flow director filters
1496 * @vsi: the VSI being changed
1497 *
1498 * This function needs to be called while holding hw->fdir_fltr_lock
1499 */
1500void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
1501{
1502	struct ice_fdir_fltr *f_rule, *tmp;
1503	struct ice_pf *pf = vsi->back;
1504	struct ice_hw *hw = &pf->hw;
1505
1506	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
1507		ice_fdir_write_all_fltr(pf, f_rule, false);
1508		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
1509		list_del(&f_rule->fltr_node);
1510		devm_kfree(ice_pf_to_dev(pf), f_rule);
1511	}
1512}
1513
1514/**
1515 * ice_vsi_manage_fdir - turn on/off flow director
1516 * @vsi: the VSI being changed
1517 * @ena: boolean value indicating if this is an enable or disable request
1518 */
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_fltr_ptype flow;

	if (ena) {
		/* enable: flag the feature and install the default perfect
		 * TCP/UDP rules
		 */
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		ice_fdir_create_dflt_rules(pf);
		return;
	}

	mutex_lock(&hw->fdir_fltr_lock);
	/* already disabled - nothing to tear down */
	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
		goto release_lock;

	/* drop every SW filter entry along with its HW programming */
	ice_fdir_del_all_fltrs(vsi);

	/* remove any remaining per-flow-type HW profiles */
	if (hw->fdir_prof)
		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
		     flow++)
			if (hw->fdir_prof[flow])
				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
}
1546
1547/**
1548 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
1549 * @pf: PF structure
1550 * @flow_type: FDir flow type to release
1551 */
1552static void
1553ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
1554{
1555	struct ice_hw *hw = &pf->hw;
1556	bool need_perfect = false;
1557
1558	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
1559	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
1560	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
1561	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1562		need_perfect = true;
1563
1564	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
1565		return;
1566
1567	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
1568	if (need_perfect)
1569		ice_create_init_fdir_rule(pf, flow_type);
1570}
1571
1572/**
1573 * ice_fdir_update_list_entry - add or delete a filter from the filter list
1574 * @pf: PF structure
1575 * @input: filter structure
1576 * @fltr_idx: ethtool index of filter to modify
1577 *
1578 * returns 0 on success and negative on errors
1579 */
static int
ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
			   int fltr_idx)
{
	struct ice_fdir_fltr *old_fltr;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	int err = -ENOENT;

	/* Do not update filters during reset */
	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	/* if a filter already occupies this index, remove it from HW and
	 * from the SW list before (optionally) adding the replacement
	 */
	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
	if (old_fltr) {
		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
		if (err)
			return err;
		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
		/* update sb-filters count, specific to ring->channel */
		ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
			/* we just deleted the last filter of flow_type so we
			 * should also delete the HW filter info.
			 */
			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
		list_del(&old_fltr->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), old_fltr);
	}
	/* pure delete: err is 0 if a filter was removed above, or the
	 * initial -ENOENT if nothing existed at fltr_idx
	 */
	if (!input)
		return err;
	ice_fdir_list_add_fltr(hw, input);
	/* update sb-filters count, specific to ring->channel */
	ice_update_per_q_fltr(vsi, input->orig_q_index, true);
	ice_fdir_update_cntrs(hw, input->flow_type, true);
	return 0;
}
1621
1622/**
1623 * ice_del_fdir_ethtool - delete Flow Director filter
1624 * @vsi: pointer to target VSI
1625 * @cmd: command to add or delete Flow Director filter
1626 *
1627 * Returns 0 on success and negative values for failure
1628 */
1629int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1630{
1631	struct ethtool_rx_flow_spec *fsp =
1632		(struct ethtool_rx_flow_spec *)&cmd->fs;
1633	struct ice_pf *pf = vsi->back;
1634	struct ice_hw *hw = &pf->hw;
1635	int val;
1636
1637	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1638		return -EOPNOTSUPP;
1639
1640	/* Do not delete filters during reset */
1641	if (ice_is_reset_in_progress(pf->state)) {
1642		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
1643		return -EBUSY;
1644	}
1645
1646	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
1647		return -EBUSY;
1648
1649	mutex_lock(&hw->fdir_fltr_lock);
1650	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
1651	mutex_unlock(&hw->fdir_fltr_lock);
1652
1653	return val;
1654}
1655
1656/**
1657 * ice_update_ring_dest_vsi - update dest ring and dest VSI
1658 * @vsi: pointer to target VSI
1659 * @dest_vsi: ptr to dest VSI index
1660 * @ring: ptr to dest ring
1661 *
1662 * This function updates destination VSI and queue if user specifies
1663 * target queue which falls in channel's (aka ADQ) queue region
1664 */
1665static void
1666ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
1667{
1668	struct ice_channel *ch;
1669
1670	list_for_each_entry(ch, &vsi->ch_list, list) {
1671		if (!ch->ch_vsi)
1672			continue;
1673
1674		/* make sure to locate corresponding channel based on "queue"
1675		 * specified
1676		 */
1677		if ((*ring < ch->base_q) ||
1678		    (*ring >= (ch->base_q + ch->num_rxq)))
1679			continue;
1680
1681		/* update the dest_vsi based on channel */
1682		*dest_vsi = ch->ch_vsi->idx;
1683
1684		/* update the "ring" to be correct based on channel */
1685		*ring -= ch->base_q;
1686	}
1687}
1688
1689/**
1690 * ice_set_fdir_input_set - Set the input set for Flow Director
1691 * @vsi: pointer to target VSI
1692 * @fsp: pointer to ethtool Rx flow specification
1693 * @input: filter structure
1694 */
static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
		       struct ice_fdir_fltr *input)
{
	u16 dest_vsi, q_index = 0;
	u16 orig_q_index = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int flow_type;
	u8 dest_ctl;

	if (!vsi || !fsp || !input)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;

	/* resolve the destination: drop, or a specific queue (possibly
	 * remapped into an ADQ channel VSI)
	 */
	dest_vsi = vsi->idx;
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (vf) {
			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
			return -EINVAL;
		}

		if (ring >= vsi->num_rxq)
			return -EINVAL;

		orig_q_index = ring;
		ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
		q_index = ring;
	}

	input->fltr_id = fsp->location;
	input->q_index = q_index;
	flow_type = fsp->flow_type & ~FLOW_EXT;

	/* Record the original queue index as specified by user.
	 * with channel configuration 'q_index' becomes relative
	 * to TC (channel).
	 */
	input->orig_q_index = orig_q_index;
	input->dest_vsi = dest_vsi;
	input->dest_ctl = dest_ctl;
	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);

	/* FLOW_EXT carries user-defined data and VLAN match fields */
	if (fsp->flow_type & FLOW_EXT) {
		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
		       sizeof(input->ext_data.usr_def));
		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
		       sizeof(input->ext_mask.usr_def));
		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
	}

	/* copy per-flow-type match values (h_u) and masks (m_u) */
	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
		break;
	case IPV4_USER_FLOW:
		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		/* usr_ip6_spec and tcp_ip6_spec overlay the same union, so
		 * the ip6src/ip6dst copies below read identical bytes
		 */
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
		break;
	case IPV6_USER_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;

		/* if no protocol requested, use IPPROTO_NONE */
		if (!fsp->m_u.usr_ip6_spec.l4_proto)
			input->ip.v6.proto = IPPROTO_NONE;
		else
			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;

		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
		break;
	default:
		/* not doing un-parsed flow types */
		return -EINVAL;
	}

	return 0;
}
1833
1834/**
1835 * ice_add_fdir_ethtool - Add/Remove Flow Director filter
1836 * @vsi: pointer to target VSI
1837 * @cmd: command to add or delete Flow Director filter
1838 *
1839 * Returns 0 on success and negative values for failure
1840 */
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ice_rx_flow_userdef userdata;
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *input;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int fltrs_needed;
	u32 max_location;
	u16 tunnel_port;
	int ret;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not program filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	/* extract flex-word/offset from the user-def field, if present */
	if (ice_parse_rx_flow_user_data(fsp, &userdata))
		return -EINVAL;

	/* matching on MAC extension fields is not supported */
	if (fsp->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	/* program the extraction sequence (HW flow profile) first */
	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
	if (ret)
		return ret;

	max_location = ice_get_fdir_cnt_all(hw);
	if (fsp->location >= max_location) {
		dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceed max %d.\n",
			max_location);
		return -ENOSPC;
	}

	/* return error if not an update and no available filters */
	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
		return -ENOSPC;
	}

	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	/* translate the ethtool spec into the driver's filter structure */
	ret = ice_set_fdir_input_set(vsi, fsp, input);
	if (ret)
		goto free_input;

	mutex_lock(&hw->fdir_fltr_lock);
	if (ice_fdir_is_dup_fltr(hw, input)) {
		ret = -EINVAL;
		goto release_lock;
	}

	if (userdata.flex_fltr) {
		input->flex_fltr = true;
		input->flex_word = cpu_to_be16(userdata.flex_word);
		input->flex_offset = userdata.flex_offset;
	}

	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;

	/* input struct is added to the HW filter list */
	ret = ice_fdir_update_list_entry(pf, input, fsp->location);
	if (ret)
		goto release_lock;

	ret = ice_fdir_write_all_fltr(pf, input, true);
	if (ret)
		goto remove_sw_rule;

	goto release_lock;

remove_sw_rule:
	/* HW programming failed: back out the SW bookkeeping done above */
	ice_fdir_update_cntrs(hw, input->flow_type, false);
	/* update sb-filters count, specific to ring->channel */
	ice_update_per_q_fltr(vsi, input->orig_q_index, false);
	list_del(&input->fltr_node);
release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
free_input:
	/* input is owned by the filter list on success, freed on failure */
	if (ret)
		devm_kfree(dev, input);

	return ret;
}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2018-2020, Intel Corporation. */
   3
   4/* flow director ethtool support for ice */
   5
   6#include "ice.h"
   7#include "ice_lib.h"
 
   8#include "ice_flow.h"
   9
  10static struct in6_addr full_ipv6_addr_mask = {
  11	.in6_u = {
  12		.u6_addr8 = {
  13			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  14			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  15		}
  16	}
  17};
  18
  19static struct in6_addr zero_ipv6_addr_mask = {
  20	.in6_u = {
  21		.u6_addr8 = {
  22			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  23			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  24		}
  25	}
  26};
  27
  28/* calls to ice_flow_add_prof require the number of segments in the array
  29 * for segs_cnt. In this code that is one more than the index.
  30 */
  31#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
  32
  33/**
  34 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
  35 * flow type values
  36 * @flow: filter type to be converted
  37 *
  38 * Returns the corresponding ethtool flow type.
  39 */
  40static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
  41{
  42	switch (flow) {
  43	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
  44		return TCP_V4_FLOW;
  45	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
  46		return UDP_V4_FLOW;
  47	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
  48		return SCTP_V4_FLOW;
  49	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
  50		return IPV4_USER_FLOW;
  51	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
  52		return TCP_V6_FLOW;
  53	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
  54		return UDP_V6_FLOW;
  55	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
  56		return SCTP_V6_FLOW;
  57	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
  58		return IPV6_USER_FLOW;
  59	default:
  60		/* 0 is undefined ethtool flow */
  61		return 0;
  62	}
  63}
  64
  65/**
  66 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
  67 * @eth: Ethtool flow type to be converted
  68 *
  69 * Returns flow enum
  70 */
  71static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
  72{
  73	switch (eth) {
  74	case TCP_V4_FLOW:
  75		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
  76	case UDP_V4_FLOW:
  77		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
  78	case SCTP_V4_FLOW:
  79		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
  80	case IPV4_USER_FLOW:
  81		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
  82	case TCP_V6_FLOW:
  83		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
  84	case UDP_V6_FLOW:
  85		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
  86	case SCTP_V6_FLOW:
  87		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
  88	case IPV6_USER_FLOW:
  89		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
  90	default:
  91		return ICE_FLTR_PTYPE_NONF_NONE;
  92	}
  93}
  94
  95/**
  96 * ice_is_mask_valid - check mask field set
  97 * @mask: full mask to check
  98 * @field: field for which mask should be valid
  99 *
 100 * If the mask is fully set return true. If it is not valid for field return
 101 * false.
 102 */
 103static bool ice_is_mask_valid(u64 mask, u64 field)
 104{
 105	return (mask & field) == field;
 106}
 107
 108/**
 109 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 110 * @hw: hardware structure that contains filter list
 111 * @cmd: ethtool command data structure to receive the filter data
 112 *
 113 * Returns 0 on success and -EINVAL on failure
 114 */
 115int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
 116{
 117	struct ethtool_rx_flow_spec *fsp;
 118	struct ice_fdir_fltr *rule;
 119	int ret = 0;
 120	u16 idx;
 121
 122	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
 123
 124	mutex_lock(&hw->fdir_fltr_lock);
 125
 126	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);
 127
 128	if (!rule || fsp->location != rule->fltr_id) {
 129		ret = -EINVAL;
 130		goto release_lock;
 131	}
 132
 133	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);
 134
 135	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
 136	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
 137
 138	switch (fsp->flow_type) {
 139	case IPV4_USER_FLOW:
 140		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 141		fsp->h_u.usr_ip4_spec.proto = 0;
 142		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
 143		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
 144		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
 145		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
 146		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
 147		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
 148		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
 149		fsp->m_u.usr_ip4_spec.proto = 0;
 150		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
 151		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
 152		break;
 153	case TCP_V4_FLOW:
 154	case UDP_V4_FLOW:
 155	case SCTP_V4_FLOW:
 156		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
 157		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
 158		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
 159		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
 160		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
 161		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
 162		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
 163		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
 164		break;
 165	case IPV6_USER_FLOW:
 166		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
 167		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
 168		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
 169		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
 170		       sizeof(struct in6_addr));
 171		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
 172		       sizeof(struct in6_addr));
 173		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
 174		       sizeof(struct in6_addr));
 175		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
 176		       sizeof(struct in6_addr));
 177		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
 178		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
 179		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
 180		break;
 181	case TCP_V6_FLOW:
 182	case UDP_V6_FLOW:
 183	case SCTP_V6_FLOW:
 184		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
 185		       sizeof(struct in6_addr));
 186		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
 187		       sizeof(struct in6_addr));
 188		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
 189		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
 190		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
 191		       rule->mask.v6.src_ip,
 192		       sizeof(struct in6_addr));
 193		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
 194		       rule->mask.v6.dst_ip,
 195		       sizeof(struct in6_addr));
 196		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
 197		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
 198		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
 199		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
 200		break;
 201	default:
 202		break;
 203	}
 204
 205	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
 206		fsp->ring_cookie = RX_CLS_FLOW_DISC;
 207	else
 208		fsp->ring_cookie = rule->q_index;
 209
 210	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
 211	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
 212		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
 213			rule->flow_type);
 214		ret = -EINVAL;
 215	}
 216
 217release_lock:
 218	mutex_unlock(&hw->fdir_fltr_lock);
 219	return ret;
 220}
 221
 222/**
 223 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 224 * @hw: hardware structure containing the filter list
 225 * @cmd: ethtool command data structure
 226 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 227 *
 228 * Returns 0 as expected for success by ethtool
 229 */
 230int
 231ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
 232		      u32 *rule_locs)
 233{
 234	struct ice_fdir_fltr *f_rule;
 235	unsigned int cnt = 0;
 236	int val = 0;
 237
 238	/* report total rule count */
 239	cmd->data = ice_get_fdir_cnt_all(hw);
 240
 241	mutex_lock(&hw->fdir_fltr_lock);
 242
 243	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
 244		if (cnt == cmd->rule_cnt) {
 245			val = -EMSGSIZE;
 246			goto release_lock;
 247		}
 248		rule_locs[cnt] = f_rule->fltr_id;
 249		cnt++;
 250	}
 251
 252release_lock:
 253	mutex_unlock(&hw->fdir_fltr_lock);
 254	if (!val)
 255		cmd->rule_cnt = cnt;
 256	return val;
 257}
 258
 259/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 260 * ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow
 261 * @hw: hardware structure containing the filter list
 262 * @blk: hardware block
 263 * @flow: FDir flow type to release
 264 */
 265static struct ice_fd_hw_prof *
 266ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
 267{
 268	if (blk == ICE_BLK_FD && hw->fdir_prof)
 269		return hw->fdir_prof[flow];
 270
 271	return NULL;
 272}
 273
 274/**
 275 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 276 * @hw: hardware structure containing the filter list
 277 * @blk: hardware block
 278 * @flow: FDir flow type to release
 279 */
 280static void
 281ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
 282{
 283	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
 284	int tun;
 285
 286	if (!prof)
 287		return;
 288
 289	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 290		u64 prof_id;
 291		int j;
 292
 293		prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
 294		for (j = 0; j < prof->cnt; j++) {
 295			u16 vsi_num;
 296
 297			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
 298				continue;
 299			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
 300			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
 301			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
 302			prof->entry_h[j][tun] = 0;
 303		}
 304		ice_flow_rem_prof(hw, blk, prof_id);
 305	}
 306}
 307
 308/**
 309 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 310 * @hw: hardware structure containing the filter list
 311 * @blk: hardware block
 312 * @flow_type: FDir flow type to release
 313 */
 314static void
 315ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
 316		  enum ice_fltr_ptype flow_type)
 317{
 318	int flow = (int)flow_type & ~FLOW_EXT;
 319	struct ice_fd_hw_prof *prof;
 320	int tun, i;
 321
 322	prof = ice_fdir_get_hw_prof(hw, blk, flow);
 323	if (!prof)
 324		return;
 325
 326	ice_fdir_erase_flow_from_hw(hw, blk, flow);
 327	for (i = 0; i < prof->cnt; i++)
 328		prof->vsi_h[i] = 0;
 329	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 330		if (!prof->fdir_seg[tun])
 331			continue;
 332		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
 333		prof->fdir_seg[tun] = NULL;
 334	}
 335	prof->cnt = 0;
 336}
 337
 338/**
 339 * ice_fdir_release_flows - release all flows in use for later replay
 340 * @hw: pointer to HW instance
 341 */
 342void ice_fdir_release_flows(struct ice_hw *hw)
 343{
 344	int flow;
 345
 346	/* release Flow Director HW table entries */
 347	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
 348		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
 349}
 350
 351/**
 352 * ice_fdir_replay_flows - replay HW Flow Director filter info
 353 * @hw: pointer to HW instance
 354 */
 355void ice_fdir_replay_flows(struct ice_hw *hw)
 356{
 357	int flow;
 358
 359	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
 360		int tun;
 361
 362		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
 363			continue;
 364		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
 365			struct ice_flow_prof *hw_prof;
 366			struct ice_fd_hw_prof *prof;
 367			u64 prof_id;
 368			int j;
 369
 370			prof = hw->fdir_prof[flow];
 371			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
 372			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
 373					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
 374					  &hw_prof);
 375			for (j = 0; j < prof->cnt; j++) {
 376				enum ice_flow_priority prio;
 377				u64 entry_h = 0;
 378				int err;
 379
 380				prio = ICE_FLOW_PRIO_NORMAL;
 381				err = ice_flow_add_entry(hw, ICE_BLK_FD,
 382							 prof_id,
 383							 prof->vsi_h[0],
 384							 prof->vsi_h[j],
 385							 prio, prof->fdir_seg,
 386							 &entry_h);
 387				if (err) {
 388					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
 389						flow);
 390					continue;
 391				}
 
 392				prof->entry_h[j][tun] = entry_h;
 393			}
 394		}
 395	}
 396}
 397
 398/**
 399 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 400 * @fsp: pointer to ethtool Rx flow specification
 401 * @data: pointer to userdef data structure for storage
 402 *
 403 * Returns 0 on success, negative error value on failure
 404 */
 405static int
 406ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 407			    struct ice_rx_flow_userdef *data)
 408{
 409	u64 value, mask;
 410
 411	memset(data, 0, sizeof(*data));
 412	if (!(fsp->flow_type & FLOW_EXT))
 413		return 0;
 414
 415	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
 416	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
 417	if (!mask)
 418		return 0;
 419
 420#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
 421#define ICE_USERDEF_FLEX_OFFS_S	16
 422#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
 423#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)
 424
 425	/* 0x1fe is the maximum value for offsets stored in the internal
 426	 * filtering tables.
 427	 */
 428#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
 429
 430	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
 431	    value > ICE_USERDEF_FLEX_FLTR_M)
 432		return -EINVAL;
 433
 434	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
 435	data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
 436			     ICE_USERDEF_FLEX_OFFS_S;
 437	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
 438		return -EINVAL;
 439
 440	data->flex_fltr = true;
 441
 442	return 0;
 443}
 444
 445/**
 446 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 447 * @hw: pointer to hardware structure
 448 * @vsi: software VSI structure
 449 *
 450 * There are 2 filter pools: guaranteed and best effort(shared). Each VSI can
 451 * use filters from either pool. The guaranteed pool is divided between VSIs.
 452 * The best effort filter pool is common to all VSIs and is a device shared
 453 * resource pool. The number of filters available to this VSI is the sum of
 454 * the VSIs guaranteed filter pool and the global available best effort
 455 * filter pool.
 456 *
 457 * Returns the number of available flow director filters to this VSI
 458 */
 459static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
 460{
 461	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
 462	u16 num_guar;
 463	u16 num_be;
 464
 465	/* total guaranteed filters assigned to this VSI */
 466	num_guar = vsi->num_gfltr;
 467
 468	/* minus the guaranteed filters programed by this VSI */
 469	num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
 470		     VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
 471
 472	/* total global best effort filters */
 473	num_be = hw->func_caps.fd_fltr_best_effort;
 474
 475	/* minus the global best effort filters programmed */
 476	num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
 477		   GLQF_FD_CNT_FD_BCNT_S;
 
 
 
 
 
 
 
 
 
 
 
 
 478
 479	return num_guar + num_be;
 480}
 481
 482/**
 483 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 484 * @hw: HW structure containing the FDir flow profile structure(s)
 485 * @flow: flow type to allocate the flow profile for
 486 *
 487 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 488 * on success and negative on error.
 489 */
 490static int
 491ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
 492{
 493	if (!hw)
 494		return -EINVAL;
 495
 496	if (!hw->fdir_prof) {
 497		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
 498					     ICE_FLTR_PTYPE_MAX,
 499					     sizeof(*hw->fdir_prof),
 500					     GFP_KERNEL);
 501		if (!hw->fdir_prof)
 502			return -ENOMEM;
 503	}
 504
 505	if (!hw->fdir_prof[flow]) {
 506		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
 507						   sizeof(**hw->fdir_prof),
 508						   GFP_KERNEL);
 509		if (!hw->fdir_prof[flow])
 510			return -ENOMEM;
 511	}
 512
 513	return 0;
 514}
 515
 516/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 517 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 518 * @pf: pointer to the PF structure
 519 * @seg: protocol header description pointer
 520 * @flow: filter enum
 521 * @tun: FDir segment to program
 522 */
 523static int
 524ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
 525			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
 526{
 527	struct device *dev = ice_pf_to_dev(pf);
 528	struct ice_vsi *main_vsi, *ctrl_vsi;
 529	struct ice_flow_seg_info *old_seg;
 530	struct ice_flow_prof *prof = NULL;
 531	struct ice_fd_hw_prof *hw_prof;
 532	struct ice_hw *hw = &pf->hw;
 533	enum ice_status status;
 534	u64 entry1_h = 0;
 535	u64 entry2_h = 0;
 536	u64 prof_id;
 537	int err;
 
 538
 539	main_vsi = ice_get_main_vsi(pf);
 540	if (!main_vsi)
 541		return -EINVAL;
 542
 543	ctrl_vsi = ice_get_ctrl_vsi(pf);
 544	if (!ctrl_vsi)
 545		return -EINVAL;
 546
 547	err = ice_fdir_alloc_flow_prof(hw, flow);
 548	if (err)
 549		return err;
 550
 551	hw_prof = hw->fdir_prof[flow];
 552	old_seg = hw_prof->fdir_seg[tun];
 553	if (old_seg) {
 554		/* This flow_type already has a changed input set.
 555		 * If it matches the requested input set then we are
 556		 * done. Or, if it's different then it's an error.
 557		 */
 558		if (!memcmp(old_seg, seg, sizeof(*seg)))
 559			return -EEXIST;
 560
 561		/* if there are FDir filters using this flow,
 562		 * then return error.
 563		 */
 564		if (hw->fdir_fltr_cnt[flow]) {
 565			dev_err(dev, "Failed to add filter.  Flow director filters on each port must have the same input set.\n");
 566			return -EINVAL;
 567		}
 568
 569		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
 570			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
 571				flow);
 572			return -EINVAL;
 573		}
 574
 575		/* remove HW filter definition */
 576		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
 577	}
 578
 579	/* Adding a profile, but there is only one header supported.
 580	 * That is the final parameters are 1 header (segment), no
 581	 * actions (NULL) and zero actions 0.
 582	 */
 583	prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
 584	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
 585				   TNL_SEG_CNT(tun), &prof);
 586	if (status)
 587		return ice_status_to_errno(status);
 588	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
 589				    main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 590				    seg, &entry1_h);
 591	if (status) {
 592		err = ice_status_to_errno(status);
 593		goto err_prof;
 594	}
 595	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
 596				    ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
 597				    seg, &entry2_h);
 598	if (status) {
 599		err = ice_status_to_errno(status);
 600		goto err_entry;
 601	}
 602
 603	hw_prof->fdir_seg[tun] = seg;
 
 604	hw_prof->entry_h[0][tun] = entry1_h;
 605	hw_prof->entry_h[1][tun] = entry2_h;
 606	hw_prof->vsi_h[0] = main_vsi->idx;
 607	hw_prof->vsi_h[1] = ctrl_vsi->idx;
 608	if (!hw_prof->cnt)
 609		hw_prof->cnt = 2;
 610
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 611	return 0;
 612
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 613err_entry:
 614	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
 615			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
 616	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
 617err_prof:
 618	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
 619	dev_err(dev, "Failed to add filter.  Flow director filters on each port must have the same input set.\n");
 620
 621	return err;
 622}
 623
 624/**
 625 * ice_set_init_fdir_seg
 626 * @seg: flow segment for programming
 627 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 628 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 629 *
 630 * Set the configuration for perfect filters to the provided flow segment for
 631 * programming the HW filter. This is to be called only when initializing
 632 * filters as this function it assumes no filters exist.
 633 */
 634static int
 635ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
 636		      enum ice_flow_seg_hdr l3_proto,
 637		      enum ice_flow_seg_hdr l4_proto)
 638{
 639	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
 640
 641	if (!seg)
 642		return -EINVAL;
 643
 644	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
 645		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
 646		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
 647	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
 648		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
 649		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
 650	} else {
 651		return -EINVAL;
 652	}
 653
 654	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 655		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 656		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 657	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 658		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 659		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 660	} else {
 661		return -EINVAL;
 662	}
 663
 664	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
 665
 666	/* IP source address */
 667	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
 668			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 669
 670	/* IP destination address */
 671	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
 672			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 673
 674	/* Layer 4 source port */
 675	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 676			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 677
 678	/* Layer 4 destination port */
 679	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 680			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
 681
 682	return 0;
 683}
 684
 685/**
 686 * ice_create_init_fdir_rule
 687 * @pf: PF structure
 688 * @flow: filter enum
 689 *
 690 * Return error value or 0 on success.
 691 */
 692static int
 693ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
 694{
 695	struct ice_flow_seg_info *seg, *tun_seg;
 696	struct device *dev = ice_pf_to_dev(pf);
 697	struct ice_hw *hw = &pf->hw;
 698	int ret;
 699
 700	/* if there is already a filter rule for kind return -EINVAL */
 701	if (hw->fdir_prof && hw->fdir_prof[flow] &&
 702	    hw->fdir_prof[flow]->fdir_seg[0])
 703		return -EINVAL;
 704
 705	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
 706	if (!seg)
 707		return -ENOMEM;
 708
 709	tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
 710			       GFP_KERNEL);
 711	if (!tun_seg) {
 712		devm_kfree(dev, seg);
 713		return -ENOMEM;
 714	}
 715
 716	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
 717		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
 718					    ICE_FLOW_SEG_HDR_TCP);
 719	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
 720		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
 721					    ICE_FLOW_SEG_HDR_UDP);
 722	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
 723		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
 724					    ICE_FLOW_SEG_HDR_TCP);
 725	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
 726		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
 727					    ICE_FLOW_SEG_HDR_UDP);
 728	else
 729		ret = -EINVAL;
 730	if (ret)
 731		goto err_exit;
 732
 733	/* add filter for outer headers */
 734	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
 735	if (ret)
 736		/* could not write filter, free memory */
 737		goto err_exit;
 738
 739	/* make tunneled filter HW entries if possible */
 740	memcpy(&tun_seg[1], seg, sizeof(*seg));
 741	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
 742	if (ret)
 743		/* could not write tunnel filter, but outer header filter
 744		 * exists
 745		 */
 746		devm_kfree(dev, tun_seg);
 747
 748	set_bit(flow, hw->fdir_perfect_fltr);
 749	return ret;
 750err_exit:
 751	devm_kfree(dev, tun_seg);
 752	devm_kfree(dev, seg);
 753
 754	return -EOPNOTSUPP;
 755}
 756
 757/**
 758 * ice_set_fdir_ip4_seg
 759 * @seg: flow segment for programming
 760 * @tcp_ip4_spec: mask data from ethtool
 761 * @l4_proto: Layer 4 protocol to program
 762 * @perfect_fltr: only valid on success; returns true if perfect filter,
 763 *		  false if not
 764 *
 765 * Set the mask data into the flow segment to be used to program HW
 766 * table based on provided L4 protocol for IPv4
 767 */
 768static int
 769ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
 770		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
 771		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
 772{
 773	enum ice_flow_field src_port, dst_port;
 774
 775	/* make sure we don't have any empty rule */
 776	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
 777	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
 778		return -EINVAL;
 779
 780	/* filtering on TOS not supported */
 781	if (tcp_ip4_spec->tos)
 782		return -EOPNOTSUPP;
 783
 784	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 785		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 786		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 787	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 788		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 789		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 790	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
 791		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
 792		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
 793	} else {
 794		return -EOPNOTSUPP;
 795	}
 796
 797	*perfect_fltr = true;
 798	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);
 799
 800	/* IP source address */
 801	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
 802		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 803				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 804				 ICE_FLOW_FLD_OFF_INVAL, false);
 805	else if (!tcp_ip4_spec->ip4src)
 806		*perfect_fltr = false;
 807	else
 808		return -EOPNOTSUPP;
 809
 810	/* IP destination address */
 811	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
 812		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 813				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 814				 ICE_FLOW_FLD_OFF_INVAL, false);
 815	else if (!tcp_ip4_spec->ip4dst)
 816		*perfect_fltr = false;
 817	else
 818		return -EOPNOTSUPP;
 819
 820	/* Layer 4 source port */
 821	if (tcp_ip4_spec->psrc == htons(0xFFFF))
 822		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 823				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 824				 false);
 825	else if (!tcp_ip4_spec->psrc)
 826		*perfect_fltr = false;
 827	else
 828		return -EOPNOTSUPP;
 829
 830	/* Layer 4 destination port */
 831	if (tcp_ip4_spec->pdst == htons(0xFFFF))
 832		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 833				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 834				 false);
 835	else if (!tcp_ip4_spec->pdst)
 836		*perfect_fltr = false;
 837	else
 838		return -EOPNOTSUPP;
 839
 840	return 0;
 841}
 842
 843/**
 844 * ice_set_fdir_ip4_usr_seg
 845 * @seg: flow segment for programming
 846 * @usr_ip4_spec: ethtool userdef packet offset
 847 * @perfect_fltr: only valid on success; returns true if perfect filter,
 848 *		  false if not
 849 *
 850 * Set the offset data into the flow segment to be used to program HW
 851 * table for IPv4
 852 */
 853static int
 854ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
 855			 struct ethtool_usrip4_spec *usr_ip4_spec,
 856			 bool *perfect_fltr)
 857{
 858	/* first 4 bytes of Layer 4 header */
 859	if (usr_ip4_spec->l4_4_bytes)
 860		return -EINVAL;
 861	if (usr_ip4_spec->tos)
 862		return -EINVAL;
 863	if (usr_ip4_spec->ip_ver)
 864		return -EINVAL;
 865	/* Filtering on Layer 4 protocol not supported */
 866	if (usr_ip4_spec->proto)
 867		return -EOPNOTSUPP;
 868	/* empty rules are not valid */
 869	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
 870		return -EINVAL;
 871
 872	*perfect_fltr = true;
 873	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
 874
 875	/* IP source address */
 876	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
 877		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
 878				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 879				 ICE_FLOW_FLD_OFF_INVAL, false);
 880	else if (!usr_ip4_spec->ip4src)
 881		*perfect_fltr = false;
 882	else
 883		return -EOPNOTSUPP;
 884
 885	/* IP destination address */
 886	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
 887		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 888				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 889				 ICE_FLOW_FLD_OFF_INVAL, false);
 890	else if (!usr_ip4_spec->ip4dst)
 891		*perfect_fltr = false;
 892	else
 893		return -EOPNOTSUPP;
 894
 895	return 0;
 896}
 897
 898/**
 899 * ice_set_fdir_ip6_seg
 900 * @seg: flow segment for programming
 901 * @tcp_ip6_spec: mask data from ethtool
 902 * @l4_proto: Layer 4 protocol to program
 903 * @perfect_fltr: only valid on success; returns true if perfect filter,
 904 *		  false if not
 905 *
 906 * Set the mask data into the flow segment to be used to program HW
 907 * table based on provided L4 protocol for IPv6
 908 */
 909static int
 910ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
 911		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
 912		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
 913{
 914	enum ice_flow_field src_port, dst_port;
 915
 916	/* make sure we don't have any empty rule */
 917	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
 918		    sizeof(struct in6_addr)) &&
 919	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
 920		    sizeof(struct in6_addr)) &&
 921	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
 922		return -EINVAL;
 923
 924	/* filtering on TC not supported */
 925	if (tcp_ip6_spec->tclass)
 926		return -EOPNOTSUPP;
 927
 928	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
 929		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
 930		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
 931	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
 932		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
 933		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
 934	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
 935		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
 936		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
 937	} else {
 938		return -EINVAL;
 939	}
 940
 941	*perfect_fltr = true;
 942	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
 943
 944	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
 945		    sizeof(struct in6_addr)))
 946		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
 947				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 948				 ICE_FLOW_FLD_OFF_INVAL, false);
 949	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
 950			 sizeof(struct in6_addr)))
 951		*perfect_fltr = false;
 952	else
 953		return -EOPNOTSUPP;
 954
 955	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
 956		    sizeof(struct in6_addr)))
 957		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
 958				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 959				 ICE_FLOW_FLD_OFF_INVAL, false);
 960	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
 961			 sizeof(struct in6_addr)))
 962		*perfect_fltr = false;
 963	else
 964		return -EOPNOTSUPP;
 965
 966	/* Layer 4 source port */
 967	if (tcp_ip6_spec->psrc == htons(0xFFFF))
 968		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
 969				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 970				 false);
 971	else if (!tcp_ip6_spec->psrc)
 972		*perfect_fltr = false;
 973	else
 974		return -EOPNOTSUPP;
 975
 976	/* Layer 4 destination port */
 977	if (tcp_ip6_spec->pdst == htons(0xFFFF))
 978		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
 979				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 980				 false);
 981	else if (!tcp_ip6_spec->pdst)
 982		*perfect_fltr = false;
 983	else
 984		return -EOPNOTSUPP;
 985
 986	return 0;
 987}
 988
 989/**
 990 * ice_set_fdir_ip6_usr_seg
 991 * @seg: flow segment for programming
 992 * @usr_ip6_spec: ethtool userdef packet offset
 993 * @perfect_fltr: only valid on success; returns true if perfect filter,
 994 *		  false if not
 995 *
 996 * Set the offset data into the flow segment to be used to program HW
 997 * table for IPv6
 998 */
 999static int
1000ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
1001			 struct ethtool_usrip6_spec *usr_ip6_spec,
1002			 bool *perfect_fltr)
1003{
1004	/* filtering on Layer 4 bytes not supported */
1005	if (usr_ip6_spec->l4_4_bytes)
1006		return -EOPNOTSUPP;
1007	/* filtering on TC not supported */
1008	if (usr_ip6_spec->tclass)
1009		return -EOPNOTSUPP;
1010	/* filtering on Layer 4 protocol not supported */
1011	if (usr_ip6_spec->l4_proto)
1012		return -EOPNOTSUPP;
1013	/* empty rules are not valid */
1014	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1015		    sizeof(struct in6_addr)) &&
1016	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1017		    sizeof(struct in6_addr)))
1018		return -EINVAL;
1019
1020	*perfect_fltr = true;
1021	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
1022
1023	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
1024		    sizeof(struct in6_addr)))
1025		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
1026				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1027				 ICE_FLOW_FLD_OFF_INVAL, false);
1028	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
1029			 sizeof(struct in6_addr)))
1030		*perfect_fltr = false;
1031	else
1032		return -EOPNOTSUPP;
1033
1034	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
1035		    sizeof(struct in6_addr)))
1036		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
1037				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1038				 ICE_FLOW_FLD_OFF_INVAL, false);
1039	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
1040			 sizeof(struct in6_addr)))
1041		*perfect_fltr = false;
1042	else
1043		return -EOPNOTSUPP;
1044
1045	return 0;
1046}
1047
1048/**
1049 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
1050 * @pf: PF structure
1051 * @fsp: pointer to ethtool Rx flow specification
1052 * @user: user defined data from flow specification
1053 *
1054 * Returns 0 on success.
1055 */
1056static int
1057ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
1058		       struct ice_rx_flow_userdef *user)
1059{
1060	struct ice_flow_seg_info *seg, *tun_seg;
1061	struct device *dev = ice_pf_to_dev(pf);
1062	enum ice_fltr_ptype fltr_idx;
1063	struct ice_hw *hw = &pf->hw;
1064	bool perfect_filter;
1065	int ret;
1066
1067	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
1068	if (!seg)
1069		return -ENOMEM;
1070
1071	tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
1072			       GFP_KERNEL);
1073	if (!tun_seg) {
1074		devm_kfree(dev, seg);
1075		return -ENOMEM;
1076	}
1077
1078	switch (fsp->flow_type & ~FLOW_EXT) {
1079	case TCP_V4_FLOW:
1080		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1081					   ICE_FLOW_SEG_HDR_TCP,
1082					   &perfect_filter);
1083		break;
1084	case UDP_V4_FLOW:
1085		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1086					   ICE_FLOW_SEG_HDR_UDP,
1087					   &perfect_filter);
1088		break;
1089	case SCTP_V4_FLOW:
1090		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
1091					   ICE_FLOW_SEG_HDR_SCTP,
1092					   &perfect_filter);
1093		break;
1094	case IPV4_USER_FLOW:
1095		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
1096					       &perfect_filter);
1097		break;
1098	case TCP_V6_FLOW:
1099		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1100					   ICE_FLOW_SEG_HDR_TCP,
1101					   &perfect_filter);
1102		break;
1103	case UDP_V6_FLOW:
1104		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1105					   ICE_FLOW_SEG_HDR_UDP,
1106					   &perfect_filter);
1107		break;
1108	case SCTP_V6_FLOW:
1109		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
1110					   ICE_FLOW_SEG_HDR_SCTP,
1111					   &perfect_filter);
1112		break;
1113	case IPV6_USER_FLOW:
1114		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
1115					       &perfect_filter);
1116		break;
1117	default:
1118		ret = -EINVAL;
1119	}
1120	if (ret)
1121		goto err_exit;
1122
1123	/* tunnel segments are shifted up one. */
1124	memcpy(&tun_seg[1], seg, sizeof(*seg));
1125
1126	if (user && user->flex_fltr) {
1127		perfect_filter = false;
1128		ice_flow_add_fld_raw(seg, user->flex_offset,
1129				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1130				     ICE_FLOW_FLD_OFF_INVAL,
1131				     ICE_FLOW_FLD_OFF_INVAL);
1132		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
1133				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1134				     ICE_FLOW_FLD_OFF_INVAL,
1135				     ICE_FLOW_FLD_OFF_INVAL);
1136	}
1137
 
 
 
 
1138	/* add filter for outer headers */
1139	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
1140	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
1141					ICE_FD_HW_SEG_NON_TUN);
1142	if (ret == -EEXIST)
1143		/* Rule already exists, free memory and continue */
1144		devm_kfree(dev, seg);
1145	else if (ret)
 
1146		/* could not write filter, free memory */
1147		goto err_exit;
 
1148
1149	/* make tunneled filter HW entries if possible */
1150	memcpy(&tun_seg[1], seg, sizeof(*seg));
1151	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
1152					ICE_FD_HW_SEG_TUN);
1153	if (ret == -EEXIST) {
1154		/* Rule already exists, free memory and count as success */
1155		devm_kfree(dev, tun_seg);
1156		ret = 0;
1157	} else if (ret) {
1158		/* could not write tunnel filter, but outer filter exists */
1159		devm_kfree(dev, tun_seg);
1160	}
1161
1162	if (perfect_filter)
1163		set_bit(fltr_idx, hw->fdir_perfect_fltr);
1164	else
1165		clear_bit(fltr_idx, hw->fdir_perfect_fltr);
1166
1167	return ret;
1168
1169err_exit:
1170	devm_kfree(dev, tun_seg);
1171	devm_kfree(dev, seg);
1172
1173	return -EOPNOTSUPP;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1174}
1175
1176/**
1177 * ice_fdir_write_fltr - send a flow director filter to the hardware
1178 * @pf: PF data structure
1179 * @input: filter structure
1180 * @add: true adds filter and false removed filter
1181 * @is_tun: true adds inner filter on tunnel and false outer headers
1182 *
1183 * returns 0 on success and negative value on error
1184 */
1185int
1186ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
1187		    bool is_tun)
1188{
1189	struct device *dev = ice_pf_to_dev(pf);
1190	struct ice_hw *hw = &pf->hw;
1191	struct ice_fltr_desc desc;
1192	struct ice_vsi *ctrl_vsi;
1193	enum ice_status status;
1194	u8 *pkt, *frag_pkt;
1195	bool has_frag;
1196	int err;
1197
1198	ctrl_vsi = ice_get_ctrl_vsi(pf);
1199	if (!ctrl_vsi)
1200		return -EINVAL;
1201
1202	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1203	if (!pkt)
1204		return -ENOMEM;
1205	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1206	if (!frag_pkt) {
1207		err = -ENOMEM;
1208		goto err_free;
1209	}
1210
1211	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1212	status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1213	if (status) {
1214		err = ice_status_to_errno(status);
1215		goto err_free_all;
1216	}
1217	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1218	if (err)
1219		goto err_free_all;
1220
1221	/* repeat for fragment packet */
1222	has_frag = ice_fdir_has_frag(input->flow_type);
1223	if (has_frag) {
1224		/* does not return error */
1225		ice_fdir_get_prgm_desc(hw, input, &desc, add);
1226		status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
1227						   is_tun);
1228		if (status) {
1229			err = ice_status_to_errno(status);
1230			goto err_frag;
1231		}
1232		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
1233		if (err)
1234			goto err_frag;
1235	} else {
1236		devm_kfree(dev, frag_pkt);
1237	}
1238
1239	return 0;
1240
1241err_free_all:
1242	devm_kfree(dev, frag_pkt);
1243err_free:
1244	devm_kfree(dev, pkt);
1245	return err;
1246
1247err_frag:
1248	devm_kfree(dev, frag_pkt);
1249	return err;
1250}
1251
1252/**
1253 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
1254 * @pf: PF data structure
1255 * @input: filter structure
1256 * @add: true adds filter and false removed filter
1257 *
1258 * returns 0 on success and negative value on error
1259 */
1260static int
1261ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
1262			bool add)
1263{
1264	u16 port_num;
1265	int tun;
1266
1267	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
1268		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
1269		int err;
1270
1271		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
1272			continue;
1273		err = ice_fdir_write_fltr(pf, input, add, is_tun);
1274		if (err)
1275			return err;
1276	}
1277	return 0;
1278}
1279
1280/**
1281 * ice_fdir_replay_fltrs - replay filters from the HW filter list
1282 * @pf: board private structure
1283 */
1284void ice_fdir_replay_fltrs(struct ice_pf *pf)
1285{
1286	struct ice_fdir_fltr *f_rule;
1287	struct ice_hw *hw = &pf->hw;
1288
1289	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
1290		int err = ice_fdir_write_all_fltr(pf, f_rule, true);
1291
1292		if (err)
1293			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
1294				err, f_rule->fltr_id);
1295	}
1296}
1297
1298/**
1299 * ice_fdir_create_dflt_rules - create default perfect filters
1300 * @pf: PF data structure
1301 *
1302 * Returns 0 for success or error.
1303 */
1304int ice_fdir_create_dflt_rules(struct ice_pf *pf)
1305{
1306	int err;
1307
1308	/* Create perfect TCP and UDP rules in hardware. */
1309	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
1310	if (err)
1311		return err;
1312
1313	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
1314	if (err)
1315		return err;
1316
1317	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
1318	if (err)
1319		return err;
1320
1321	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
1322
1323	return err;
1324}
1325
1326/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1327 * ice_vsi_manage_fdir - turn on/off flow director
1328 * @vsi: the VSI being changed
1329 * @ena: boolean value indicating if this is an enable or disable request
1330 */
1331void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
1332{
1333	struct ice_fdir_fltr *f_rule, *tmp;
1334	struct ice_pf *pf = vsi->back;
1335	struct ice_hw *hw = &pf->hw;
1336	enum ice_fltr_ptype flow;
1337
1338	if (ena) {
1339		set_bit(ICE_FLAG_FD_ENA, pf->flags);
1340		ice_fdir_create_dflt_rules(pf);
1341		return;
1342	}
1343
1344	mutex_lock(&hw->fdir_fltr_lock);
1345	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
1346		goto release_lock;
1347	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
1348		/* ignore return value */
1349		ice_fdir_write_all_fltr(pf, f_rule, false);
1350		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
1351		list_del(&f_rule->fltr_node);
1352		devm_kfree(ice_hw_to_dev(hw), f_rule);
1353	}
1354
1355	if (hw->fdir_prof)
1356		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
1357		     flow++)
1358			if (hw->fdir_prof[flow])
1359				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
1360
1361release_lock:
1362	mutex_unlock(&hw->fdir_fltr_lock);
1363}
1364
1365/**
1366 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
1367 * @pf: PF structure
1368 * @flow_type: FDir flow type to release
1369 */
1370static void
1371ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
1372{
1373	struct ice_hw *hw = &pf->hw;
1374	bool need_perfect = false;
1375
1376	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
1377	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
1378	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
1379	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1380		need_perfect = true;
1381
1382	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
1383		return;
1384
1385	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
1386	if (need_perfect)
1387		ice_create_init_fdir_rule(pf, flow_type);
1388}
1389
1390/**
1391 * ice_fdir_update_list_entry - add or delete a filter from the filter list
1392 * @pf: PF structure
1393 * @input: filter structure
1394 * @fltr_idx: ethtool index of filter to modify
1395 *
1396 * returns 0 on success and negative on errors
1397 */
1398static int
1399ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
1400			   int fltr_idx)
1401{
1402	struct ice_fdir_fltr *old_fltr;
1403	struct ice_hw *hw = &pf->hw;
 
1404	int err = -ENOENT;
1405
1406	/* Do not update filters during reset */
1407	if (ice_is_reset_in_progress(pf->state))
1408		return -EBUSY;
1409
 
 
 
 
1410	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
1411	if (old_fltr) {
1412		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
1413		if (err)
1414			return err;
1415		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
 
 
1416		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
1417			/* we just deleted the last filter of flow_type so we
1418			 * should also delete the HW filter info.
1419			 */
1420			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
1421		list_del(&old_fltr->fltr_node);
1422		devm_kfree(ice_hw_to_dev(hw), old_fltr);
1423	}
1424	if (!input)
1425		return err;
1426	ice_fdir_list_add_fltr(hw, input);
 
 
1427	ice_fdir_update_cntrs(hw, input->flow_type, true);
1428	return 0;
1429}
1430
1431/**
1432 * ice_del_fdir_ethtool - delete Flow Director filter
1433 * @vsi: pointer to target VSI
1434 * @cmd: command to add or delete Flow Director filter
1435 *
1436 * Returns 0 on success and negative values for failure
1437 */
1438int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1439{
1440	struct ethtool_rx_flow_spec *fsp =
1441		(struct ethtool_rx_flow_spec *)&cmd->fs;
1442	struct ice_pf *pf = vsi->back;
1443	struct ice_hw *hw = &pf->hw;
1444	int val;
1445
1446	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1447		return -EOPNOTSUPP;
1448
1449	/* Do not delete filters during reset */
1450	if (ice_is_reset_in_progress(pf->state)) {
1451		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
1452		return -EBUSY;
1453	}
1454
1455	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
1456		return -EBUSY;
1457
1458	mutex_lock(&hw->fdir_fltr_lock);
1459	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
1460	mutex_unlock(&hw->fdir_fltr_lock);
1461
1462	return val;
1463}
1464
1465/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1466 * ice_set_fdir_input_set - Set the input set for Flow Director
1467 * @vsi: pointer to target VSI
1468 * @fsp: pointer to ethtool Rx flow specification
1469 * @input: filter structure
1470 */
1471static int
1472ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
1473		       struct ice_fdir_fltr *input)
1474{
1475	u16 dest_vsi, q_index = 0;
 
1476	struct ice_pf *pf;
1477	struct ice_hw *hw;
1478	int flow_type;
1479	u8 dest_ctl;
1480
1481	if (!vsi || !fsp || !input)
1482		return -EINVAL;
1483
1484	pf = vsi->back;
1485	hw = &pf->hw;
1486
1487	dest_vsi = vsi->idx;
1488	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1489		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1490	} else {
1491		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
1492		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
1493
1494		if (vf) {
1495			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
1496			return -EINVAL;
1497		}
1498
1499		if (ring >= vsi->num_rxq)
1500			return -EINVAL;
1501
 
 
1502		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1503		q_index = ring;
1504	}
1505
1506	input->fltr_id = fsp->location;
1507	input->q_index = q_index;
1508	flow_type = fsp->flow_type & ~FLOW_EXT;
1509
 
 
 
 
 
1510	input->dest_vsi = dest_vsi;
1511	input->dest_ctl = dest_ctl;
1512	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
1513	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
1514	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);
1515
1516	if (fsp->flow_type & FLOW_EXT) {
1517		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
1518		       sizeof(input->ext_data.usr_def));
1519		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
1520		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
1521		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
1522		       sizeof(input->ext_mask.usr_def));
1523		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
1524		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
1525	}
1526
1527	switch (flow_type) {
1528	case TCP_V4_FLOW:
1529	case UDP_V4_FLOW:
1530	case SCTP_V4_FLOW:
1531		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1532		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1533		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1534		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1535		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1536		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1537		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1538		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1539		break;
1540	case IPV4_USER_FLOW:
1541		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1542		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1543		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1544		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
1545		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
1546		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
1547		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1548		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1549		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1550		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
1551		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
1552		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
1553		break;
1554	case TCP_V6_FLOW:
1555	case UDP_V6_FLOW:
1556	case SCTP_V6_FLOW:
1557		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1558		       sizeof(struct in6_addr));
1559		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1560		       sizeof(struct in6_addr));
1561		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1562		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1563		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
1564		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
1565		       sizeof(struct in6_addr));
1566		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
1567		       sizeof(struct in6_addr));
1568		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1569		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1570		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
1571		break;
1572	case IPV6_USER_FLOW:
1573		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1574		       sizeof(struct in6_addr));
1575		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1576		       sizeof(struct in6_addr));
1577		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1578		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
1579
1580		/* if no protocol requested, use IPPROTO_NONE */
1581		if (!fsp->m_u.usr_ip6_spec.l4_proto)
1582			input->ip.v6.proto = IPPROTO_NONE;
1583		else
1584			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1585
1586		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1587		       sizeof(struct in6_addr));
1588		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1589		       sizeof(struct in6_addr));
1590		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1591		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
1592		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1593		break;
1594	default:
1595		/* not doing un-parsed flow types */
1596		return -EINVAL;
1597	}
1598
1599	return 0;
1600}
1601
1602/**
1603 * ice_add_fdir_ethtool - Add/Remove Flow Director filter
1604 * @vsi: pointer to target VSI
1605 * @cmd: command to add or delete Flow Director filter
1606 *
1607 * Returns 0 on success and negative values for failure
1608 */
1609int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1610{
1611	struct ice_rx_flow_userdef userdata;
1612	struct ethtool_rx_flow_spec *fsp;
1613	struct ice_fdir_fltr *input;
1614	struct device *dev;
1615	struct ice_pf *pf;
1616	struct ice_hw *hw;
1617	int fltrs_needed;
 
1618	u16 tunnel_port;
1619	int ret;
1620
1621	if (!vsi)
1622		return -EINVAL;
1623
1624	pf = vsi->back;
1625	hw = &pf->hw;
1626	dev = ice_pf_to_dev(pf);
1627
1628	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1629		return -EOPNOTSUPP;
1630
1631	/* Do not program filters during reset */
1632	if (ice_is_reset_in_progress(pf->state)) {
1633		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
1634		return -EBUSY;
1635	}
1636
1637	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1638
1639	if (ice_parse_rx_flow_user_data(fsp, &userdata))
1640		return -EINVAL;
1641
1642	if (fsp->flow_type & FLOW_MAC_EXT)
1643		return -EINVAL;
1644
1645	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
1646	if (ret)
1647		return ret;
1648
1649	if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
1650		dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
 
 
1651		return -ENOSPC;
1652	}
1653
1654	/* return error if not an update and no available filters */
1655	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
1656	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
1657	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
1658		dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
1659		return -ENOSPC;
1660	}
1661
1662	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
1663	if (!input)
1664		return -ENOMEM;
1665
1666	ret = ice_set_fdir_input_set(vsi, fsp, input);
1667	if (ret)
1668		goto free_input;
1669
1670	mutex_lock(&hw->fdir_fltr_lock);
1671	if (ice_fdir_is_dup_fltr(hw, input)) {
1672		ret = -EINVAL;
1673		goto release_lock;
1674	}
1675
1676	if (userdata.flex_fltr) {
1677		input->flex_fltr = true;
1678		input->flex_word = cpu_to_be16(userdata.flex_word);
1679		input->flex_offset = userdata.flex_offset;
1680	}
1681
1682	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1683	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1684	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
1685
1686	/* input struct is added to the HW filter list */
1687	ice_fdir_update_list_entry(pf, input, fsp->location);
 
 
1688
1689	ret = ice_fdir_write_all_fltr(pf, input, true);
1690	if (ret)
1691		goto remove_sw_rule;
1692
1693	goto release_lock;
1694
1695remove_sw_rule:
1696	ice_fdir_update_cntrs(hw, input->flow_type, false);
 
 
1697	list_del(&input->fltr_node);
1698release_lock:
1699	mutex_unlock(&hw->fdir_fltr_lock);
1700free_input:
1701	if (ret)
1702		devm_kfree(dev, input);
1703
1704	return ret;
1705}