   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2019, Intel Corporation. */
   3
   4#include "ice_common.h"
   5#include "ice_flex_pipe.h"
   6#include "ice_flow.h"
   7#include "ice.h"
   8
   9static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
  10	/* SWITCH */
  11	{
  12		ICE_SID_XLT0_SW,
  13		ICE_SID_XLT_KEY_BUILDER_SW,
  14		ICE_SID_XLT1_SW,
  15		ICE_SID_XLT2_SW,
  16		ICE_SID_PROFID_TCAM_SW,
  17		ICE_SID_PROFID_REDIR_SW,
  18		ICE_SID_FLD_VEC_SW,
  19		ICE_SID_CDID_KEY_BUILDER_SW,
  20		ICE_SID_CDID_REDIR_SW
  21	},
  22
  23	/* ACL */
  24	{
  25		ICE_SID_XLT0_ACL,
  26		ICE_SID_XLT_KEY_BUILDER_ACL,
  27		ICE_SID_XLT1_ACL,
  28		ICE_SID_XLT2_ACL,
  29		ICE_SID_PROFID_TCAM_ACL,
  30		ICE_SID_PROFID_REDIR_ACL,
  31		ICE_SID_FLD_VEC_ACL,
  32		ICE_SID_CDID_KEY_BUILDER_ACL,
  33		ICE_SID_CDID_REDIR_ACL
  34	},
  35
  36	/* FD */
  37	{
  38		ICE_SID_XLT0_FD,
  39		ICE_SID_XLT_KEY_BUILDER_FD,
  40		ICE_SID_XLT1_FD,
  41		ICE_SID_XLT2_FD,
  42		ICE_SID_PROFID_TCAM_FD,
  43		ICE_SID_PROFID_REDIR_FD,
  44		ICE_SID_FLD_VEC_FD,
  45		ICE_SID_CDID_KEY_BUILDER_FD,
  46		ICE_SID_CDID_REDIR_FD
  47	},
  48
  49	/* RSS */
  50	{
  51		ICE_SID_XLT0_RSS,
  52		ICE_SID_XLT_KEY_BUILDER_RSS,
  53		ICE_SID_XLT1_RSS,
  54		ICE_SID_XLT2_RSS,
  55		ICE_SID_PROFID_TCAM_RSS,
  56		ICE_SID_PROFID_REDIR_RSS,
  57		ICE_SID_FLD_VEC_RSS,
  58		ICE_SID_CDID_KEY_BUILDER_RSS,
  59		ICE_SID_CDID_REDIR_RSS
  60	},
  61
  62	/* PE */
  63	{
  64		ICE_SID_XLT0_PE,
  65		ICE_SID_XLT_KEY_BUILDER_PE,
  66		ICE_SID_XLT1_PE,
  67		ICE_SID_XLT2_PE,
  68		ICE_SID_PROFID_TCAM_PE,
  69		ICE_SID_PROFID_REDIR_PE,
  70		ICE_SID_FLD_VEC_PE,
  71		ICE_SID_CDID_KEY_BUILDER_PE,
  72		ICE_SID_CDID_REDIR_PE
  73	}
  74};
  75
  76/**
  77 * ice_sect_id - returns section ID
  78 * @blk: block type
  79 * @sect: section type
  80 *
  81 * This helper function returns the proper section ID given a block type and a
  82 * section type.
  83 */
  84static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
  85{
  86	return ice_sect_lkup[blk][sect];
  87}
  88
  89/**
  90 * ice_hw_ptype_ena - check if the PTYPE is enabled or not
  91 * @hw: pointer to the HW structure
  92 * @ptype: the hardware PTYPE
  93 */
  94bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
  95{
  96	return ptype < ICE_FLOW_PTYPE_MAX &&
  97	       test_bit(ptype, hw->hw_ptype);
  98}
  99
 100/* Key creation */
 101
 102#define ICE_DC_KEY	0x1	/* don't care */
 103#define ICE_DC_KEYINV	0x1
 104#define ICE_NM_KEY	0x0	/* never match */
 105#define ICE_NM_KEYINV	0x0
 106#define ICE_0_KEY	0x1	/* match 0 */
 107#define ICE_0_KEYINV	0x0
 108#define ICE_1_KEY	0x0	/* match 1 */
 109#define ICE_1_KEYINV	0x1
 110
 111/**
  112 * ice_gen_key_word - generate 16 bits of a key/mask word
 113 * @val: the value
 114 * @valid: valid bits mask (change only the valid bits)
 115 * @dont_care: don't care mask
 116 * @nvr_mtch: never match mask
 117 * @key: pointer to an array of where the resulting key portion
 118 * @key_inv: pointer to an array of where the resulting key invert portion
 119 *
  120 * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask
  121 * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits
 122 * of key and 8 bits of key invert.
 123 *
 124 *     '0' =    b01, always match a 0 bit
 125 *     '1' =    b10, always match a 1 bit
 126 *     '?' =    b11, don't care bit (always matches)
 127 *     '~' =    b00, never match bit
 128 *
 129 * Input:
 130 *          val:         b0  1  0  1  0  1
 131 *          dont_care:   b0  0  1  1  0  0
 132 *          never_mtch:  b0  0  0  0  1  1
 133 *          ------------------------------
 134 * Result:  key:        b01 10 11 11 00 00
 135 */
 136static int
 137ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
 138		 u8 *key_inv)
 139{
 140	u8 in_key = *key, in_key_inv = *key_inv;
 141	u8 i;
 142
 143	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
 144	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
 145		return -EIO;
 146
 147	*key = 0;
 148	*key_inv = 0;
 149
 150	/* encode the 8 bits into 8-bit key and 8-bit key invert */
 151	for (i = 0; i < 8; i++) {
 152		*key >>= 1;
 153		*key_inv >>= 1;
 154
 155		if (!(valid & 0x1)) { /* change only valid bits */
 156			*key |= (in_key & 0x1) << 7;
 157			*key_inv |= (in_key_inv & 0x1) << 7;
 158		} else if (dont_care & 0x1) { /* don't care bit */
 159			*key |= ICE_DC_KEY << 7;
 160			*key_inv |= ICE_DC_KEYINV << 7;
 161		} else if (nvr_mtch & 0x1) { /* never match bit */
 162			*key |= ICE_NM_KEY << 7;
 163			*key_inv |= ICE_NM_KEYINV << 7;
 164		} else if (val & 0x01) { /* exact 1 match */
 165			*key |= ICE_1_KEY << 7;
 166			*key_inv |= ICE_1_KEYINV << 7;
 167		} else { /* exact 0 match */
 168			*key |= ICE_0_KEY << 7;
 169			*key_inv |= ICE_0_KEYINV << 7;
 170		}
 171
 172		dont_care >>= 1;
 173		nvr_mtch >>= 1;
 174		valid >>= 1;
 175		val >>= 1;
 176		in_key >>= 1;
 177		in_key_inv >>= 1;
 178	}
 179
 180	return 0;
 181}
 182
 183/**
 184 * ice_bits_max_set - determine if the number of bits set is within a maximum
 185 * @mask: pointer to the byte array which is the mask
 186 * @size: the number of bytes in the mask
 187 * @max: the max number of set bits
 188 *
 189 * This function determines if there are at most 'max' number of bits set in an
  190 * array. Returns true if the number of bits set is <= max, false
  191 * otherwise.
 192 */
 193static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
 194{
 195	u16 count = 0;
 196	u16 i;
 197
 198	/* check each byte */
 199	for (i = 0; i < size; i++) {
 200		/* if 0, go to next byte */
 201		if (!mask[i])
 202			continue;
 203
 204		/* We know there is at least one set bit in this byte because of
 205		 * the above check; if we already have found 'max' number of
 206		 * bits set, then we can return failure now.
 207		 */
 208		if (count == max)
 209			return false;
 210
 211		/* count the bits in this byte, checking threshold */
 212		count += hweight8(mask[i]);
 213		if (count > max)
 214			return false;
 215	}
 216
 217	return true;
 218}
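/* Illustration (hypothetical values, a minimal sketch): the mask
 * { 0x01, 0x00, 0x02 } has two bits set in total, so
 * ice_bits_max_set(mask, 3, 1) returns false while
 * ice_bits_max_set(mask, 3, 2) returns true.
 */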
 219
 220/**
 221 * ice_set_key - generate a variable sized key with multiples of 16-bits
 222 * @key: pointer to where the key will be stored
 223 * @size: the size of the complete key in bytes (must be even)
 224 * @val: array of 8-bit values that makes up the value portion of the key
 225 * @upd: array of 8-bit masks that determine what key portion to update
 226 * @dc: array of 8-bit masks that make up the don't care mask
 227 * @nm: array of 8-bit masks that make up the never match mask
 228 * @off: the offset of the first byte in the key to update
 229 * @len: the number of bytes in the key update
 230 *
 231 * This function generates a key from a value, a don't care mask and a never
 232 * match mask.
 233 * upd, dc, and nm are optional parameters, and can be NULL:
 234 *	upd == NULL --> upd mask is all 1's (update all bits)
 235 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 236 *	nm == NULL --> nm mask is all 0's (no never match bits)
 237 */
 238static int
 239ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
 240	    u16 len)
 241{
 242	u16 half_size;
 243	u16 i;
 244
 245	/* size must be a multiple of 2 bytes. */
 246	if (size % 2)
 247		return -EIO;
 248
 249	half_size = size / 2;
 250	if (off + len > half_size)
 251		return -EIO;
 252
  253	/* Make sure at most one bit is set in the never match mask. Having
  254	 * more than one never match mask bit set would cause HW to consume
  255	 * excessive power; this is a power management efficiency check.
 256	 */
 257#define ICE_NVR_MTCH_BITS_MAX	1
 258	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
 259		return -EIO;
 260
 261	for (i = 0; i < len; i++)
 262		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
 263				     dc ? dc[i] : 0, nm ? nm[i] : 0,
 264				     key + off + i, key + half_size + off + i))
 265			return -EIO;
 266
 267	return 0;
 268}
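/* Illustration (a minimal sketch with hypothetical values): the key buffer
 * holds the key bytes in its first half and the key-invert bytes in its
 * second half, so 'off'/'len' address the key half and the matching invert
 * bytes land 'half_size' bytes later. With all-default masks
 * (upd/dc/nm == NULL):
 *
 *	u8 key[4] = {};
 *	u8 val = 0x2A;
 *
 *	ice_set_key(key, sizeof(key), &val, NULL, NULL, NULL, 0, sizeof(val));
 *	// key[0] == 0xD5, the exact-match key byte (bitwise inverse of val)
 *	// key[2] == 0x2A, the corresponding key-invert byte
 */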
 269
 270/**
 271 * ice_acquire_change_lock
 272 * @hw: pointer to the HW structure
 273 * @access: access type (read or write)
 274 *
 275 * This function will request ownership of the change lock.
 276 */
 277int
 278ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
 279{
 280	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
 281			       ICE_CHANGE_LOCK_TIMEOUT);
 282}
 283
 284/**
 285 * ice_release_change_lock
 286 * @hw: pointer to the HW structure
 287 *
 288 * This function will release the change lock using the proper Admin Command.
 289 */
 290void ice_release_change_lock(struct ice_hw *hw)
 291{
 292	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
 293}
 294
 295/**
 296 * ice_get_open_tunnel_port - retrieve an open tunnel port
 297 * @hw: pointer to the HW structure
 298 * @port: returns open port
 299 * @type: type of tunnel, can be TNL_LAST if it doesn't matter
 300 */
 301bool
 302ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
 303			 enum ice_tunnel_type type)
 304{
 305	bool res = false;
 306	u16 i;
 307
 308	mutex_lock(&hw->tnl_lock);
 309
 310	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
 311		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
 312		    (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
 313			*port = hw->tnl.tbl[i].port;
 314			res = true;
 315			break;
 316		}
 317
 318	mutex_unlock(&hw->tnl_lock);
 319
 320	return res;
 321}
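/* Illustration (a minimal usage sketch): a caller that only needs any one
 * open port of a given type can do, for example:
 *
 *	u16 port;
 *
 *	if (ice_get_open_tunnel_port(hw, &port, TNL_VXLAN))
 *		; // 'port' now holds an open VXLAN UDP port (host byte order)
 *
 * Passing TNL_LAST instead returns the first open port of any tunnel type.
 */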
 322
 323/**
 324 * ice_upd_dvm_boost_entry
 325 * @hw: pointer to the HW structure
  326 * @entry: pointer to double VLAN boost entry info
 327 */
 328static int
 329ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
 330{
 331	struct ice_boost_tcam_section *sect_rx, *sect_tx;
 332	int status = -ENOSPC;
 333	struct ice_buf_build *bld;
 334	u8 val, dc, nm;
 335
 336	bld = ice_pkg_buf_alloc(hw);
 337	if (!bld)
 338		return -ENOMEM;
 339
 340	/* allocate 2 sections, one for Rx parser, one for Tx parser */
 341	if (ice_pkg_buf_reserve_section(bld, 2))
 342		goto ice_upd_dvm_boost_entry_err;
 343
 344	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
 345					    struct_size(sect_rx, tcam, 1));
 346	if (!sect_rx)
 347		goto ice_upd_dvm_boost_entry_err;
 348	sect_rx->count = cpu_to_le16(1);
 349
 350	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
 351					    struct_size(sect_tx, tcam, 1));
 352	if (!sect_tx)
 353		goto ice_upd_dvm_boost_entry_err;
 354	sect_tx->count = cpu_to_le16(1);
 355
 356	/* copy original boost entry to update package buffer */
 357	memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam));
 358
 359	/* re-write the don't care and never match bits accordingly */
 360	if (entry->enable) {
 361		/* all bits are don't care */
 362		val = 0x00;
 363		dc = 0xFF;
 364		nm = 0x00;
 365	} else {
 366		/* disable, one never match bit, the rest are don't care */
 367		val = 0x00;
 368		dc = 0xF7;
 369		nm = 0x08;
 370	}
 371
 372	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
 373		    &val, NULL, &dc, &nm, 0, sizeof(u8));
 374
 375	/* exact copy of entry to Tx section entry */
 376	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
 377
 378	status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
 379
 380ice_upd_dvm_boost_entry_err:
 381	ice_pkg_buf_free(hw, bld);
 382
 383	return status;
 384}
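/* Clarification (not stated explicitly above): with dc = 0xFF the rewritten
 * key byte matches anything, so the boost TCAM entry can hit and double VLAN
 * handling is enabled; with nm = 0x08 one bit of that byte becomes "never
 * match", so the entry can never hit and is effectively disabled.
 */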
 385
 386/**
 387 * ice_set_dvm_boost_entries
 388 * @hw: pointer to the HW structure
 389 *
  390 * Enable double VLAN by updating the appropriate boost TCAM entries.
 391 */
 392int ice_set_dvm_boost_entries(struct ice_hw *hw)
 393{
 394	u16 i;
 395
 396	for (i = 0; i < hw->dvm_upd.count; i++) {
 397		int status;
 398
 399		status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
 400		if (status)
 401			return status;
 402	}
 403
 404	return 0;
 405}
 406
 407/**
 408 * ice_tunnel_idx_to_entry - convert linear index to the sparse one
 409 * @hw: pointer to the HW structure
 410 * @type: type of tunnel
 411 * @idx: linear index
 412 *
 413 * Stack assumes we have 2 linear tables with indexes [0, count_valid),
  414 * but really the port table may be sparse, and types are mixed, so convert
 415 * the stack index into the device index.
 416 */
 417static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
 418				   u16 idx)
 419{
 420	u16 i;
 421
 422	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
 423		if (hw->tnl.tbl[i].valid &&
 424		    hw->tnl.tbl[i].type == type &&
 425		    idx-- == 0)
 426			return i;
 427
 428	WARN_ON_ONCE(1);
 429	return 0;
 430}
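/* Illustration (hypothetical table contents): with a device table laid out as
 *
 *	tbl[0] = { valid, TNL_VXLAN  }
 *	tbl[1] = { valid, TNL_GENEVE }
 *	tbl[2] = { invalid          }
 *	tbl[3] = { valid, TNL_VXLAN  }
 *
 * the stack's VXLAN index 1 maps to device entry 3, because invalid entries
 * and entries of other types are skipped while counting down 'idx'.
 */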
 431
 432/**
 433 * ice_create_tunnel
 434 * @hw: pointer to the HW structure
 435 * @index: device table entry
 436 * @type: type of tunnel
 437 * @port: port of tunnel to create
 438 *
 439 * Create a tunnel by updating the parse graph in the parser. We do that by
 440 * creating a package buffer with the tunnel info and issuing an update package
 441 * command.
 442 */
 443static int
 444ice_create_tunnel(struct ice_hw *hw, u16 index,
 445		  enum ice_tunnel_type type, u16 port)
 446{
 447	struct ice_boost_tcam_section *sect_rx, *sect_tx;
 448	struct ice_buf_build *bld;
 449	int status = -ENOSPC;
 450
 451	mutex_lock(&hw->tnl_lock);
 452
 453	bld = ice_pkg_buf_alloc(hw);
 454	if (!bld) {
 455		status = -ENOMEM;
 456		goto ice_create_tunnel_end;
 457	}
 458
 459	/* allocate 2 sections, one for Rx parser, one for Tx parser */
 460	if (ice_pkg_buf_reserve_section(bld, 2))
 461		goto ice_create_tunnel_err;
 462
 463	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
 464					    struct_size(sect_rx, tcam, 1));
 465	if (!sect_rx)
 466		goto ice_create_tunnel_err;
 467	sect_rx->count = cpu_to_le16(1);
 468
 469	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
 470					    struct_size(sect_tx, tcam, 1));
 471	if (!sect_tx)
 472		goto ice_create_tunnel_err;
 473	sect_tx->count = cpu_to_le16(1);
 474
 475	/* copy original boost entry to update package buffer */
 476	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
 477	       sizeof(*sect_rx->tcam));
 478
 479	/* over-write the never-match dest port key bits with the encoded port
 480	 * bits
 481	 */
 482	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
 483		    (u8 *)&port, NULL, NULL, NULL,
 484		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
 485		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
 486
 487	/* exact copy of entry to Tx section entry */
 488	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
 489
 490	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 491	if (!status)
 492		hw->tnl.tbl[index].port = port;
 493
 494ice_create_tunnel_err:
 495	ice_pkg_buf_free(hw, bld);
 496
 497ice_create_tunnel_end:
 498	mutex_unlock(&hw->tnl_lock);
 499
 500	return status;
 501}
 502
 503/**
 504 * ice_destroy_tunnel
 505 * @hw: pointer to the HW structure
 506 * @index: device table entry
 507 * @type: type of tunnel
 508 * @port: port of tunnel to destroy (ignored if the all parameter is true)
 509 *
 510 * Destroys a tunnel or all tunnels by creating an update package buffer
  511 * targeting the specific updates requested and then issuing an update
  512 * package command.
 513 */
 514static int
 515ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
 516		   u16 port)
 517{
 518	struct ice_boost_tcam_section *sect_rx, *sect_tx;
 519	struct ice_buf_build *bld;
 520	int status = -ENOSPC;
 521
 522	mutex_lock(&hw->tnl_lock);
 523
 524	if (WARN_ON(!hw->tnl.tbl[index].valid ||
 525		    hw->tnl.tbl[index].type != type ||
 526		    hw->tnl.tbl[index].port != port)) {
 527		status = -EIO;
 528		goto ice_destroy_tunnel_end;
 529	}
 530
 531	bld = ice_pkg_buf_alloc(hw);
 532	if (!bld) {
 533		status = -ENOMEM;
 534		goto ice_destroy_tunnel_end;
 535	}
 536
 537	/* allocate 2 sections, one for Rx parser, one for Tx parser */
 538	if (ice_pkg_buf_reserve_section(bld, 2))
 539		goto ice_destroy_tunnel_err;
 540
 541	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
 542					    struct_size(sect_rx, tcam, 1));
 543	if (!sect_rx)
 544		goto ice_destroy_tunnel_err;
 545	sect_rx->count = cpu_to_le16(1);
 546
 547	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
 548					    struct_size(sect_tx, tcam, 1));
 549	if (!sect_tx)
 550		goto ice_destroy_tunnel_err;
 551	sect_tx->count = cpu_to_le16(1);
 552
 553	/* copy original boost entry to update package buffer, one copy to Rx
 554	 * section, another copy to the Tx section
 555	 */
 556	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
 557	       sizeof(*sect_rx->tcam));
 558	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
 559	       sizeof(*sect_tx->tcam));
 560
 561	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 562	if (!status)
 563		hw->tnl.tbl[index].port = 0;
 564
 565ice_destroy_tunnel_err:
 566	ice_pkg_buf_free(hw, bld);
 567
 568ice_destroy_tunnel_end:
 569	mutex_unlock(&hw->tnl_lock);
 570
 571	return status;
 572}
 573
 574int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
 575			    unsigned int idx, struct udp_tunnel_info *ti)
 576{
 577	struct ice_netdev_priv *np = netdev_priv(netdev);
 578	struct ice_vsi *vsi = np->vsi;
 579	struct ice_pf *pf = vsi->back;
 580	enum ice_tunnel_type tnl_type;
 581	int status;
 582	u16 index;
 583
 584	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
 585	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
 586
 587	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
 588	if (status) {
 589		netdev_err(netdev, "Error adding UDP tunnel - %d\n",
 590			   status);
 591		return -EIO;
 592	}
 593
 594	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
 595	return 0;
 596}
 597
 598int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
 599			      unsigned int idx, struct udp_tunnel_info *ti)
 600{
 601	struct ice_netdev_priv *np = netdev_priv(netdev);
 602	struct ice_vsi *vsi = np->vsi;
 603	struct ice_pf *pf = vsi->back;
 604	enum ice_tunnel_type tnl_type;
 605	int status;
 606
 607	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
 608
 609	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
 610				    ntohs(ti->port));
 611	if (status) {
 612		netdev_err(netdev, "Error removing UDP tunnel - %d\n",
 613			   status);
 614		return -EIO;
 615	}
 616
 617	return 0;
 618}
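/* Sketch (hedged; the table sizes and the variable name are hypothetical):
 * these two callbacks are driven by the udp_tunnel_nic core, registered
 * through a structure roughly like the following and pointed to by
 * netdev->udp_tunnel_nic_info. The core then calls set_port/unset_port with
 * the (table, idx) pair used above.
 *
 *	static const struct udp_tunnel_nic_info ice_udp_tunnels_example = {
 *		.set_port	= ice_udp_tunnel_set_port,
 *		.unset_port	= ice_udp_tunnel_unset_port,
 *		.tables		= {
 *			{ .n_entries = 16, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN },
 *			{ .n_entries = 16, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE },
 *		},
 *	};
 */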
 619
 620/**
 621 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
 622 * @hw: pointer to the hardware structure
 623 * @blk: hardware block
 624 * @prof: profile ID
 625 * @fv_idx: field vector word index
 626 * @prot: variable to receive the protocol ID
 627 * @off: variable to receive the protocol offset
 628 */
 629int
 630ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
 631		  u8 *prot, u16 *off)
 632{
 633	struct ice_fv_word *fv_ext;
 634
 635	if (prof >= hw->blk[blk].es.count)
 636		return -EINVAL;
 637
 638	if (fv_idx >= hw->blk[blk].es.fvw)
 639		return -EINVAL;
 640
 641	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
 642
 643	*prot = fv_ext[fv_idx].prot_id;
 644	*off = fv_ext[fv_idx].off;
 645
 646	return 0;
 647}
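/* Clarification (a worked example): es.t is a flat array of 'count' field
 * vectors, each 'fvw' words long, so word 'fv_idx' of profile 'prof' lives at
 * es.t[prof * fvw + fv_idx]. For a block with fvw = 48 (the switch block per
 * the blk_sizes table later in this file), word 5 of profile 2 is
 * es.t[2 * 48 + 5].
 */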
 648
 649/* PTG Management */
 650
 651/**
 652 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 653 * @hw: pointer to the hardware structure
 654 * @blk: HW block
 655 * @ptype: the ptype to search for
 656 * @ptg: pointer to variable that receives the PTG
 657 *
 658 * This function will search the PTGs for a particular ptype, returning the
 659 * PTG ID that contains it through the PTG parameter, with the value of
  660 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
 661 */
 662static int
 663ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
 664{
 665	if (ptype >= ICE_XLT1_CNT || !ptg)
 666		return -EINVAL;
 667
 668	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
 669	return 0;
 670}
 671
 672/**
 673 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 674 * @hw: pointer to the hardware structure
 675 * @blk: HW block
 676 * @ptg: the PTG to allocate
 677 *
 678 * This function allocates a given packet type group ID specified by the PTG
 679 * parameter.
 680 */
 681static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
 682{
 683	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
 684}
 685
 686/**
 687 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 688 * @hw: pointer to the hardware structure
 689 * @blk: HW block
 690 * @ptype: the ptype to remove
 691 * @ptg: the PTG to remove the ptype from
 692 *
 693 * This function will remove the ptype from the specific PTG, and move it to
 694 * the default PTG (ICE_DEFAULT_PTG).
 695 */
 696static int
 697ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
 698{
 699	struct ice_ptg_ptype **ch;
 700	struct ice_ptg_ptype *p;
 701
 702	if (ptype > ICE_XLT1_CNT - 1)
 703		return -EINVAL;
 704
 705	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
 706		return -ENOENT;
 707
 708	/* Should not happen if .in_use is set, bad config */
 709	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
 710		return -EIO;
 711
 712	/* find the ptype within this PTG, and bypass the link over it */
 713	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
 714	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
 715	while (p) {
 716		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
 717			*ch = p->next_ptype;
 718			break;
 719		}
 720
 721		ch = &p->next_ptype;
 722		p = p->next_ptype;
 723	}
 724
 725	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
 726	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
 727
 728	return 0;
 729}
 730
 731/**
 732 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
 733 * @hw: pointer to the hardware structure
 734 * @blk: HW block
 735 * @ptype: the ptype to add or move
 736 * @ptg: the PTG to add or move the ptype to
 737 *
 738 * This function will either add or move a ptype to a particular PTG depending
 739 * on if the ptype is already part of another group. Note that using a
 740 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 741 * default PTG.
 742 */
 743static int
 744ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
 745{
 746	u8 original_ptg;
 747	int status;
 748
 749	if (ptype > ICE_XLT1_CNT - 1)
 750		return -EINVAL;
 751
 752	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
 753		return -ENOENT;
 754
 755	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
 756	if (status)
 757		return status;
 758
 759	/* Is ptype already in the correct PTG? */
 760	if (original_ptg == ptg)
 761		return 0;
 762
 763	/* Remove from original PTG and move back to the default PTG */
 764	if (original_ptg != ICE_DEFAULT_PTG)
 765		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
 766
 767	/* Moving to default PTG? Then we're done with this request */
 768	if (ptg == ICE_DEFAULT_PTG)
 769		return 0;
 770
 771	/* Add ptype to PTG at beginning of list */
 772	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
 773		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
 774	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
 775		&hw->blk[blk].xlt1.ptypes[ptype];
 776
 777	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
 778	hw->blk[blk].xlt1.t[ptype] = ptg;
 779
 780	return 0;
 781}
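/* Illustration (hypothetical numbers): moving, say, ptype 17 into PTG 3 first
 * unlinks it from whatever PTG list it was on (via the default PTG), then
 * pushes it at the head of PTG 3's ptype list and records the association in
 * both xlt1.ptypes[17].ptg and the xlt1.t[17] shadow of the hardware XLT1
 * table.
 */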
 782
 783/* Block / table size info */
 784struct ice_blk_size_details {
 785	u16 xlt1;			/* # XLT1 entries */
 786	u16 xlt2;			/* # XLT2 entries */
 787	u16 prof_tcam;			/* # profile ID TCAM entries */
 788	u16 prof_id;			/* # profile IDs */
 789	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
 790	u16 prof_redir;			/* # profile redirection entries */
 791	u16 es;				/* # extraction sequence entries */
 792	u16 fvw;			/* # field vector words */
 793	u8 overwrite;			/* overwrite existing entries allowed */
 794	u8 reverse;			/* reverse FV order */
 795};
 796
 797static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
 798	/**
 799	 * Table Definitions
 800	 * XLT1 - Number of entries in XLT1 table
 801	 * XLT2 - Number of entries in XLT2 table
 802	 * TCAM - Number of entries Profile ID TCAM table
 803	 * CDID - Control Domain ID of the hardware block
 804	 * PRED - Number of entries in the Profile Redirection Table
 805	 * FV   - Number of entries in the Field Vector
 806	 * FVW  - Width (in WORDs) of the Field Vector
 807	 * OVR  - Overwrite existing table entries
 808	 * REV  - Reverse FV
 809	 */
 810	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
 811	/*          Overwrite   , Reverse FV */
 812	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
 813		    false, false },
 814	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
 815		    false, false },
 816	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
 817		    false, true  },
 818	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
 819		    true,  true  },
 820	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
 821		    false, false },
 822};
 823
 824enum ice_sid_all {
 825	ICE_SID_XLT1_OFF = 0,
 826	ICE_SID_XLT2_OFF,
 827	ICE_SID_PR_OFF,
 828	ICE_SID_PR_REDIR_OFF,
 829	ICE_SID_ES_OFF,
 830	ICE_SID_OFF_COUNT,
 831};
 832
 833/* Characteristic handling */
 834
 835/**
 836 * ice_match_prop_lst - determine if properties of two lists match
 837 * @list1: first properties list
 838 * @list2: second properties list
 839 *
 840 * Count, cookies and the order must match in order to be considered equivalent.
 841 */
 842static bool
 843ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
 844{
 845	struct ice_vsig_prof *tmp1;
 846	struct ice_vsig_prof *tmp2;
 847	u16 chk_count = 0;
 848	u16 count = 0;
 849
 850	/* compare counts */
 851	list_for_each_entry(tmp1, list1, list)
 852		count++;
 853	list_for_each_entry(tmp2, list2, list)
 854		chk_count++;
 855	if (!count || count != chk_count)
 856		return false;
 857
 858	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
 859	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
 860
 861	/* profile cookies must compare, and in the exact same order to take
 862	 * into account priority
 863	 */
 864	while (count--) {
 865		if (tmp2->profile_cookie != tmp1->profile_cookie)
 866			return false;
 867
 868		tmp1 = list_next_entry(tmp1, list);
 869		tmp2 = list_next_entry(tmp2, list);
 870	}
 871
 872	return true;
 873}
 874
 875/* VSIG Management */
 876
 877/**
 878 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
 879 * @hw: pointer to the hardware structure
 880 * @blk: HW block
 881 * @vsi: VSI of interest
 882 * @vsig: pointer to receive the VSI group
 883 *
  884 * This function will look up the VSI entry in the XLT2 list and return
  885 * the VSI group it is associated with.
 886 */
 887static int
 888ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
 889{
 890	if (!vsig || vsi >= ICE_MAX_VSI)
 891		return -EINVAL;
 892
 893	/* As long as there's a default or valid VSIG associated with the input
  894	 * VSI, the function returns success. Any handling of VSIG will be
 895	 * done by the following add, update or remove functions.
 896	 */
 897	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
 898
 899	return 0;
 900}
 901
 902/**
 903 * ice_vsig_alloc_val - allocate a new VSIG by value
 904 * @hw: pointer to the hardware structure
 905 * @blk: HW block
 906 * @vsig: the VSIG to allocate
 907 *
 908 * This function will allocate a given VSIG specified by the VSIG parameter.
 909 */
 910static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
 911{
 912	u16 idx = vsig & ICE_VSIG_IDX_M;
 913
 914	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
 915		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
 916		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
 917	}
 918
 919	return ICE_VSIG_VALUE(idx, hw->pf_id);
 920}
 921
 922/**
 923 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
 924 * @hw: pointer to the hardware structure
 925 * @blk: HW block
 926 *
 927 * This function will iterate through the VSIG list and mark the first
 928 * unused entry for the new VSIG entry as used and return that value.
 929 */
 930static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
 931{
 932	u16 i;
 933
 934	for (i = 1; i < ICE_MAX_VSIGS; i++)
 935		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
 936			return ice_vsig_alloc_val(hw, blk, i);
 937
 938	return ICE_DEFAULT_VSIG;
 939}
 940
 941/**
 942 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
 943 * @hw: pointer to the hardware structure
 944 * @blk: HW block
 945 * @chs: characteristic list
 946 * @vsig: returns the VSIG with the matching profiles, if found
 947 *
 948 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
 949 * a group have the same characteristic set. To check if there exists a VSIG
  950 * which has the same characteristics as the input characteristics, this
 951 * function will iterate through the XLT2 list and return the VSIG that has a
 952 * matching configuration. In order to make sure that priorities are accounted
 953 * for, the list must match exactly, including the order in which the
 954 * characteristics are listed.
 955 */
 956static int
 957ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
 958			struct list_head *chs, u16 *vsig)
 959{
 960	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
 961	u16 i;
 962
 963	for (i = 0; i < xlt2->count; i++)
 964		if (xlt2->vsig_tbl[i].in_use &&
 965		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
 966			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
 967			return 0;
 968		}
 969
 970	return -ENOENT;
 971}
 972
 973/**
 974 * ice_vsig_free - free VSI group
 975 * @hw: pointer to the hardware structure
 976 * @blk: HW block
 977 * @vsig: VSIG to remove
 978 *
 979 * The function will remove all VSIs associated with the input VSIG and move
 980 * them to the DEFAULT_VSIG and mark the VSIG available.
 981 */
 982static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
 983{
 984	struct ice_vsig_prof *dtmp, *del;
 985	struct ice_vsig_vsi *vsi_cur;
 986	u16 idx;
 987
 988	idx = vsig & ICE_VSIG_IDX_M;
 989	if (idx >= ICE_MAX_VSIGS)
 990		return -EINVAL;
 991
 992	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
 993		return -ENOENT;
 994
 995	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
 996
 997	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
 998	/* If the VSIG has at least 1 VSI then iterate through the
 999	 * list and remove the VSIs before deleting the group.
1000	 */
1001	if (vsi_cur) {
1002		/* remove all vsis associated with this VSIG XLT2 entry */
1003		do {
1004			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
1005
1006			vsi_cur->vsig = ICE_DEFAULT_VSIG;
1007			vsi_cur->changed = 1;
1008			vsi_cur->next_vsi = NULL;
1009			vsi_cur = tmp;
1010		} while (vsi_cur);
1011
1012		/* NULL terminate head of VSI list */
1013		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
1014	}
1015
1016	/* free characteristic list */
1017	list_for_each_entry_safe(del, dtmp,
1018				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
1019				 list) {
1020		list_del(&del->list);
1021		devm_kfree(ice_hw_to_dev(hw), del);
1022	}
1023
1024	/* if VSIG characteristic list was cleared for reset
1025	 * re-initialize the list head
1026	 */
1027	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
1028
1029	return 0;
1030}
1031
1032/**
1033 * ice_vsig_remove_vsi - remove VSI from VSIG
1034 * @hw: pointer to the hardware structure
1035 * @blk: HW block
1036 * @vsi: VSI to remove
1037 * @vsig: VSI group to remove from
1038 *
1039 * The function will remove the input VSI from its VSI group and move it
1040 * to the DEFAULT_VSIG.
1041 */
1042static int
1043ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1044{
1045	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
1046	u16 idx;
1047
1048	idx = vsig & ICE_VSIG_IDX_M;
1049
1050	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1051		return -EINVAL;
1052
1053	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1054		return -ENOENT;
1055
1056	/* entry already in default VSIG, don't have to remove */
1057	if (idx == ICE_DEFAULT_VSIG)
1058		return 0;
1059
1060	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1061	if (!(*vsi_head))
1062		return -EIO;
1063
1064	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
1065	vsi_cur = (*vsi_head);
1066
1067	/* iterate the VSI list, skip over the entry to be removed */
1068	while (vsi_cur) {
1069		if (vsi_tgt == vsi_cur) {
1070			(*vsi_head) = vsi_cur->next_vsi;
1071			break;
1072		}
1073		vsi_head = &vsi_cur->next_vsi;
1074		vsi_cur = vsi_cur->next_vsi;
1075	}
1076
1077	/* verify if VSI was removed from group list */
1078	if (!vsi_cur)
1079		return -ENOENT;
1080
1081	vsi_cur->vsig = ICE_DEFAULT_VSIG;
1082	vsi_cur->changed = 1;
1083	vsi_cur->next_vsi = NULL;
1084
1085	return 0;
1086}
1087
1088/**
1089 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
1090 * @hw: pointer to the hardware structure
1091 * @blk: HW block
1092 * @vsi: VSI to move
1093 * @vsig: destination VSI group
1094 *
1095 * This function will move or add the input VSI to the target VSIG.
1096 * The function will find the original VSIG the VSI belongs to and
1097 * move the entry to the DEFAULT_VSIG, update the original VSIG and
1098 * then move entry to the new VSIG.
1099 */
1100static int
1101ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1102{
1103	struct ice_vsig_vsi *tmp;
1104	u16 orig_vsig, idx;
1105	int status;
1106
1107	idx = vsig & ICE_VSIG_IDX_M;
1108
1109	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1110		return -EINVAL;
1111
 1112	/* if the VSIG is not in use and is not the default VSIG, this
 1113	 * VSIG doesn't exist.
1114	 */
1115	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
1116	    vsig != ICE_DEFAULT_VSIG)
1117		return -ENOENT;
1118
1119	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
1120	if (status)
1121		return status;
1122
1123	/* no update required if vsigs match */
1124	if (orig_vsig == vsig)
1125		return 0;
1126
1127	if (orig_vsig != ICE_DEFAULT_VSIG) {
1128		/* remove entry from orig_vsig and add to default VSIG */
1129		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
1130		if (status)
1131			return status;
1132	}
1133
1134	if (idx == ICE_DEFAULT_VSIG)
1135		return 0;
1136
1137	/* Create VSI entry and add VSIG and prop_mask values */
1138	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
1139	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
1140
1141	/* Add new entry to the head of the VSIG list */
1142	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1143	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
1144		&hw->blk[blk].xlt2.vsis[vsi];
1145	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
1146	hw->blk[blk].xlt2.t[vsi] = vsig;
1147
1148	return 0;
1149}
1150
1151/**
1152 * ice_prof_has_mask_idx - determine if profile index masking is identical
1153 * @hw: pointer to the hardware structure
1154 * @blk: HW block
1155 * @prof: profile to check
1156 * @idx: profile index to check
1157 * @mask: mask to match
1158 */
1159static bool
1160ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
1161		      u16 mask)
1162{
1163	bool expect_no_mask = false;
1164	bool found = false;
1165	bool match = false;
1166	u16 i;
1167
1168	/* If mask is 0x0000 or 0xffff, then there is no masking */
1169	if (mask == 0 || mask == 0xffff)
1170		expect_no_mask = true;
1171
1172	/* Scan the enabled masks on this profile, for the specified idx */
1173	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
1174	     hw->blk[blk].masks.count; i++)
1175		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
1176			if (hw->blk[blk].masks.masks[i].in_use &&
1177			    hw->blk[blk].masks.masks[i].idx == idx) {
1178				found = true;
1179				if (hw->blk[blk].masks.masks[i].mask == mask)
1180					match = true;
1181				break;
1182			}
1183
1184	if (expect_no_mask) {
1185		if (found)
1186			return false;
1187	} else {
1188		if (!match)
1189			return false;
1190	}
1191
1192	return true;
1193}
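/* Clarification: a requested mask of 0x0000 or 0xFFFF means "no masking", so
 * the profile must have no HW mask slot allocated for that word index; any
 * other value requires an in-use mask slot with exactly that index and mask
 * value.
 */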
1194
1195/**
1196 * ice_prof_has_mask - determine if profile masking is identical
1197 * @hw: pointer to the hardware structure
1198 * @blk: HW block
1199 * @prof: profile to check
1200 * @masks: masks to match
1201 */
1202static bool
1203ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
1204{
1205	u16 i;
1206
1207	/* es->mask_ena[prof] will have the mask */
1208	for (i = 0; i < hw->blk[blk].es.fvw; i++)
1209		if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
1210			return false;
1211
1212	return true;
1213}
1214
1215/**
1216 * ice_find_prof_id_with_mask - find profile ID for a given field vector
1217 * @hw: pointer to the hardware structure
1218 * @blk: HW block
1219 * @fv: field vector to search for
1220 * @masks: masks for FV
1221 * @symm: symmetric setting for RSS flows
1222 * @prof_id: receives the profile ID
1223 */
1224static int
1225ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
1226			   struct ice_fv_word *fv, u16 *masks, bool symm,
1227			   u8 *prof_id)
1228{
1229	struct ice_es *es = &hw->blk[blk].es;
1230	u8 i;
1231
 1232	/* For FD, we don't want to reuse an existing profile with the same
 1233	 * field vector and mask, as that would cause rule interference.
1234	 */
1235	if (blk == ICE_BLK_FD)
1236		return -ENOENT;
1237
1238	for (i = 0; i < (u8)es->count; i++) {
1239		u16 off = i * es->fvw;
1240
1241		if (blk == ICE_BLK_RSS && es->symm[i] != symm)
1242			continue;
1243
1244		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
1245			continue;
1246
 1247		/* check if mask settings are the same for this profile */
1248		if (masks && !ice_prof_has_mask(hw, blk, i, masks))
1249			continue;
1250
1251		*prof_id = i;
1252		return 0;
1253	}
1254
1255	return -ENOENT;
1256}
1257
1258/**
1259 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
1260 * @blk: the block type
1261 * @rsrc_type: pointer to variable to receive the resource type
1262 */
1263static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1264{
1265	switch (blk) {
1266	case ICE_BLK_FD:
1267		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
1268		break;
1269	case ICE_BLK_RSS:
1270		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
1271		break;
1272	default:
1273		return false;
1274	}
1275	return true;
1276}
1277
1278/**
1279 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
1280 * @blk: the block type
1281 * @rsrc_type: pointer to variable to receive the resource type
1282 */
1283static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1284{
1285	switch (blk) {
1286	case ICE_BLK_FD:
1287		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
1288		break;
1289	case ICE_BLK_RSS:
1290		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
1291		break;
1292	default:
1293		return false;
1294	}
1295	return true;
1296}
1297
1298/**
1299 * ice_alloc_tcam_ent - allocate hardware TCAM entry
1300 * @hw: pointer to the HW struct
1301 * @blk: the block to allocate the TCAM for
1302 * @btm: true to allocate from bottom of table, false to allocate from top
1303 * @tcam_idx: pointer to variable to receive the TCAM entry
1304 *
1305 * This function allocates a new entry in a Profile ID TCAM for a specific
1306 * block.
1307 */
1308static int
1309ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
1310		   u16 *tcam_idx)
1311{
1312	u16 res_type;
1313
1314	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1315		return -EINVAL;
1316
1317	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
1318}
1319
1320/**
1321 * ice_free_tcam_ent - free hardware TCAM entry
1322 * @hw: pointer to the HW struct
1323 * @blk: the block from which to free the TCAM entry
1324 * @tcam_idx: the TCAM entry to free
1325 *
1326 * This function frees an entry in a Profile ID TCAM for a specific block.
1327 */
1328static int
1329ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
1330{
1331	u16 res_type;
1332
1333	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1334		return -EINVAL;
1335
1336	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
1337}
1338
1339/**
1340 * ice_alloc_prof_id - allocate profile ID
1341 * @hw: pointer to the HW struct
1342 * @blk: the block to allocate the profile ID for
1343 * @prof_id: pointer to variable to receive the profile ID
1344 *
1345 * This function allocates a new profile ID, which also corresponds to a Field
1346 * Vector (Extraction Sequence) entry.
1347 */
1348static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
1349{
1350	u16 res_type;
1351	u16 get_prof;
1352	int status;
1353
1354	if (!ice_prof_id_rsrc_type(blk, &res_type))
1355		return -EINVAL;
1356
1357	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
1358	if (!status)
1359		*prof_id = (u8)get_prof;
1360
1361	return status;
1362}
1363
1364/**
1365 * ice_free_prof_id - free profile ID
1366 * @hw: pointer to the HW struct
1367 * @blk: the block from which to free the profile ID
1368 * @prof_id: the profile ID to free
1369 *
1370 * This function frees a profile ID, which also corresponds to a Field Vector.
1371 */
1372static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1373{
1374	u16 tmp_prof_id = (u16)prof_id;
1375	u16 res_type;
1376
1377	if (!ice_prof_id_rsrc_type(blk, &res_type))
1378		return -EINVAL;
1379
1380	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
1381}
1382
1383/**
1384 * ice_prof_inc_ref - increment reference count for profile
1385 * @hw: pointer to the HW struct
1386 * @blk: the block from which to free the profile ID
1387 * @prof_id: the profile ID for which to increment the reference count
1388 */
1389static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1390{
1391	if (prof_id > hw->blk[blk].es.count)
1392		return -EINVAL;
1393
1394	hw->blk[blk].es.ref_count[prof_id]++;
1395
1396	return 0;
1397}
1398
1399/**
1400 * ice_write_prof_mask_reg - write profile mask register
1401 * @hw: pointer to the HW struct
1402 * @blk: hardware block
1403 * @mask_idx: mask index
1404 * @idx: index of the FV which will use the mask
1405 * @mask: the 16-bit mask
1406 */
1407static void
1408ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
1409			u16 idx, u16 mask)
1410{
1411	u32 offset;
1412	u32 val;
1413
1414	switch (blk) {
1415	case ICE_BLK_RSS:
1416		offset = GLQF_HMASK(mask_idx);
1417		val = FIELD_PREP(GLQF_HMASK_MSK_INDEX_M, idx);
1418		val |= FIELD_PREP(GLQF_HMASK_MASK_M, mask);
1419		break;
1420	case ICE_BLK_FD:
1421		offset = GLQF_FDMASK(mask_idx);
1422		val = FIELD_PREP(GLQF_FDMASK_MSK_INDEX_M, idx);
1423		val |= FIELD_PREP(GLQF_FDMASK_MASK_M, mask);
1424		break;
1425	default:
1426		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
1427			  blk);
1428		return;
1429	}
1430
1431	wr32(hw, offset, val);
1432	ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
1433		  blk, idx, offset, val);
1434}
1435
1436/**
1437 * ice_write_prof_mask_enable_res - write profile mask enable register
1438 * @hw: pointer to the HW struct
1439 * @blk: hardware block
1440 * @prof_id: profile ID
1441 * @enable_mask: enable mask
1442 */
1443static void
1444ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
1445			       u16 prof_id, u32 enable_mask)
1446{
1447	u32 offset;
1448
1449	switch (blk) {
1450	case ICE_BLK_RSS:
1451		offset = GLQF_HMASK_SEL(prof_id);
1452		break;
1453	case ICE_BLK_FD:
1454		offset = GLQF_FDMASK_SEL(prof_id);
1455		break;
1456	default:
1457		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
1458			  blk);
1459		return;
1460	}
1461
1462	wr32(hw, offset, enable_mask);
1463	ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
1464		  blk, prof_id, offset, enable_mask);
1465}
1466
1467/**
 1468 * ice_init_prof_masks - initialize prof masks
1469 * @hw: pointer to the HW struct
1470 * @blk: hardware block
1471 */
1472static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
1473{
1474	u16 per_pf;
1475	u16 i;
1476
1477	mutex_init(&hw->blk[blk].masks.lock);
1478
1479	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
1480
1481	hw->blk[blk].masks.count = per_pf;
1482	hw->blk[blk].masks.first = hw->pf_id * per_pf;
1483
1484	memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
1485
1486	for (i = hw->blk[blk].masks.first;
1487	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
1488		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
1489}
1490
1491/**
1492 * ice_init_all_prof_masks - initialize all prof masks
1493 * @hw: pointer to the HW struct
1494 */
1495static void ice_init_all_prof_masks(struct ice_hw *hw)
1496{
1497	ice_init_prof_masks(hw, ICE_BLK_RSS);
1498	ice_init_prof_masks(hw, ICE_BLK_FD);
1499}
1500
1501/**
1502 * ice_alloc_prof_mask - allocate profile mask
1503 * @hw: pointer to the HW struct
1504 * @blk: hardware block
1505 * @idx: index of FV which will use the mask
1506 * @mask: the 16-bit mask
1507 * @mask_idx: variable to receive the mask index
1508 */
1509static int
1510ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
1511		    u16 *mask_idx)
1512{
1513	bool found_unused = false, found_copy = false;
1514	u16 unused_idx = 0, copy_idx = 0;
1515	int status = -ENOSPC;
1516	u16 i;
1517
1518	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1519		return -EINVAL;
1520
1521	mutex_lock(&hw->blk[blk].masks.lock);
1522
1523	for (i = hw->blk[blk].masks.first;
1524	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
1525		if (hw->blk[blk].masks.masks[i].in_use) {
1526			/* if mask is in use and it exactly duplicates the
 1527			 * desired mask and index, then it can be reused
1528			 */
1529			if (hw->blk[blk].masks.masks[i].mask == mask &&
1530			    hw->blk[blk].masks.masks[i].idx == idx) {
1531				found_copy = true;
1532				copy_idx = i;
1533				break;
1534			}
1535		} else {
1536			/* save off unused index, but keep searching in case
1537			 * there is an exact match later on
1538			 */
1539			if (!found_unused) {
1540				found_unused = true;
1541				unused_idx = i;
1542			}
1543		}
1544
1545	if (found_copy)
1546		i = copy_idx;
1547	else if (found_unused)
1548		i = unused_idx;
1549	else
1550		goto err_ice_alloc_prof_mask;
1551
1552	/* update mask for a new entry */
1553	if (found_unused) {
1554		hw->blk[blk].masks.masks[i].in_use = true;
1555		hw->blk[blk].masks.masks[i].mask = mask;
1556		hw->blk[blk].masks.masks[i].idx = idx;
1557		hw->blk[blk].masks.masks[i].ref = 0;
1558		ice_write_prof_mask_reg(hw, blk, i, idx, mask);
1559	}
1560
1561	hw->blk[blk].masks.masks[i].ref++;
1562	*mask_idx = i;
1563	status = 0;
1564
1565err_ice_alloc_prof_mask:
1566	mutex_unlock(&hw->blk[blk].masks.lock);
1567
1568	return status;
1569}
1570
1571/**
1572 * ice_free_prof_mask - free profile mask
1573 * @hw: pointer to the HW struct
1574 * @blk: hardware block
1575 * @mask_idx: index of mask
1576 */
1577static int
1578ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
1579{
1580	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1581		return -EINVAL;
1582
1583	if (!(mask_idx >= hw->blk[blk].masks.first &&
1584	      mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
1585		return -ENOENT;
1586
1587	mutex_lock(&hw->blk[blk].masks.lock);
1588
1589	if (!hw->blk[blk].masks.masks[mask_idx].in_use)
1590		goto exit_ice_free_prof_mask;
1591
1592	if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
1593		hw->blk[blk].masks.masks[mask_idx].ref--;
1594		goto exit_ice_free_prof_mask;
1595	}
1596
1597	/* remove mask */
1598	hw->blk[blk].masks.masks[mask_idx].in_use = false;
1599	hw->blk[blk].masks.masks[mask_idx].mask = 0;
1600	hw->blk[blk].masks.masks[mask_idx].idx = 0;
1601
1602	/* update mask as unused entry */
1603	ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
1604		  mask_idx);
1605	ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
1606
1607exit_ice_free_prof_mask:
1608	mutex_unlock(&hw->blk[blk].masks.lock);
1609
1610	return 0;
1611}
1612
1613/**
1614 * ice_free_prof_masks - free all profile masks for a profile
1615 * @hw: pointer to the HW struct
1616 * @blk: hardware block
1617 * @prof_id: profile ID
1618 */
1619static int
1620ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
1621{
1622	u32 mask_bm;
1623	u16 i;
1624
1625	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1626		return -EINVAL;
1627
1628	mask_bm = hw->blk[blk].es.mask_ena[prof_id];
1629	for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
1630		if (mask_bm & BIT(i))
1631			ice_free_prof_mask(hw, blk, i);
1632
1633	return 0;
1634}
1635
1636/**
1637 * ice_shutdown_prof_masks - releases lock for masking
1638 * @hw: pointer to the HW struct
1639 * @blk: hardware block
1640 *
1641 * This should be called before unloading the driver
1642 */
1643static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
1644{
1645	u16 i;
1646
1647	mutex_lock(&hw->blk[blk].masks.lock);
1648
1649	for (i = hw->blk[blk].masks.first;
1650	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
1651		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
1652
1653		hw->blk[blk].masks.masks[i].in_use = false;
1654		hw->blk[blk].masks.masks[i].idx = 0;
1655		hw->blk[blk].masks.masks[i].mask = 0;
1656	}
1657
1658	mutex_unlock(&hw->blk[blk].masks.lock);
1659	mutex_destroy(&hw->blk[blk].masks.lock);
1660}
1661
1662/**
1663 * ice_shutdown_all_prof_masks - releases all locks for masking
1664 * @hw: pointer to the HW struct
1665 *
1666 * This should be called before unloading the driver
1667 */
1668static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
1669{
1670	ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
1671	ice_shutdown_prof_masks(hw, ICE_BLK_FD);
1672}
1673
1674/**
1675 * ice_update_prof_masking - set registers according to masking
1676 * @hw: pointer to the HW struct
1677 * @blk: hardware block
1678 * @prof_id: profile ID
1679 * @masks: masks
1680 */
1681static int
1682ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
1683			u16 *masks)
1684{
1685	bool err = false;
1686	u32 ena_mask = 0;
1687	u16 idx;
1688	u16 i;
1689
1690	/* Only support FD and RSS masking, otherwise nothing to be done */
1691	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1692		return 0;
1693
1694	for (i = 0; i < hw->blk[blk].es.fvw; i++)
1695		if (masks[i] && masks[i] != 0xFFFF) {
1696			if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
1697				ena_mask |= BIT(idx);
1698			} else {
1699				/* not enough bitmaps */
1700				err = true;
1701				break;
1702			}
1703		}
1704
1705	if (err) {
1706		/* free any bitmaps we have allocated */
1707		for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
1708			if (ena_mask & BIT(i))
1709				ice_free_prof_mask(hw, blk, i);
1710
1711		return -EIO;
1712	}
1713
1714	/* enable the masks for this profile */
1715	ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
1716
1717	/* store enabled masks with profile so that they can be freed later */
1718	hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
1719
1720	return 0;
1721}
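/* Illustration (hypothetical masks[] contents): an array such as
 * { 0xFFFF, 0x00FF, 0x0000, ... } allocates a single per-PF mask slot (word
 * index 1, mask 0x00FF) via ice_alloc_prof_mask(); entries of 0x0000 or
 * 0xFFFF mean "no masking" and are skipped. The resulting bitmap of allocated
 * slots is written to the profile's mask-enable register and cached in
 * es.mask_ena[prof_id] so the masks can be freed later.
 */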
1722
1723/**
1724 * ice_write_es - write an extraction sequence and symmetric setting to hardware
1725 * @hw: pointer to the HW struct
1726 * @blk: the block in which to write the extraction sequence
1727 * @prof_id: the profile ID to write
1728 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
1729 * @symm: symmetric setting for RSS profiles
1730 */
1731static void
1732ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
1733	     struct ice_fv_word *fv, bool symm)
1734{
1735	u16 off;
1736
1737	off = prof_id * hw->blk[blk].es.fvw;
1738	if (!fv) {
1739		memset(&hw->blk[blk].es.t[off], 0,
1740		       hw->blk[blk].es.fvw * sizeof(*fv));
1741		hw->blk[blk].es.written[prof_id] = false;
1742	} else {
1743		memcpy(&hw->blk[blk].es.t[off], fv,
1744		       hw->blk[blk].es.fvw * sizeof(*fv));
1745	}
1746
1747	if (blk == ICE_BLK_RSS)
1748		hw->blk[blk].es.symm[prof_id] = symm;
1749}
1750
1751/**
1752 * ice_prof_dec_ref - decrement reference count for profile
1753 * @hw: pointer to the HW struct
1754 * @blk: the block from which to free the profile ID
1755 * @prof_id: the profile ID for which to decrement the reference count
1756 */
1757static int
1758ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1759{
1760	if (prof_id > hw->blk[blk].es.count)
1761		return -EINVAL;
1762
1763	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
1764		if (!--hw->blk[blk].es.ref_count[prof_id]) {
1765			ice_write_es(hw, blk, prof_id, NULL, false);
1766			ice_free_prof_masks(hw, blk, prof_id);
1767			return ice_free_prof_id(hw, blk, prof_id);
1768		}
1769	}
1770
1771	return 0;
1772}
1773
1774/* Block / table section IDs */
1775static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
1776	/* SWITCH */
1777	{	ICE_SID_XLT1_SW,
1778		ICE_SID_XLT2_SW,
1779		ICE_SID_PROFID_TCAM_SW,
1780		ICE_SID_PROFID_REDIR_SW,
1781		ICE_SID_FLD_VEC_SW
1782	},
1783
1784	/* ACL */
1785	{	ICE_SID_XLT1_ACL,
1786		ICE_SID_XLT2_ACL,
1787		ICE_SID_PROFID_TCAM_ACL,
1788		ICE_SID_PROFID_REDIR_ACL,
1789		ICE_SID_FLD_VEC_ACL
1790	},
1791
1792	/* FD */
1793	{	ICE_SID_XLT1_FD,
1794		ICE_SID_XLT2_FD,
1795		ICE_SID_PROFID_TCAM_FD,
1796		ICE_SID_PROFID_REDIR_FD,
1797		ICE_SID_FLD_VEC_FD
1798	},
1799
1800	/* RSS */
1801	{	ICE_SID_XLT1_RSS,
1802		ICE_SID_XLT2_RSS,
1803		ICE_SID_PROFID_TCAM_RSS,
1804		ICE_SID_PROFID_REDIR_RSS,
1805		ICE_SID_FLD_VEC_RSS
1806	},
1807
1808	/* PE */
1809	{	ICE_SID_XLT1_PE,
1810		ICE_SID_XLT2_PE,
1811		ICE_SID_PROFID_TCAM_PE,
1812		ICE_SID_PROFID_REDIR_PE,
1813		ICE_SID_FLD_VEC_PE
1814	}
1815};
1816
1817/**
1818 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
1819 * @hw: pointer to the hardware structure
1820 * @blk: the HW block to initialize
1821 */
1822static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
1823{
1824	u16 pt;
1825
1826	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
1827		u8 ptg;
1828
1829		ptg = hw->blk[blk].xlt1.t[pt];
1830		if (ptg != ICE_DEFAULT_PTG) {
1831			ice_ptg_alloc_val(hw, blk, ptg);
1832			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
1833		}
1834	}
1835}
1836
1837/**
1838 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
1839 * @hw: pointer to the hardware structure
1840 * @blk: the HW block to initialize
1841 */
1842static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
1843{
1844	u16 vsi;
1845
1846	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
1847		u16 vsig;
1848
1849		vsig = hw->blk[blk].xlt2.t[vsi];
1850		if (vsig) {
1851			ice_vsig_alloc_val(hw, blk, vsig);
1852			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
1853			/* no changes at this time, since this has been
1854			 * initialized from the original package
1855			 */
1856			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1857		}
1858	}
1859}
1860
1861/**
1862 * ice_init_sw_db - init software database from HW tables
1863 * @hw: pointer to the hardware structure
1864 */
1865static void ice_init_sw_db(struct ice_hw *hw)
1866{
1867	u16 i;
1868
1869	for (i = 0; i < ICE_BLK_COUNT; i++) {
1870		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
1871		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
1872	}
1873}
1874
1875/**
1876 * ice_fill_tbl - Reads content of a single table type into database
1877 * @hw: pointer to the hardware structure
1878 * @block_id: Block ID of the table to copy
1879 * @sid: Section ID of the table to copy
1880 *
1881 * Will attempt to read the entire content of a given table of a single block
1882 * into the driver database. We assume that the buffer will always
1883 * be at least as large as the data contained in the package. If
1884 * this condition is not met, there is most likely an error in the package
1885 * contents.
1886 */
1887static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
1888{
1889	u32 dst_len, sect_len, offset = 0;
1890	struct ice_prof_redir_section *pr;
1891	struct ice_prof_id_section *pid;
1892	struct ice_xlt1_section *xlt1;
1893	struct ice_xlt2_section *xlt2;
1894	struct ice_sw_fv_section *es;
1895	struct ice_pkg_enum state;
1896	u8 *src, *dst;
1897	void *sect;
1898
1899	/* if the HW segment pointer is null then the first iteration of
1900	 * ice_pkg_enum_section() will fail. In this case the HW tables will
1901	 * not be filled and we simply return.
1902	 */
1903	if (!hw->seg) {
1904		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
1905		return;
1906	}
1907
1908	memset(&state, 0, sizeof(state));
1909
1910	sect = ice_pkg_enum_section(hw->seg, &state, sid);
1911
1912	while (sect) {
1913		switch (sid) {
1914		case ICE_SID_XLT1_SW:
1915		case ICE_SID_XLT1_FD:
1916		case ICE_SID_XLT1_RSS:
1917		case ICE_SID_XLT1_ACL:
1918		case ICE_SID_XLT1_PE:
1919			xlt1 = sect;
1920			src = xlt1->value;
1921			sect_len = le16_to_cpu(xlt1->count) *
1922				sizeof(*hw->blk[block_id].xlt1.t);
1923			dst = hw->blk[block_id].xlt1.t;
1924			dst_len = hw->blk[block_id].xlt1.count *
1925				sizeof(*hw->blk[block_id].xlt1.t);
1926			break;
1927		case ICE_SID_XLT2_SW:
1928		case ICE_SID_XLT2_FD:
1929		case ICE_SID_XLT2_RSS:
1930		case ICE_SID_XLT2_ACL:
1931		case ICE_SID_XLT2_PE:
1932			xlt2 = sect;
1933			src = (__force u8 *)xlt2->value;
1934			sect_len = le16_to_cpu(xlt2->count) *
1935				sizeof(*hw->blk[block_id].xlt2.t);
1936			dst = (u8 *)hw->blk[block_id].xlt2.t;
1937			dst_len = hw->blk[block_id].xlt2.count *
1938				sizeof(*hw->blk[block_id].xlt2.t);
1939			break;
1940		case ICE_SID_PROFID_TCAM_SW:
1941		case ICE_SID_PROFID_TCAM_FD:
1942		case ICE_SID_PROFID_TCAM_RSS:
1943		case ICE_SID_PROFID_TCAM_ACL:
1944		case ICE_SID_PROFID_TCAM_PE:
1945			pid = sect;
1946			src = (u8 *)pid->entry;
1947			sect_len = le16_to_cpu(pid->count) *
1948				sizeof(*hw->blk[block_id].prof.t);
1949			dst = (u8 *)hw->blk[block_id].prof.t;
1950			dst_len = hw->blk[block_id].prof.count *
1951				sizeof(*hw->blk[block_id].prof.t);
1952			break;
1953		case ICE_SID_PROFID_REDIR_SW:
1954		case ICE_SID_PROFID_REDIR_FD:
1955		case ICE_SID_PROFID_REDIR_RSS:
1956		case ICE_SID_PROFID_REDIR_ACL:
1957		case ICE_SID_PROFID_REDIR_PE:
1958			pr = sect;
1959			src = pr->redir_value;
1960			sect_len = le16_to_cpu(pr->count) *
1961				sizeof(*hw->blk[block_id].prof_redir.t);
1962			dst = hw->blk[block_id].prof_redir.t;
1963			dst_len = hw->blk[block_id].prof_redir.count *
1964				sizeof(*hw->blk[block_id].prof_redir.t);
1965			break;
1966		case ICE_SID_FLD_VEC_SW:
1967		case ICE_SID_FLD_VEC_FD:
1968		case ICE_SID_FLD_VEC_RSS:
1969		case ICE_SID_FLD_VEC_ACL:
1970		case ICE_SID_FLD_VEC_PE:
1971			es = sect;
1972			src = (u8 *)es->fv;
1973			sect_len = (u32)(le16_to_cpu(es->count) *
1974					 hw->blk[block_id].es.fvw) *
1975				sizeof(*hw->blk[block_id].es.t);
1976			dst = (u8 *)hw->blk[block_id].es.t;
1977			dst_len = (u32)(hw->blk[block_id].es.count *
1978					hw->blk[block_id].es.fvw) *
1979				sizeof(*hw->blk[block_id].es.t);
1980			break;
1981		default:
1982			return;
1983		}
1984
1985		/* if the section offset exceeds destination length, terminate
1986		 * table fill.
1987		 */
1988		if (offset > dst_len)
1989			return;
1990
1991		/* if the sum of section size and offset exceeds the destination
1992		 * size, then we are out of bounds of the HW table size for that
1993		 * PF. Change the section length to fill the remaining table
1994		 * space of that PF.
1995		 */
1996		if ((offset + sect_len) > dst_len)
1997			sect_len = dst_len - offset;
1998
1999		memcpy(dst + offset, src, sect_len);
2000		offset += sect_len;
2001		sect = ice_pkg_enum_section(NULL, &state, sid);
2002	}
2003}
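/* Worked example of the bounds handling above (hypothetical sizes): if
 * this PF's destination table holds dst_len = 1024 bytes and the
 * package supplies sections of 512 and 768 bytes, the first copy uses
 * the full 512 bytes (offset becomes 512) while the second is clamped
 * to 1024 - 512 = 512 bytes; any later section for this table would be
 * clamped to zero length and copy nothing.
 */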
2004
2005/**
2006 * ice_fill_blk_tbls - Read package context for tables
2007 * @hw: pointer to the hardware structure
2008 *
2009 * Reads the current package contents and populates the driver
2010 * database with the data iteratively for all advanced feature
2011 * blocks. Assume that the HW tables have been allocated.
2012 */
2013void ice_fill_blk_tbls(struct ice_hw *hw)
2014{
2015	u8 i;
2016
2017	for (i = 0; i < ICE_BLK_COUNT; i++) {
2018		enum ice_block blk_id = (enum ice_block)i;
2019
2020		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
2021		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
2022		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
2023		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
2024		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
2025	}
2026
2027	ice_init_sw_db(hw);
2028}
2029
2030/**
2031 * ice_free_prof_map - free profile map
2032 * @hw: pointer to the hardware structure
2033 * @blk_idx: HW block index
2034 */
2035static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
2036{
2037	struct ice_es *es = &hw->blk[blk_idx].es;
2038	struct ice_prof_map *del, *tmp;
2039
2040	mutex_lock(&es->prof_map_lock);
2041	list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
2042		list_del(&del->list);
2043		devm_kfree(ice_hw_to_dev(hw), del);
2044	}
2045	INIT_LIST_HEAD(&es->prof_map);
2046	mutex_unlock(&es->prof_map_lock);
2047}
2048
2049/**
2050 * ice_free_flow_profs - free flow profile entries
2051 * @hw: pointer to the hardware structure
2052 * @blk_idx: HW block index
2053 */
2054static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
2055{
2056	struct ice_flow_prof *p, *tmp;
2057
2058	mutex_lock(&hw->fl_profs_locks[blk_idx]);
2059	list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
2060		struct ice_flow_entry *e, *t;
2061
2062		list_for_each_entry_safe(e, t, &p->entries, l_entry)
2063			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
2064					   ICE_FLOW_ENTRY_HNDL(e));
2065
2066		list_del(&p->l_entry);
2067
2068		mutex_destroy(&p->entries_lock);
2069		devm_kfree(ice_hw_to_dev(hw), p);
2070	}
2071	mutex_unlock(&hw->fl_profs_locks[blk_idx]);
2072
2073	/* if the driver is in reset and the tables are being cleared,
2074	 * re-initialize the flow profile list heads
2075	 */
2076	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2077}
2078
2079/**
2080 * ice_free_vsig_tbl - free complete VSIG table entries
2081 * @hw: pointer to the hardware structure
2082 * @blk: the HW block on which to free the VSIG table entries
2083 */
2084static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
2085{
2086	u16 i;
2087
2088	if (!hw->blk[blk].xlt2.vsig_tbl)
2089		return;
2090
2091	for (i = 1; i < ICE_MAX_VSIGS; i++)
2092		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2093			ice_vsig_free(hw, blk, i);
2094}
2095
2096/**
2097 * ice_free_hw_tbls - free hardware table memory
2098 * @hw: pointer to the hardware structure
2099 */
2100void ice_free_hw_tbls(struct ice_hw *hw)
2101{
2102	struct ice_rss_cfg *r, *rt;
2103	u8 i;
2104
2105	for (i = 0; i < ICE_BLK_COUNT; i++) {
2106		if (hw->blk[i].is_list_init) {
2107			struct ice_es *es = &hw->blk[i].es;
2108
2109			ice_free_prof_map(hw, i);
2110			mutex_destroy(&es->prof_map_lock);
2111
2112			ice_free_flow_profs(hw, i);
2113			mutex_destroy(&hw->fl_profs_locks[i]);
2114
2115			hw->blk[i].is_list_init = false;
2116		}
2117		ice_free_vsig_tbl(hw, (enum ice_block)i);
2118		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
2119		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
2120		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
2121		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
2122		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
2123		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
2124		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
2125		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
2126		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
2127		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
2128		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm);
2129		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
2130		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
2131		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
2132	}
2133
2134	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
2135		list_del(&r->l_entry);
2136		devm_kfree(ice_hw_to_dev(hw), r);
2137	}
2138	mutex_destroy(&hw->rss_locks);
2139	ice_shutdown_all_prof_masks(hw);
2140	memset(hw->blk, 0, sizeof(hw->blk));
2141}
2142
2143/**
2144 * ice_init_flow_profs - init flow profile locks and list heads
2145 * @hw: pointer to the hardware structure
2146 * @blk_idx: HW block index
2147 */
2148static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
2149{
2150	mutex_init(&hw->fl_profs_locks[blk_idx]);
2151	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2152}
2153
2154/**
2155 * ice_clear_hw_tbls - clear HW tables and flow profiles
2156 * @hw: pointer to the hardware structure
2157 */
2158void ice_clear_hw_tbls(struct ice_hw *hw)
2159{
2160	u8 i;
2161
2162	for (i = 0; i < ICE_BLK_COUNT; i++) {
2163		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2164		struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
2165		struct ice_prof_tcam *prof = &hw->blk[i].prof;
2166		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2167		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2168		struct ice_es *es = &hw->blk[i].es;
2169
2170		if (hw->blk[i].is_list_init) {
2171			ice_free_prof_map(hw, i);
2172			ice_free_flow_profs(hw, i);
2173		}
2174
2175		ice_free_vsig_tbl(hw, (enum ice_block)i);
2176
2177		memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
2178		memset(xlt1->ptg_tbl, 0,
2179		       ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
2180		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
2181
2182		memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
2183		memset(xlt2->vsig_tbl, 0,
2184		       xlt2->count * sizeof(*xlt2->vsig_tbl));
2185		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
2186
2187		memset(prof->t, 0, prof->count * sizeof(*prof->t));
2188		memset(prof_redir->t, 0,
2189		       prof_redir->count * sizeof(*prof_redir->t));
2190
2191		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
2192		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
2193		memset(es->symm, 0, es->count * sizeof(*es->symm));
2194		memset(es->written, 0, es->count * sizeof(*es->written));
2195		memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
2196
2197		memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id));
2198	}
2199}
2200
2201/**
2202 * ice_init_hw_tbls - init hardware table memory
2203 * @hw: pointer to the hardware structure
2204 */
2205int ice_init_hw_tbls(struct ice_hw *hw)
2206{
2207	u8 i;
2208
2209	mutex_init(&hw->rss_locks);
2210	INIT_LIST_HEAD(&hw->rss_list_head);
2211	ice_init_all_prof_masks(hw);
2212	for (i = 0; i < ICE_BLK_COUNT; i++) {
2213		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2214		struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
2215		struct ice_prof_tcam *prof = &hw->blk[i].prof;
2216		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2217		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2218		struct ice_es *es = &hw->blk[i].es;
2219		u16 j;
2220
2221		if (hw->blk[i].is_list_init)
2222			continue;
2223
2224		ice_init_flow_profs(hw, i);
2225		mutex_init(&es->prof_map_lock);
2226		INIT_LIST_HEAD(&es->prof_map);
2227		hw->blk[i].is_list_init = true;
2228
2229		hw->blk[i].overwrite = blk_sizes[i].overwrite;
2230		es->reverse = blk_sizes[i].reverse;
2231
2232		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
2233		xlt1->count = blk_sizes[i].xlt1;
2234
2235		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
2236					    sizeof(*xlt1->ptypes), GFP_KERNEL);
2237
2238		if (!xlt1->ptypes)
2239			goto err;
2240
2241		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
2242					     sizeof(*xlt1->ptg_tbl),
2243					     GFP_KERNEL);
2244
2245		if (!xlt1->ptg_tbl)
2246			goto err;
2247
2248		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
2249				       sizeof(*xlt1->t), GFP_KERNEL);
2250		if (!xlt1->t)
2251			goto err;
2252
2253		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
2254		xlt2->count = blk_sizes[i].xlt2;
2255
2256		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
2257					  sizeof(*xlt2->vsis), GFP_KERNEL);
2258
2259		if (!xlt2->vsis)
2260			goto err;
2261
2262		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
2263					      sizeof(*xlt2->vsig_tbl),
2264					      GFP_KERNEL);
2265		if (!xlt2->vsig_tbl)
2266			goto err;
2267
2268		for (j = 0; j < xlt2->count; j++)
2269			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
2270
2271		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
2272				       sizeof(*xlt2->t), GFP_KERNEL);
2273		if (!xlt2->t)
2274			goto err;
2275
2276		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
2277		prof->count = blk_sizes[i].prof_tcam;
2278		prof->max_prof_id = blk_sizes[i].prof_id;
2279		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
2280		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
2281				       sizeof(*prof->t), GFP_KERNEL);
2282
2283		if (!prof->t)
2284			goto err;
2285
2286		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
2287		prof_redir->count = blk_sizes[i].prof_redir;
2288		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
2289					     prof_redir->count,
2290					     sizeof(*prof_redir->t),
2291					     GFP_KERNEL);
2292
2293		if (!prof_redir->t)
2294			goto err;
2295
2296		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
2297		es->count = blk_sizes[i].es;
2298		es->fvw = blk_sizes[i].fvw;
2299		es->t = devm_kcalloc(ice_hw_to_dev(hw),
2300				     (u32)(es->count * es->fvw),
2301				     sizeof(*es->t), GFP_KERNEL);
2302		if (!es->t)
2303			goto err;
2304
2305		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
2306					     sizeof(*es->ref_count),
2307					     GFP_KERNEL);
2308		if (!es->ref_count)
2309			goto err;
2310
2311		es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count,
2312					sizeof(*es->symm), GFP_KERNEL);
2313		if (!es->symm)
2314			goto err;
2315
2316		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
2317					   sizeof(*es->written), GFP_KERNEL);
2318		if (!es->written)
2319			goto err;
2320
2321		es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
2322					    sizeof(*es->mask_ena), GFP_KERNEL);
2323		if (!es->mask_ena)
2324			goto err;
2325
2326		prof_id->count = blk_sizes[i].prof_id;
2327		prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count,
2328					   sizeof(*prof_id->id), GFP_KERNEL);
2329		if (!prof_id->id)
2330			goto err;
2331	}
2332	return 0;
2333
2334err:
2335	ice_free_hw_tbls(hw);
2336	return -ENOMEM;
2337}
2338
2339/**
2340 * ice_prof_gen_key - generate profile ID key
2341 * @hw: pointer to the HW struct
2342 * @blk: the block in which to write the profile ID
2343 * @ptg: packet type group (PTG) portion of key
2344 * @vsig: VSIG portion of key
2345 * @cdid: CDID portion of key
2346 * @flags: flag portion of key
2347 * @vl_msk: valid mask
2348 * @dc_msk: don't care mask
2349 * @nm_msk: never match mask
2350 * @key: output of profile ID key
2351 */
2352static int
2353ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
2354		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
2355		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
2356		 u8 key[ICE_TCAM_KEY_SZ])
2357{
2358	struct ice_prof_id_key inkey;
2359
2360	inkey.xlt1 = ptg;
2361	inkey.xlt2_cdid = cpu_to_le16(vsig);
2362	inkey.flags = cpu_to_le16(flags);
2363
2364	switch (hw->blk[blk].prof.cdid_bits) {
2365	case 0:
2366		break;
2367	case 2:
2368#define ICE_CD_2_M 0xC000U
2369#define ICE_CD_2_S 14
2370		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
2371		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
2372		break;
2373	case 4:
2374#define ICE_CD_4_M 0xF000U
2375#define ICE_CD_4_S 12
2376		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
2377		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
2378		break;
2379	case 8:
2380#define ICE_CD_8_M 0xFF00U
2381#define ICE_CD_8_S 16
2382		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
2383		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
2384		break;
2385	default:
2386		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
2387		break;
2388	}
2389
2390	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
2391			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
2392}
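/* For illustration: with two CDID key bits (cdid_bits == 2), cdid = 1
 * selects bit 15 of the XLT2/CDID key word above:
 *
 *	xlt2_cdid &= ~0xC000;		clear bits 15:14 (ICE_CD_2_M)
 *	xlt2_cdid |= BIT(1) << 14;	i.e. |= 0x8000
 *
 * The 4-bit case uses ICE_CD_4_M/ICE_CD_4_S (0xF000, shift 12) in the
 * same way.
 */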
2393
2394/**
2395 * ice_tcam_write_entry - write TCAM entry
2396 * @hw: pointer to the HW struct
2397 * @blk: the block in which to write the profile ID
2398 * @idx: the entry index to write to
2399 * @prof_id: profile ID
2400 * @ptg: packet type group (PTG) portion of key
2401 * @vsig: VSIG portion of key
2402 * @cdid: CDID portion of key
2403 * @flags: flag portion of key
2404 * @vl_msk: valid mask
2405 * @dc_msk: don't care mask
2406 * @nm_msk: never match mask
2407 */
2408static int
2409ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
2410		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
2411		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
2412		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
2413		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
2414{
2415	struct ice_prof_tcam_entry;
2416	int status;
2417
2418	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
2419				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
2420	if (!status) {
2421		hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
2422		hw->blk[blk].prof.t[idx].prof_id = prof_id;
2423	}
2424
2425	return status;
2426}
2427
2428/**
2429 * ice_vsig_get_ref - returns number of VSIs that belong to a VSIG
2430 * @hw: pointer to the hardware structure
2431 * @blk: HW block
2432 * @vsig: VSIG to query
2433 * @refs: pointer to variable to receive the reference count
2434 */
2435static int
2436ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
2437{
2438	u16 idx = vsig & ICE_VSIG_IDX_M;
2439	struct ice_vsig_vsi *ptr;
2440
2441	*refs = 0;
2442
2443	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2444		return -ENOENT;
2445
2446	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2447	while (ptr) {
2448		(*refs)++;
2449		ptr = ptr->next_vsi;
2450	}
2451
2452	return 0;
2453}
2454
2455/**
2456 * ice_has_prof_vsig - check to see if VSIG has a specific profile
2457 * @hw: pointer to the hardware structure
2458 * @blk: HW block
2459 * @vsig: VSIG to check against
2460 * @hdl: profile handle
2461 */
2462static bool
2463ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
2464{
2465	u16 idx = vsig & ICE_VSIG_IDX_M;
2466	struct ice_vsig_prof *ent;
2467
2468	list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2469			    list)
2470		if (ent->profile_cookie == hdl)
2471			return true;
2472
2473	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
2474		  vsig);
2475	return false;
2476}
2477
2478/**
2479 * ice_prof_bld_es - build profile ID extraction sequence changes
2480 * @hw: pointer to the HW struct
2481 * @blk: hardware block
2482 * @bld: the update package buffer build to add to
2483 * @chgs: the list of changes to make in hardware
2484 */
2485static int
2486ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
2487		struct ice_buf_build *bld, struct list_head *chgs)
2488{
2489	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
2490	struct ice_chs_chg *tmp;
2491
2492	list_for_each_entry(tmp, chgs, list_entry)
2493		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
2494			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
2495			struct ice_pkg_es *p;
2496			u32 id;
2497
2498			id = ice_sect_id(blk, ICE_VEC_TBL);
2499			p = ice_pkg_buf_alloc_section(bld, id,
2500						      struct_size(p, es, 1) +
2501						      vec_size -
2502						      sizeof(p->es[0]));
2503
2504			if (!p)
2505				return -ENOSPC;
2506
2507			p->count = cpu_to_le16(1);
2508			p->offset = cpu_to_le16(tmp->prof_id);
2509
2510			memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
2511		}
2512
2513	return 0;
2514}
2515
2516/**
2517 * ice_prof_bld_tcam - build profile ID TCAM changes
2518 * @hw: pointer to the HW struct
2519 * @blk: hardware block
2520 * @bld: the update package buffer build to add to
2521 * @chgs: the list of changes to make in hardware
2522 */
2523static int
2524ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
2525		  struct ice_buf_build *bld, struct list_head *chgs)
2526{
2527	struct ice_chs_chg *tmp;
2528
2529	list_for_each_entry(tmp, chgs, list_entry)
2530		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
2531			struct ice_prof_id_section *p;
2532			u32 id;
2533
2534			id = ice_sect_id(blk, ICE_PROF_TCAM);
2535			p = ice_pkg_buf_alloc_section(bld, id,
2536						      struct_size(p, entry, 1));
2537
2538			if (!p)
2539				return -ENOSPC;
2540
2541			p->count = cpu_to_le16(1);
2542			p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
2543			p->entry[0].prof_id = tmp->prof_id;
2544
2545			memcpy(p->entry[0].key,
2546			       &hw->blk[blk].prof.t[tmp->tcam_idx].key,
2547			       sizeof(hw->blk[blk].prof.t->key));
2548		}
2549
2550	return 0;
2551}
2552
2553/**
2554 * ice_prof_bld_xlt1 - build XLT1 changes
2555 * @blk: hardware block
2556 * @bld: the update package buffer build to add to
2557 * @chgs: the list of changes to make in hardware
2558 */
2559static int
2560ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
2561		  struct list_head *chgs)
2562{
2563	struct ice_chs_chg *tmp;
2564
2565	list_for_each_entry(tmp, chgs, list_entry)
2566		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
2567			struct ice_xlt1_section *p;
2568			u32 id;
2569
2570			id = ice_sect_id(blk, ICE_XLT1);
2571			p = ice_pkg_buf_alloc_section(bld, id,
2572						      struct_size(p, value, 1));
2573
2574			if (!p)
2575				return -ENOSPC;
2576
2577			p->count = cpu_to_le16(1);
2578			p->offset = cpu_to_le16(tmp->ptype);
2579			p->value[0] = tmp->ptg;
2580		}
2581
2582	return 0;
2583}
2584
2585/**
2586 * ice_prof_bld_xlt2 - build XLT2 changes
2587 * @blk: hardware block
2588 * @bld: the update package buffer build to add to
2589 * @chgs: the list of changes to make in hardware
2590 */
2591static int
2592ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
2593		  struct list_head *chgs)
2594{
2595	struct ice_chs_chg *tmp;
2596
2597	list_for_each_entry(tmp, chgs, list_entry) {
2598		struct ice_xlt2_section *p;
2599		u32 id;
2600
2601		switch (tmp->type) {
2602		case ICE_VSIG_ADD:
2603		case ICE_VSI_MOVE:
2604		case ICE_VSIG_REM:
2605			id = ice_sect_id(blk, ICE_XLT2);
2606			p = ice_pkg_buf_alloc_section(bld, id,
2607						      struct_size(p, value, 1));
2608
2609			if (!p)
2610				return -ENOSPC;
2611
2612			p->count = cpu_to_le16(1);
2613			p->offset = cpu_to_le16(tmp->vsi);
2614			p->value[0] = cpu_to_le16(tmp->vsig);
2615			break;
2616		default:
2617			break;
2618		}
2619	}
2620
2621	return 0;
2622}
2623
2624/**
2625 * ice_upd_prof_hw - update hardware using the change list
2626 * @hw: pointer to the HW struct
2627 * @blk: hardware block
2628 * @chgs: the list of changes to make in hardware
2629 */
2630static int
2631ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
2632		struct list_head *chgs)
2633{
2634	struct ice_buf_build *b;
2635	struct ice_chs_chg *tmp;
2636	u16 pkg_sects;
2637	u16 xlt1 = 0;
2638	u16 xlt2 = 0;
2639	u16 tcam = 0;
2640	u16 es = 0;
2641	int status;
2642	u16 sects;
2643
2644	/* count number of sections we need */
2645	list_for_each_entry(tmp, chgs, list_entry) {
2646		switch (tmp->type) {
2647		case ICE_PTG_ES_ADD:
2648			if (tmp->add_ptg)
2649				xlt1++;
2650			if (tmp->add_prof)
2651				es++;
2652			break;
2653		case ICE_TCAM_ADD:
2654			tcam++;
2655			break;
2656		case ICE_VSIG_ADD:
2657		case ICE_VSI_MOVE:
2658		case ICE_VSIG_REM:
2659			xlt2++;
2660			break;
2661		default:
2662			break;
2663		}
2664	}
2665	sects = xlt1 + xlt2 + tcam + es;
2666
2667	if (!sects)
2668		return 0;
2669
2670	/* Build update package buffer */
2671	b = ice_pkg_buf_alloc(hw);
2672	if (!b)
2673		return -ENOMEM;
2674
2675	status = ice_pkg_buf_reserve_section(b, sects);
2676	if (status)
2677		goto error_tmp;
2678
2679	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
2680	if (es) {
2681		status = ice_prof_bld_es(hw, blk, b, chgs);
2682		if (status)
2683			goto error_tmp;
2684	}
2685
2686	if (tcam) {
2687		status = ice_prof_bld_tcam(hw, blk, b, chgs);
2688		if (status)
2689			goto error_tmp;
2690	}
2691
2692	if (xlt1) {
2693		status = ice_prof_bld_xlt1(blk, b, chgs);
2694		if (status)
2695			goto error_tmp;
2696	}
2697
2698	if (xlt2) {
2699		status = ice_prof_bld_xlt2(blk, b, chgs);
2700		if (status)
2701			goto error_tmp;
2702	}
2703
2704	/* After the package buffer build, check that the section count in the
2705	 * buffer is non-zero and matches the number of sections detected for
2706	 * the package update.
2707	 */
2708	pkg_sects = ice_pkg_buf_get_active_sections(b);
2709	if (!pkg_sects || pkg_sects != sects) {
2710		status = -EINVAL;
2711		goto error_tmp;
2712	}
2713
2714	/* update package */
2715	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
2716	if (status == -EIO)
2717		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
2718
2719error_tmp:
2720	ice_pkg_buf_free(hw, b);
2721	return status;
2722}
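/* For illustration: a change list holding one ICE_PTG_ES_ADD entry with
 * both add_ptg and add_prof set plus one ICE_VSI_MOVE entry is counted
 * above as xlt1 = 1, es = 1, xlt2 = 1, tcam = 0, so three sections are
 * reserved and the buffer is built in ES, TCAM, XLT1 (PTG), XLT2 (VSIG)
 * order before being sent to firmware via ice_update_pkg().
 */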
2723
2724/**
2725 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
2726 * @hw: pointer to the HW struct
2727 * @prof_id: profile ID
2728 * @mask_sel: mask select
2729 *
2730 * This function enables any of the masks selected by the mask select parameter
2731 * for the profile specified.
2732 */
2733static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
2734{
2735	wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
2736
2737	ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
2738		  GLQF_FDMASK_SEL(prof_id), mask_sel);
2739}
2740
2741struct ice_fd_src_dst_pair {
2742	u8 prot_id;
2743	u8 count;
2744	u16 off;
2745};
2746
2747static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
2748	/* These are defined in pairs */
2749	{ ICE_PROT_IPV4_OF_OR_S, 2, 12 },
2750	{ ICE_PROT_IPV4_OF_OR_S, 2, 16 },
2751
2752	{ ICE_PROT_IPV4_IL, 2, 12 },
2753	{ ICE_PROT_IPV4_IL, 2, 16 },
2754
2755	{ ICE_PROT_IPV6_OF_OR_S, 8, 8 },
2756	{ ICE_PROT_IPV6_OF_OR_S, 8, 24 },
2757
2758	{ ICE_PROT_IPV6_IL, 8, 8 },
2759	{ ICE_PROT_IPV6_IL, 8, 24 },
2760
2761	{ ICE_PROT_TCP_IL, 1, 0 },
2762	{ ICE_PROT_TCP_IL, 1, 2 },
2763
2764	{ ICE_PROT_UDP_OF, 1, 0 },
2765	{ ICE_PROT_UDP_OF, 1, 2 },
2766
2767	{ ICE_PROT_UDP_IL_OR_S, 1, 0 },
2768	{ ICE_PROT_UDP_IL_OR_S, 1, 2 },
2769
2770	{ ICE_PROT_SCTP_IL, 1, 0 },
2771	{ ICE_PROT_SCTP_IL, 1, 2 }
2772};
2773
2774#define ICE_FD_SRC_DST_PAIR_COUNT	ARRAY_SIZE(ice_fd_pairs)
2775
2776/**
2777 * ice_update_fd_swap - set registers appropriately for an FD FV extraction
2778 * @hw: pointer to the HW struct
2779 * @prof_id: profile ID
2780 * @es: extraction sequence (length of array is determined by the block)
2781 */
2782static int
2783ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
2784{
2785	DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
2786	u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
2787#define ICE_FD_FV_NOT_FOUND (-2)
2788	s8 first_free = ICE_FD_FV_NOT_FOUND;
2789	u8 used[ICE_MAX_FV_WORDS] = { 0 };
2790	s8 orig_free, si;
2791	u32 mask_sel = 0;
2792	u8 i, j, k;
2793
2794	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
2795
2796	/* This code assumes that the Flow Director field vectors are assigned
2797	 * from the end of the FV indexes working towards the zero index, that
2798	 * only complete fields will be included and will be consecutive, and
2799	 * that there are no gaps between valid indexes.
2800	 */
2801
2802	/* Determine swap fields present */
2803	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
2804		/* Find the first free entry, assuming right to left population.
2805		 * This is where we can start adding additional pairs if needed.
2806		 */
2807		if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
2808		    ICE_PROT_INVALID)
2809			first_free = i - 1;
2810
2811		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
2812			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
2813			    es[i].off == ice_fd_pairs[j].off) {
2814				__set_bit(j, pair_list);
2815				pair_start[j] = i;
2816			}
2817	}
2818
2819	orig_free = first_free;
2820
2821	/* determine missing swap fields that need to be added */
2822	for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
2823		u8 bit1 = test_bit(i + 1, pair_list);
2824		u8 bit0 = test_bit(i, pair_list);
2825
2826		if (bit0 ^ bit1) {
2827			u8 index;
2828
2829			/* add the appropriate 'paired' entry */
2830			if (!bit0)
2831				index = i;
2832			else
2833				index = i + 1;
2834
2835			/* check for room */
2836			if (first_free + 1 < (s8)ice_fd_pairs[index].count)
2837				return -ENOSPC;
2838
2839			/* place in extraction sequence */
2840			for (k = 0; k < ice_fd_pairs[index].count; k++) {
2841				es[first_free - k].prot_id =
2842					ice_fd_pairs[index].prot_id;
2843				es[first_free - k].off =
2844					ice_fd_pairs[index].off + (k * 2);
2845
2846				if (k > first_free)
2847					return -EIO;
2848
2849				/* keep track of non-relevant fields */
2850				mask_sel |= BIT(first_free - k);
2851			}
2852
2853			pair_start[index] = first_free;
2854			first_free -= ice_fd_pairs[index].count;
2855		}
2856	}
2857
2858	/* fill in the swap array */
2859	si = hw->blk[ICE_BLK_FD].es.fvw - 1;
2860	while (si >= 0) {
2861		u8 indexes_used = 1;
2862
2863		/* assume flat at this index */
2864#define ICE_SWAP_VALID	0x80
2865		used[si] = si | ICE_SWAP_VALID;
2866
2867		if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
2868			si -= indexes_used;
2869			continue;
2870		}
2871
2872		/* check for a swap location */
2873		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
2874			if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
2875			    es[si].off == ice_fd_pairs[j].off) {
2876				u8 idx;
2877
2878				/* determine the appropriate matching field */
2879				idx = j + ((j % 2) ? -1 : 1);
2880
2881				indexes_used = ice_fd_pairs[idx].count;
2882				for (k = 0; k < indexes_used; k++) {
2883					used[si - k] = (pair_start[idx] - k) |
2884						ICE_SWAP_VALID;
2885				}
2886
2887				break;
2888			}
2889
2890		si -= indexes_used;
2891	}
2892
2893	/* for each set of 4 swap and 4 inset indexes, write the appropriate
2894	 * register
2895	 */
2896	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
2897		u32 raw_swap = 0;
2898		u32 raw_in = 0;
2899
2900		for (k = 0; k < 4; k++) {
2901			u8 idx;
2902
2903			idx = (j * 4) + k;
2904			if (used[idx] && !(mask_sel & BIT(idx))) {
2905				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
2906#define ICE_INSET_DFLT 0x9f
2907				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
2908			}
2909		}
2910
2911		/* write the appropriate swap register set */
2912		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
2913
2914		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
2915			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
2916
2917		/* write the appropriate inset register set */
2918		wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
2919
2920		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
2921			  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
2922	}
2923
2924	/* initially clear the mask select for this profile */
2925	ice_update_fd_mask(hw, prof_id, 0);
2926
2927	return 0;
2928}
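/* Illustrative walk-through of the pairing logic above: if the FD
 * extraction sequence contains the IPv4 source address words
 * (ICE_PROT_IPV4_OF_OR_S, offset 12) but not the matching destination
 * words (offset 16), only one bit of that pair is set in pair_list, so
 * the destination words are appended starting at first_free and working
 * down, mask_sel remembers those artificial words so they are skipped
 * when the swap/inset registers are written, and the swap array maps
 * the source and destination slots to each other so that src/dst can be
 * swapped for Tx filters.
 */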
2929
2930/* The entries here need to match the order of enum ice_ptype_attrib */
2931static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
2932	{ ICE_GTP_PDU_EH,	ICE_GTP_PDU_FLAG_MASK },
2933	{ ICE_GTP_SESSION,	ICE_GTP_FLAGS_MASK },
2934	{ ICE_GTP_DOWNLINK,	ICE_GTP_FLAGS_MASK },
2935	{ ICE_GTP_UPLINK,	ICE_GTP_FLAGS_MASK },
2936};
2937
2938/**
2939 * ice_get_ptype_attrib_info - get PTYPE attribute information
2940 * @type: attribute type
2941 * @info: pointer to variable to receive the attribute information
2942 */
2943static void
2944ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
2945			  struct ice_ptype_attrib_info *info)
2946{
2947	*info = ice_ptype_attributes[type];
2948}
2949
2950/**
2951 * ice_add_prof_attrib - add any PTG with attributes to profile
2952 * @prof: pointer to the profile to which PTG entries will be added
2953 * @ptg: PTG to be added
2954 * @ptype: PTYPE that needs to be looked up
2955 * @attr: array of attributes that will be considered
2956 * @attr_cnt: number of elements in the attribute array
2957 */
2958static int
2959ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
2960		    const struct ice_ptype_attributes *attr, u16 attr_cnt)
2961{
2962	bool found = false;
2963	u16 i;
2964
2965	for (i = 0; i < attr_cnt; i++)
2966		if (attr[i].ptype == ptype) {
2967			found = true;
2968
2969			prof->ptg[prof->ptg_cnt] = ptg;
2970			ice_get_ptype_attrib_info(attr[i].attrib,
2971						  &prof->attr[prof->ptg_cnt]);
2972
2973			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
2974				return -ENOSPC;
2975		}
2976
2977	if (!found)
2978		return -ENOENT;
2979
2980	return 0;
2981}
2982
2983/**
2984 * ice_disable_fd_swap - set registers appropriately to disable FD SWAP
2985 * @hw: pointer to the HW struct
2986 * @prof_id: profile ID
2987 */
2988static void
2989ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
2990{
2991	u16 swap_val, fvw_num;
2992	unsigned int i;
2993
2994	swap_val = ICE_SWAP_VALID;
2995	fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE;
2996
2997	/* Since the SWAP Flag in the Programming Desc doesn't work,
2998	 * add a method here to disable the SWAP option by setting
2999	 * certain SWAP and INSET register sets.
3000	 */
3001	for (i = 0; i < fvw_num; i++) {
3002		u32 raw_swap, raw_in;
3003		unsigned int j;
3004
3005		raw_swap = 0;
3006		raw_in = 0;
3007
3008		for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) {
3009			raw_swap |= (swap_val++) << (j * BITS_PER_BYTE);
3010			raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE);
3011		}
3012
3013		/* write the FDIR swap register set */
3014		wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap);
3015
3016		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n",
3017			  prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap);
3018
3019		/* write the FDIR inset register set */
3020		wr32(hw, GLQF_FDINSET(prof_id, i), raw_in);
3021
3022		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n",
3023			  prof_id, i, GLQF_FDINSET(prof_id, i), raw_in);
3024	}
3025}
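/* For illustration, assuming ICE_FDIR_REG_SET_SIZE is 4 (one register
 * covers four field vector words): swap_val starts at ICE_SWAP_VALID
 * (0x80) and increments per byte, so the loop above writes an identity
 * mapping such as
 *
 *	GLQF_FDSWAP(prof_id, 0) = 0x83828180
 *	GLQF_FDSWAP(prof_id, 1) = 0x87868584
 *	GLQF_FDINSET(prof_id, n) = 0x9f9f9f9f	(ICE_INSET_DFLT per byte)
 *
 * i.e. every word maps to itself, which effectively disables the
 * src/dst swap for this profile.
 */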
3026
3027/**
3028 * ice_add_prof - add profile
3029 * @hw: pointer to the HW struct
3030 * @blk: hardware block
3031 * @id: profile tracking ID
3032 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
3033 * @attr: array of attributes
3034 * @attr_cnt: number of elements in attr array
3035 * @es: extraction sequence (length of array is determined by the block)
3036 * @masks: mask for extraction sequence
3037 * @symm: symmetric setting for RSS profiles
3038 * @fd_swap: enable/disable FDIR paired src/dst fields swap option
3039 *
3040 * This function registers a profile, which matches a set of PTYPES with a
3041 * particular extraction sequence. Although the hardware profile is allocated,
3042 * it will not be written until the first call to ice_add_flow that specifies
3043 * the ID value used here.
3044 */
3045int
3046ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
3047	     const struct ice_ptype_attributes *attr, u16 attr_cnt,
3048	     struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
3049{
3050	u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
3051	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
3052	struct ice_prof_map *prof;
3053	u8 byte = 0;
3054	u8 prof_id;
3055	int status;
3056
3057	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
3058
3059	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3060
3061	/* search for existing profile */
3062	status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id);
3063	if (status) {
3064		/* allocate profile ID */
3065		status = ice_alloc_prof_id(hw, blk, &prof_id);
3066		if (status)
3067			goto err_ice_add_prof;
3068		if (blk == ICE_BLK_FD && fd_swap) {
3069			/* For Flow Director block, the extraction sequence may
3070			 * need to be altered in the case where there are paired
3071			 * fields that have no match. This is necessary because
3072			 * for Flow Director, src and dest fields need to paired
3073			 * for filter programming and these values are swapped
3074			 * during Tx.
3075			 */
3076			status = ice_update_fd_swap(hw, prof_id, es);
3077			if (status)
3078				goto err_ice_add_prof;
3079		} else if (blk == ICE_BLK_FD) {
3080			ice_disable_fd_swap(hw, prof_id);
3081		}
3082		status = ice_update_prof_masking(hw, blk, prof_id, masks);
3083		if (status)
3084			goto err_ice_add_prof;
3085
3086		/* and write new es */
3087		ice_write_es(hw, blk, prof_id, es, symm);
3088	}
3089
3090	ice_prof_inc_ref(hw, blk, prof_id);
3091
3092	/* add profile info */
3093	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
3094	if (!prof) {
3095		status = -ENOMEM;
3096		goto err_ice_add_prof;
3097	}
3098
3099	prof->profile_cookie = id;
3100	prof->prof_id = prof_id;
3101	prof->ptg_cnt = 0;
3102	prof->context = 0;
3103
3104	/* build list of ptgs */
3105	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
3106		u8 bit;
3107
3108		if (!ptypes[byte]) {
3109			bytes--;
3110			byte++;
3111			continue;
3112		}
3113
3114		/* Examine 8 bits per byte */
3115		for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
3116				 BITS_PER_BYTE) {
3117			u16 ptype;
3118			u8 ptg;
3119
3120			ptype = byte * BITS_PER_BYTE + bit;
3121
3122			/* The package should place all ptypes in a non-zero
3123			 * PTG, so the following call should never fail.
3124			 */
3125			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
3126				continue;
3127
3128			/* If PTG is already added, skip and continue */
3129			if (test_bit(ptg, ptgs_used))
3130				continue;
3131
3132			__set_bit(ptg, ptgs_used);
3133			/* Check to see if there are any attributes for
3134			 * this PTYPE, and add them if found.
3135			 */
3136			status = ice_add_prof_attrib(prof, ptg, ptype,
3137						     attr, attr_cnt);
3138			if (status == -ENOSPC)
3139				break;
3140			if (status) {
3141				/* This is simply a PTYPE/PTG with no
3142				 * attribute
3143				 */
3144				prof->ptg[prof->ptg_cnt] = ptg;
3145				prof->attr[prof->ptg_cnt].flags = 0;
3146				prof->attr[prof->ptg_cnt].mask = 0;
3147
3148				if (++prof->ptg_cnt >=
3149				    ICE_MAX_PTG_PER_PROFILE)
3150					break;
3151			}
3152		}
3153
3154		bytes--;
3155		byte++;
3156	}
3157
3158	list_add(&prof->list, &hw->blk[blk].es.prof_map);
3159	status = 0;
3160
3161err_ice_add_prof:
3162	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3163	return status;
3164}
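/* Illustrative usage sketch (hypothetical caller, sizes and handle; the
 * flow engine normally builds these arguments): a caller fills a PTYPE
 * bitmap, an extraction sequence of hw->blk[blk].es.fvw words and a
 * per-word mask array, then registers them under a 64-bit tracking ID:
 *
 *	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
 *	struct ice_fv_word es[48] = {};
 *	u16 masks[48] = {};
 *
 *	bitmap_zero(ptypes, ICE_FLOW_PTYPE_MAX);
 *	... set ptype bits and fill es/masks for the desired fields ...
 *	err = ice_add_prof(hw, ICE_BLK_RSS, prof_handle, (u8 *)ptypes,
 *			   NULL, 0, es, masks, symm, true);
 *
 * The same tracking ID is later passed to ice_rem_prof() to tear the
 * profile down.
 */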
3165
3166/**
3167 * ice_search_prof_id - Search for a profile tracking ID
3168 * @hw: pointer to the HW struct
3169 * @blk: hardware block
3170 * @id: profile tracking ID
3171 *
3172 * This will search for a profile tracking ID which was previously added.
3173 * The profile map lock should be held before calling this function.
3174 */
3175struct ice_prof_map *
3176ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
3177{
3178	struct ice_prof_map *entry = NULL;
3179	struct ice_prof_map *map;
3180
3181	list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
3182		if (map->profile_cookie == id) {
3183			entry = map;
3184			break;
3185		}
3186
3187	return entry;
3188}
3189
3190/**
3191 * ice_vsig_prof_id_count - count profiles in a VSIG
3192 * @hw: pointer to the HW struct
3193 * @blk: hardware block
3194 * @vsig: VSIG whose profiles are to be counted
3195 */
3196static u16
3197ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
3198{
3199	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
3200	struct ice_vsig_prof *p;
3201
3202	list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3203			    list)
3204		count++;
3205
3206	return count;
3207}
3208
3209/**
3210 * ice_rel_tcam_idx - release a TCAM index
3211 * @hw: pointer to the HW struct
3212 * @blk: hardware block
3213 * @idx: the index to release
3214 */
3215static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
3216{
3217	/* Masks to invoke a never match entry */
3218	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3219	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
3220	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
3221	int status;
3222
3223	/* write the TCAM entry */
3224	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
3225				      dc_msk, nm_msk);
3226	if (status)
3227		return status;
3228
3229	/* release the TCAM entry */
3230	status = ice_free_tcam_ent(hw, blk, idx);
3231
3232	return status;
3233}
3234
3235/**
3236 * ice_rem_prof_id - remove one profile from a VSIG
3237 * @hw: pointer to the HW struct
3238 * @blk: hardware block
3239 * @prof: pointer to profile structure to remove
3240 */
3241static int
3242ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
3243		struct ice_vsig_prof *prof)
3244{
3245	int status;
3246	u16 i;
3247
3248	for (i = 0; i < prof->tcam_count; i++)
3249		if (prof->tcam[i].in_use) {
3250			prof->tcam[i].in_use = false;
3251			status = ice_rel_tcam_idx(hw, blk,
3252						  prof->tcam[i].tcam_idx);
3253			if (status)
3254				return -EIO;
3255		}
3256
3257	return 0;
3258}
3259
3260/**
3261 * ice_rem_vsig - remove VSIG
3262 * @hw: pointer to the HW struct
3263 * @blk: hardware block
3264 * @vsig: the VSIG to remove
3265 * @chg: the change list
3266 */
3267static int
3268ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3269	     struct list_head *chg)
3270{
3271	u16 idx = vsig & ICE_VSIG_IDX_M;
3272	struct ice_vsig_vsi *vsi_cur;
3273	struct ice_vsig_prof *d, *t;
3274
3275	/* remove TCAM entries */
3276	list_for_each_entry_safe(d, t,
3277				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3278				 list) {
3279		int status;
3280
3281		status = ice_rem_prof_id(hw, blk, d);
3282		if (status)
3283			return status;
3284
3285		list_del(&d->list);
3286		devm_kfree(ice_hw_to_dev(hw), d);
3287	}
3288
3289	/* Move all VSIs associated with this VSIG to the default VSIG */
3290	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3291	/* If the VSIG has at least 1 VSI then iterate through the list
3292	 * and remove the VSIs before deleting the group.
3293	 */
3294	if (vsi_cur)
3295		do {
3296			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
3297			struct ice_chs_chg *p;
3298
3299			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
3300					 GFP_KERNEL);
3301			if (!p)
3302				return -ENOMEM;
3303
3304			p->type = ICE_VSIG_REM;
3305			p->orig_vsig = vsig;
3306			p->vsig = ICE_DEFAULT_VSIG;
3307			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
3308
3309			list_add(&p->list_entry, chg);
3310
3311			vsi_cur = tmp;
3312		} while (vsi_cur);
3313
3314	return ice_vsig_free(hw, blk, vsig);
3315}
3316
3317/**
3318 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
3319 * @hw: pointer to the HW struct
3320 * @blk: hardware block
3321 * @vsig: VSIG to remove the profile from
3322 * @hdl: profile handle indicating which profile to remove
3323 * @chg: list to receive a record of changes
3324 */
3325static int
3326ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3327		     struct list_head *chg)
3328{
3329	u16 idx = vsig & ICE_VSIG_IDX_M;
3330	struct ice_vsig_prof *p, *t;
3331
3332	list_for_each_entry_safe(p, t,
3333				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3334				 list)
3335		if (p->profile_cookie == hdl) {
3336			int status;
3337
3338			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
3339				/* this is the last profile, remove the VSIG */
3340				return ice_rem_vsig(hw, blk, vsig, chg);
3341
3342			status = ice_rem_prof_id(hw, blk, p);
3343			if (!status) {
3344				list_del(&p->list);
3345				devm_kfree(ice_hw_to_dev(hw), p);
3346			}
3347			return status;
3348		}
3349
3350	return -ENOENT;
3351}
3352
3353/**
3354 * ice_rem_flow_all - remove all flows with a particular profile
3355 * @hw: pointer to the HW struct
3356 * @blk: hardware block
3357 * @id: profile tracking ID
3358 */
3359static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
3360{
3361	struct ice_chs_chg *del, *tmp;
3362	struct list_head chg;
3363	int status;
3364	u16 i;
3365
3366	INIT_LIST_HEAD(&chg);
3367
3368	for (i = 1; i < ICE_MAX_VSIGS; i++)
3369		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
3370			if (ice_has_prof_vsig(hw, blk, i, id)) {
3371				status = ice_rem_prof_id_vsig(hw, blk, i, id,
3372							      &chg);
3373				if (status)
3374					goto err_ice_rem_flow_all;
3375			}
3376		}
3377
3378	status = ice_upd_prof_hw(hw, blk, &chg);
3379
3380err_ice_rem_flow_all:
3381	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
3382		list_del(&del->list_entry);
3383		devm_kfree(ice_hw_to_dev(hw), del);
3384	}
3385
3386	return status;
3387}
3388
3389/**
3390 * ice_rem_prof - remove profile
3391 * @hw: pointer to the HW struct
3392 * @blk: hardware block
3393 * @id: profile tracking ID
3394 *
3395 * This will remove the profile specified by the ID parameter, which was
3396 * previously created through ice_add_prof. If any existing entries
3397 * are associated with this profile, they will be removed as well.
3398 */
3399int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
3400{
3401	struct ice_prof_map *pmap;
3402	int status;
3403
3404	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3405
3406	pmap = ice_search_prof_id(hw, blk, id);
3407	if (!pmap) {
3408		status = -ENOENT;
3409		goto err_ice_rem_prof;
3410	}
3411
3412	/* remove all flows with this profile */
3413	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
3414	if (status)
3415		goto err_ice_rem_prof;
3416
3417	/* dereference profile, and possibly remove */
3418	ice_prof_dec_ref(hw, blk, pmap->prof_id);
3419
3420	list_del(&pmap->list);
3421	devm_kfree(ice_hw_to_dev(hw), pmap);
3422
3423err_ice_rem_prof:
3424	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3425	return status;
3426}
3427
3428/**
3429 * ice_get_prof - get profile
3430 * @hw: pointer to the HW struct
3431 * @blk: hardware block
3432 * @hdl: profile handle
3433 * @chg: change list
3434 */
3435static int
3436ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
3437	     struct list_head *chg)
3438{
3439	struct ice_prof_map *map;
3440	struct ice_chs_chg *p;
3441	int status = 0;
3442	u16 i;
3443
3444	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3445	/* Get the details on the profile specified by the handle ID */
3446	map = ice_search_prof_id(hw, blk, hdl);
3447	if (!map) {
3448		status = -ENOENT;
3449		goto err_ice_get_prof;
3450	}
3451
3452	for (i = 0; i < map->ptg_cnt; i++)
3453		if (!hw->blk[blk].es.written[map->prof_id]) {
3454			/* add ES to change list */
3455			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
3456					 GFP_KERNEL);
3457			if (!p) {
3458				status = -ENOMEM;
3459				goto err_ice_get_prof;
3460			}
3461
3462			p->type = ICE_PTG_ES_ADD;
3463			p->ptype = 0;
3464			p->ptg = map->ptg[i];
3465			p->add_ptg = 0;
3466
3467			p->add_prof = 1;
3468			p->prof_id = map->prof_id;
3469
3470			hw->blk[blk].es.written[map->prof_id] = true;
3471
3472			list_add(&p->list_entry, chg);
3473		}
3474
3475err_ice_get_prof:
3476	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3477	/* let caller clean up the change list */
3478	return status;
3479}
3480
3481/**
3482 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
3483 * @hw: pointer to the HW struct
3484 * @blk: hardware block
3485 * @vsig: VSIG from which to copy the list
3486 * @lst: output list
3487 *
3488 * This routine makes a copy of the list of profiles in the specified VSIG.
3489 */
3490static int
3491ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3492		   struct list_head *lst)
3493{
3494	struct ice_vsig_prof *ent1, *ent2;
3495	u16 idx = vsig & ICE_VSIG_IDX_M;
3496
3497	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3498			    list) {
3499		struct ice_vsig_prof *p;
3500
3501		/* copy to the input list */
3502		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
3503				 GFP_KERNEL);
3504		if (!p)
3505			goto err_ice_get_profs_vsig;
3506
3507		list_add_tail(&p->list, lst);
3508	}
3509
3510	return 0;
3511
3512err_ice_get_profs_vsig:
3513	list_for_each_entry_safe(ent1, ent2, lst, list) {
3514		list_del(&ent1->list);
3515		devm_kfree(ice_hw_to_dev(hw), ent1);
3516	}
3517
3518	return -ENOMEM;
3519}
3520
3521/**
3522 * ice_add_prof_to_lst - add profile entry to a list
3523 * @hw: pointer to the HW struct
3524 * @blk: hardware block
3525 * @lst: the list to be added to
3526 * @hdl: profile handle of entry to add
3527 */
3528static int
3529ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
3530		    struct list_head *lst, u64 hdl)
3531{
3532	struct ice_prof_map *map;
3533	struct ice_vsig_prof *p;
3534	int status = 0;
3535	u16 i;
3536
3537	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3538	map = ice_search_prof_id(hw, blk, hdl);
3539	if (!map) {
3540		status = -ENOENT;
3541		goto err_ice_add_prof_to_lst;
3542	}
3543
3544	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3545	if (!p) {
3546		status = -ENOMEM;
3547		goto err_ice_add_prof_to_lst;
3548	}
3549
3550	p->profile_cookie = map->profile_cookie;
3551	p->prof_id = map->prof_id;
3552	p->tcam_count = map->ptg_cnt;
3553
3554	for (i = 0; i < map->ptg_cnt; i++) {
3555		p->tcam[i].prof_id = map->prof_id;
3556		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
3557		p->tcam[i].ptg = map->ptg[i];
3558	}
3559
3560	list_add(&p->list, lst);
3561
3562err_ice_add_prof_to_lst:
3563	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3564	return status;
3565}
3566
3567/**
3568 * ice_move_vsi - move VSI to another VSIG
3569 * @hw: pointer to the HW struct
3570 * @blk: hardware block
3571 * @vsi: the VSI to move
3572 * @vsig: the VSIG to move the VSI to
3573 * @chg: the change list
3574 */
3575static int
3576ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
3577	     struct list_head *chg)
3578{
3579	struct ice_chs_chg *p;
3580	u16 orig_vsig;
3581	int status;
3582
3583	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3584	if (!p)
3585		return -ENOMEM;
3586
3587	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3588	if (!status)
3589		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3590
3591	if (status) {
3592		devm_kfree(ice_hw_to_dev(hw), p);
3593		return status;
3594	}
3595
3596	p->type = ICE_VSI_MOVE;
3597	p->vsi = vsi;
3598	p->orig_vsig = orig_vsig;
3599	p->vsig = vsig;
3600
3601	list_add(&p->list_entry, chg);
3602
3603	return 0;
3604}
3605
3606/**
3607 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
3608 * @hw: pointer to the HW struct
3609 * @idx: the index of the TCAM entry to remove
3610 * @chg: the list of change structures to search
3611 */
3612static void
3613ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
3614{
3615	struct ice_chs_chg *pos, *tmp;
3616
3617	list_for_each_entry_safe(tmp, pos, chg, list_entry)
3618		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
3619			list_del(&tmp->list_entry);
3620			devm_kfree(ice_hw_to_dev(hw), tmp);
3621		}
3622}
3623
3624/**
3625 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
3626 * @hw: pointer to the HW struct
3627 * @blk: hardware block
3628 * @enable: true to enable, false to disable
3629 * @vsig: the VSIG of the TCAM entry
3630 * @tcam: pointer to the TCAM info structure of the TCAM to disable
3631 * @chg: the change list
3632 *
3633 * This function appends an enable or disable TCAM entry in the change log
3634 */
3635static int
3636ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
3637		      u16 vsig, struct ice_tcam_inf *tcam,
3638		      struct list_head *chg)
3639{
3640	struct ice_chs_chg *p;
3641	int status;
3642
3643	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3644	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
3645	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
3646
3647	/* if disabling, free the TCAM */
3648	if (!enable) {
3649		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
3650
3651		/* if we have already created a change for this TCAM entry, then
3652		 * we need to remove that entry, in order to prevent writing to
3653		 * a TCAM entry we will no longer have ownership of.
3654		 */
3655		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
3656		tcam->tcam_idx = 0;
3657		tcam->in_use = 0;
3658		return status;
3659	}
3660
3661	/* for re-enabling, reallocate a TCAM */
3662	/* for entries with empty attribute masks, allocate entry from
3663	 * the bottom of the TCAM table; otherwise, allocate from the
3664	 * top of the table in order to give it higher priority
3665	 */
3666	status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
3667				    &tcam->tcam_idx);
3668	if (status)
3669		return status;
3670
3671	/* add TCAM to change list */
3672	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3673	if (!p)
3674		return -ENOMEM;
3675
3676	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
3677				      tcam->ptg, vsig, 0, tcam->attr.flags,
3678				      vl_msk, dc_msk, nm_msk);
3679	if (status)
3680		goto err_ice_prof_tcam_ena_dis;
3681
3682	tcam->in_use = 1;
3683
3684	p->type = ICE_TCAM_ADD;
3685	p->add_tcam_idx = true;
3686	p->prof_id = tcam->prof_id;
3687	p->ptg = tcam->ptg;
3688	p->vsig = 0;
3689	p->tcam_idx = tcam->tcam_idx;
3690
3691	/* log change */
3692	list_add(&p->list_entry, chg);
3693
3694	return 0;
3695
3696err_ice_prof_tcam_ena_dis:
3697	devm_kfree(ice_hw_to_dev(hw), p);
3698	return status;
3699}
3700
3701/**
3702 * ice_adj_prof_priorities - adjust profile based on priorities
3703 * @hw: pointer to the HW struct
3704 * @blk: hardware block
3705 * @vsig: the VSIG for which to adjust profile priorities
3706 * @chg: the change list
3707 */
3708static int
3709ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3710			struct list_head *chg)
3711{
3712	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
3713	struct ice_vsig_prof *t;
3714	int status;
3715	u16 idx;
3716
3717	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
3718	idx = vsig & ICE_VSIG_IDX_M;
3719
3720	/* Priority is based on the order in which the profiles are added. The
3721	 * newest added profile has highest priority and the oldest added
3722	 * profile has the lowest priority. Since the profile property list for
3723	 * a VSIG is sorted from newest to oldest, this code traverses the list
3724	 * in order and enables the first of each PTG that it finds (that is not
3725	 * already enabled); it also disables any duplicate PTGs that it finds
3726	 * in the older profiles (that are currently enabled).
3727	 */
3728
3729	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3730			    list) {
3731		u16 i;
3732
3733		for (i = 0; i < t->tcam_count; i++) {
3734			/* Scan the priorities from newest to oldest.
3735			 * Make sure that the newest profiles take priority.
3736			 */
3737			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
3738			    t->tcam[i].in_use) {
3739				/* need to mark this PTG as never match, as it
3740				 * was already in use and is therefore a duplicate
3741				 * (and lower priority)
3742				 */
3743				status = ice_prof_tcam_ena_dis(hw, blk, false,
3744							       vsig,
3745							       &t->tcam[i],
3746							       chg);
3747				if (status)
3748					return status;
3749			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
3750				   !t->tcam[i].in_use) {
3751			/* need to enable this PTG, as it is not in use
3752				 * and not enabled (highest priority)
3753				 */
3754				status = ice_prof_tcam_ena_dis(hw, blk, true,
3755							       vsig,
3756							       &t->tcam[i],
3757							       chg);
3758				if (status)
3759					return status;
3760			}
3761
3762			/* keep track of used ptgs */
3763			__set_bit(t->tcam[i].ptg, ptgs_used);
3764		}
3765	}
3766
3767	return 0;
3768}
3769
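/* Illustrative example (not part of the driver): suppose a VSIG's prop_lst
 * holds profile B (added last, so newest) followed by profile A, and both
 * carry a TCAM entry for PTG 5. Walking the list newest to oldest,
 * ice_adj_prof_priorities() keeps B's PTG 5 entry enabled (enabling it if
 * needed, since it is the first occurrence) and disables A's PTG 5 entry via
 * ice_prof_tcam_ena_dis(hw, blk, false, vsig, &a_tcam, chg), so the newer
 * profile wins whenever two profiles match the same packet type group.
 * "a_tcam" is a hypothetical name used only for this sketch.
 */
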
3770/**
3771 * ice_add_prof_id_vsig - add profile to VSIG
3772 * @hw: pointer to the HW struct
3773 * @blk: hardware block
3774 * @vsig: the VSIG to which this profile is to be added
3775 * @hdl: the profile handle indicating the profile to add
3776 * @rev: true to add entries to the end of the list
3777 * @chg: the change list
3778 */
3779static int
3780ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3781		     bool rev, struct list_head *chg)
3782{
3783	/* Masks that ignore flags */
3784	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3785	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
3786	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
3787	struct ice_prof_map *map;
3788	struct ice_vsig_prof *t;
3789	struct ice_chs_chg *p;
3790	u16 vsig_idx, i;
3791	int status = 0;
3792
3793	/* Error, if this VSIG already has this profile */
3794	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
3795		return -EEXIST;
3796
3797	/* new VSIG profile structure */
3798	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
3799	if (!t)
3800		return -ENOMEM;
3801
3802	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3803	/* Get the details on the profile specified by the handle ID */
3804	map = ice_search_prof_id(hw, blk, hdl);
3805	if (!map) {
3806		status = -ENOENT;
3807		goto err_ice_add_prof_id_vsig;
3808	}
3809
3810	t->profile_cookie = map->profile_cookie;
3811	t->prof_id = map->prof_id;
3812	t->tcam_count = map->ptg_cnt;
3813
3814	/* create TCAM entries */
3815	for (i = 0; i < map->ptg_cnt; i++) {
3816		u16 tcam_idx;
3817
3818		/* add TCAM to change list */
3819		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3820		if (!p) {
3821			status = -ENOMEM;
3822			goto err_ice_add_prof_id_vsig;
3823		}
3824
3825		/* allocate the TCAM entry index */
3826		/* for entries with empty attribute masks, allocate entry from
3827		 * the bottom of the TCAM table; otherwise, allocate from the
3828		 * top of the table in order to give it higher priority
3829		 */
3830		status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
3831					    &tcam_idx);
3832		if (status) {
3833			devm_kfree(ice_hw_to_dev(hw), p);
3834			goto err_ice_add_prof_id_vsig;
3835		}
3836
3837		t->tcam[i].ptg = map->ptg[i];
3838		t->tcam[i].prof_id = map->prof_id;
3839		t->tcam[i].tcam_idx = tcam_idx;
3840		t->tcam[i].attr = map->attr[i];
3841		t->tcam[i].in_use = true;
3842
3843		p->type = ICE_TCAM_ADD;
3844		p->add_tcam_idx = true;
3845		p->prof_id = t->tcam[i].prof_id;
3846		p->ptg = t->tcam[i].ptg;
3847		p->vsig = vsig;
3848		p->tcam_idx = t->tcam[i].tcam_idx;
3849
3850		/* write the TCAM entry */
3851		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
3852					      t->tcam[i].prof_id,
3853					      t->tcam[i].ptg, vsig, 0, 0,
3854					      vl_msk, dc_msk, nm_msk);
3855		if (status) {
3856			devm_kfree(ice_hw_to_dev(hw), p);
3857			goto err_ice_add_prof_id_vsig;
3858		}
3859
3860		/* log change */
3861		list_add(&p->list_entry, chg);
3862	}
3863
3864	/* add profile to VSIG */
3865	vsig_idx = vsig & ICE_VSIG_IDX_M;
3866	if (rev)
3867		list_add_tail(&t->list,
3868			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3869	else
3870		list_add(&t->list,
3871			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3872
3873	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3874	return status;
3875
3876err_ice_add_prof_id_vsig:
3877	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3878	/* let caller clean up the change list */
3879	devm_kfree(ice_hw_to_dev(hw), t);
3880	return status;
3881}
3882
3883/**
3884 * ice_create_prof_id_vsig - add a new VSIG with a single profile
3885 * @hw: pointer to the HW struct
3886 * @blk: hardware block
3887 * @vsi: the initial VSI that will be in VSIG
3888 * @hdl: the profile handle of the profile that will be added to the VSIG
3889 * @chg: the change list
3890 */
3891static int
3892ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
3893			struct list_head *chg)
3894{
3895	struct ice_chs_chg *p;
3896	u16 new_vsig;
3897	int status;
3898
3899	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3900	if (!p)
3901		return -ENOMEM;
3902
3903	new_vsig = ice_vsig_alloc(hw, blk);
3904	if (!new_vsig) {
3905		status = -EIO;
3906		goto err_ice_create_prof_id_vsig;
3907	}
3908
3909	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
3910	if (status)
3911		goto err_ice_create_prof_id_vsig;
3912
3913	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
3914	if (status)
3915		goto err_ice_create_prof_id_vsig;
3916
3917	p->type = ICE_VSIG_ADD;
3918	p->vsi = vsi;
3919	p->orig_vsig = ICE_DEFAULT_VSIG;
3920	p->vsig = new_vsig;
3921
3922	list_add(&p->list_entry, chg);
3923
3924	return 0;
3925
3926err_ice_create_prof_id_vsig:
3927	/* let caller clean up the change list */
3928	devm_kfree(ice_hw_to_dev(hw), p);
3929	return status;
3930}
3931
3932/**
3933 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
3934 * @hw: pointer to the HW struct
3935 * @blk: hardware block
3936 * @vsi: the initial VSI that will be in VSIG
3937 * @lst: the list of profile that will be added to the VSIG
3938 * @new_vsig: return of new VSIG
3939 * @chg: the change list
3940 */
3941static int
3942ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
3943			 struct list_head *lst, u16 *new_vsig,
3944			 struct list_head *chg)
3945{
3946	struct ice_vsig_prof *t;
3947	int status;
3948	u16 vsig;
3949
3950	vsig = ice_vsig_alloc(hw, blk);
3951	if (!vsig)
3952		return -EIO;
3953
3954	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
3955	if (status)
3956		return status;
3957
3958	list_for_each_entry(t, lst, list) {
3959		/* Reverse the order here since we are copying the list */
3960		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
3961					      true, chg);
3962		if (status)
3963			return status;
3964	}
3965
3966	*new_vsig = vsig;
3967
3968	return 0;
3969}
3970
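/* Illustrative note (an assumption drawn from the code above, not a comment
 * from the driver): @lst is a copy of a VSIG's prop_lst and is therefore
 * ordered newest profile first. Because each entry is added with rev == true
 * (list_add_tail() in ice_add_prof_id_vsig()), iterating newest to oldest
 * while appending to the tail reproduces the same newest-first order in the
 * new VSIG; e.g. lst = [P3, P2, P1] yields prop_lst = [P3, P2, P1], so a
 * later ice_adj_prof_priorities() call sees unchanged relative priorities.
 * P1/P2/P3 are hypothetical labels used only for this example.
 */
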
3971/**
3972 * ice_find_prof_vsig - find a VSIG with a specific profile handle
3973 * @hw: pointer to the HW struct
3974 * @blk: hardware block
3975 * @hdl: the profile handle of the profile to search for
3976 * @vsig: returns the VSIG with the matching profile
3977 */
3978static bool
3979ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
3980{
3981	struct ice_vsig_prof *t;
3982	struct list_head lst;
3983	int status;
3984
3985	INIT_LIST_HEAD(&lst);
3986
3987	t = kzalloc(sizeof(*t), GFP_KERNEL);
3988	if (!t)
3989		return false;
3990
3991	t->profile_cookie = hdl;
3992	list_add(&t->list, &lst);
3993
3994	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
3995
3996	list_del(&t->list);
3997	kfree(t);
3998
3999	return !status;
4000}
4001
4002/**
4003 * ice_add_prof_id_flow - add profile flow
4004 * @hw: pointer to the HW struct
4005 * @blk: hardware block
4006 * @vsi: the VSI to enable with the profile specified by ID
4007 * @hdl: profile handle
4008 *
4009 * Calling this function will update the hardware tables to enable the
4010 * profile indicated by the @hdl parameter for the VSI specified by @vsi.
4011 * Once the call succeeds, the flow is enabled.
4012 */
4013int
4014ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4015{
4016	struct ice_vsig_prof *tmp1, *del1;
4017	struct ice_chs_chg *tmp, *del;
4018	struct list_head union_lst;
4019	struct list_head chg;
4020	int status;
4021	u16 vsig;
4022
4023	INIT_LIST_HEAD(&union_lst);
4024	INIT_LIST_HEAD(&chg);
4025
4026	/* Get profile */
4027	status = ice_get_prof(hw, blk, hdl, &chg);
4028	if (status)
4029		return status;
4030
4031	/* determine if VSI is already part of a VSIG */
4032	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4033	if (!status && vsig) {
4034		bool only_vsi;
4035		u16 or_vsig;
4036		u16 ref;
4037
4038		/* found in VSIG */
4039		or_vsig = vsig;
4040
4041		/* make sure that there is no overlap/conflict between the new
4042		 * characteristics and the existing ones; we don't support that
4043		 * scenario
4044		 */
4045		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
4046			status = -EEXIST;
4047			goto err_ice_add_prof_id_flow;
4048		}
4049
4050		/* last VSI in the VSIG? */
4051		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4052		if (status)
4053			goto err_ice_add_prof_id_flow;
4054		only_vsi = (ref == 1);
4055
4056		/* create a union of the current profiles and the one being
4057		 * added
4058		 */
4059		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
4060		if (status)
4061			goto err_ice_add_prof_id_flow;
4062
4063		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
4064		if (status)
4065			goto err_ice_add_prof_id_flow;
4066
4067		/* search for an existing VSIG with an exact characteristic match */
4068		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
4069		if (!status) {
4070			/* move VSI to the VSIG that matches */
4071			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4072			if (status)
4073				goto err_ice_add_prof_id_flow;
4074
4075			/* VSI has been moved out of or_vsig. If the or_vsig had
4076			 * only that VSI it is now empty and can be removed.
4077			 */
4078			if (only_vsi) {
4079				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
4080				if (status)
4081					goto err_ice_add_prof_id_flow;
4082			}
4083		} else if (only_vsi) {
4084			/* If the original VSIG only contains one VSI, then it
4085			 * will be the requesting VSI. In this case the VSI is
4086			 * not sharing entries and we can simply add the new
4087			 * profile to the VSIG.
4088			 */
4089			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
4090						      &chg);
4091			if (status)
4092				goto err_ice_add_prof_id_flow;
4093
4094			/* Adjust priorities */
4095			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4096			if (status)
4097				goto err_ice_add_prof_id_flow;
4098		} else {
4099			/* No match, so we need a new VSIG */
4100			status = ice_create_vsig_from_lst(hw, blk, vsi,
4101							  &union_lst, &vsig,
4102							  &chg);
4103			if (status)
4104				goto err_ice_add_prof_id_flow;
4105
4106			/* Adjust priorities */
4107			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4108			if (status)
4109				goto err_ice_add_prof_id_flow;
4110		}
4111	} else {
4112		/* need to find or add a VSIG */
4113		/* search for an existing VSIG with an exact characteristic match */
4114		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
4115			/* found an exact match */
4116			/* add or move VSI to the VSIG that matches */
4117			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4118			if (status)
4119				goto err_ice_add_prof_id_flow;
4120		} else {
4121			/* we did not find an exact match */
4122			/* we need to add a VSIG */
4123			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
4124							 &chg);
4125			if (status)
4126				goto err_ice_add_prof_id_flow;
4127		}
4128	}
4129
4130	/* update hardware */
4131	if (!status)
4132		status = ice_upd_prof_hw(hw, blk, &chg);
4133
4134err_ice_add_prof_id_flow:
4135	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4136		list_del(&del->list_entry);
4137		devm_kfree(ice_hw_to_dev(hw), del);
4138	}
4139
4140	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
4141		list_del(&del1->list);
4142		devm_kfree(ice_hw_to_dev(hw), del1);
4143	}
4144
4145	return status;
4146}
4147
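/* Hedged usage sketch (hypothetical caller, not part of this file): enable a
 * profile for a single VSI and undo it if a later step fails, mirroring what
 * ice_flow_assoc_fdir_prof() below does for the FD block. "vsi_num",
 * "prof_id" and "later_step" are illustrative names only.
 *
 *	err = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, prof_id);
 *	if (err)
 *		return err;
 *	err = later_step(hw);
 *	if (err)
 *		ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, prof_id);
 *	return err;
 */
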
4148/**
4149 * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI
4150 * @hw: pointer to the HW struct
4151 * @blk: HW block
4152 * @dest_vsi: dest VSI
4153 * @fdir_vsi: fdir programming VSI
4154 * @hdl: profile handle
4155 *
4156 * Update the hardware tables to enable the FDIR profile indicated by @hdl for
4157 * the VSI specified by @dest_vsi. On success, the flow will be enabled.
4158 *
4159 * Return: 0 on success or negative errno on failure.
4160 */
4161int
4162ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
4163			 u16 dest_vsi, u16 fdir_vsi, u64 hdl)
4164{
4165	u16 vsi_num;
4166	int status;
4167
4168	if (blk != ICE_BLK_FD)
4169		return -EINVAL;
4170
4171	vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
4172	status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
4173	if (status) {
4174		ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n",
4175			  status);
4176		return status;
4177	}
4178
4179	vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
4180	status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
4181	if (status) {
4182		ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n",
4183			  status);
4184		goto err;
4185	}
4186
4187	return 0;
4188
4189err:
4190	vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
4191	ice_rem_prof_id_flow(hw, blk, vsi_num, hdl);
4192
4193	return status;
4194}
4195
4196/**
4197 * ice_rem_prof_from_list - remove a profile from list
4198 * @hw: pointer to the HW struct
4199 * @lst: list to remove the profile from
4200 * @hdl: the profile handle indicating the profile to remove
4201 */
4202static int
4203ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
4204{
4205	struct ice_vsig_prof *ent, *tmp;
4206
4207	list_for_each_entry_safe(ent, tmp, lst, list)
4208		if (ent->profile_cookie == hdl) {
4209			list_del(&ent->list);
4210			devm_kfree(ice_hw_to_dev(hw), ent);
4211			return 0;
4212		}
4213
4214	return -ENOENT;
4215}
4216
4217/**
4218 * ice_rem_prof_id_flow - remove flow
4219 * @hw: pointer to the HW struct
4220 * @blk: hardware block
4221 * @vsi: the VSI from which to remove the profile specified by ID
4222 * @hdl: profile tracking handle
4223 *
4224 * Calling this function will update the hardware tables to remove the
4225 * profile indicated by the @hdl parameter for the VSI specified by @vsi.
4226 * Once the call succeeds, the flow is disabled.
4227 */
4228int
4229ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4230{
4231	struct ice_vsig_prof *tmp1, *del1;
4232	struct ice_chs_chg *tmp, *del;
4233	struct list_head chg, copy;
4234	int status;
4235	u16 vsig;
4236
4237	INIT_LIST_HEAD(&copy);
4238	INIT_LIST_HEAD(&chg);
4239
4240	/* determine if VSI is already part of a VSIG */
4241	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4242	if (!status && vsig) {
4243		bool last_profile;
4244		bool only_vsi;
4245		u16 ref;
4246
4247		/* found in VSIG */
4248		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
4249		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4250		if (status)
4251			goto err_ice_rem_prof_id_flow;
4252		only_vsi = (ref == 1);
4253
4254		if (only_vsi) {
4255			/* If the original VSIG only contains one reference,
4256			 * which will be the requesting VSI, then the VSI is not
4257			 * sharing entries and we can simply remove the specific
4258			 * characteristics from the VSIG.
4259			 */
4260
4261			if (last_profile) {
4262				/* If there are no profiles left for this VSIG,
4263				 * then simply remove the VSIG.
4264				 */
4265				status = ice_rem_vsig(hw, blk, vsig, &chg);
4266				if (status)
4267					goto err_ice_rem_prof_id_flow;
4268			} else {
4269				status = ice_rem_prof_id_vsig(hw, blk, vsig,
4270							      hdl, &chg);
4271				if (status)
4272					goto err_ice_rem_prof_id_flow;
4273
4274				/* Adjust priorities */
4275				status = ice_adj_prof_priorities(hw, blk, vsig,
4276								 &chg);
4277				if (status)
4278					goto err_ice_rem_prof_id_flow;
4279			}
4280
4281		} else {
4282			/* Make a copy of the VSIG's list of Profiles */
4283			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
4284			if (status)
4285				goto err_ice_rem_prof_id_flow;
4286
4287			/* Remove specified profile entry from the list */
4288			status = ice_rem_prof_from_list(hw, &copy, hdl);
4289			if (status)
4290				goto err_ice_rem_prof_id_flow;
4291
4292			if (list_empty(&copy)) {
4293				status = ice_move_vsi(hw, blk, vsi,
4294						      ICE_DEFAULT_VSIG, &chg);
4295				if (status)
4296					goto err_ice_rem_prof_id_flow;
4297
4298			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
4299							    &vsig)) {
4300				/* found an exact match */
4301				/* add or move VSI to the VSIG that matches */
4302				/* Search for a VSIG with a matching profile
4303				 * list
4304				 */
4305
4306				/* Found match, move VSI to the matching VSIG */
4307				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4308				if (status)
4309					goto err_ice_rem_prof_id_flow;
4310			} else {
4311				/* since no existing VSIG supports this
4312				 * characteristic pattern, we need to create a
4313				 * new VSIG and TCAM entries
4314				 */
4315				status = ice_create_vsig_from_lst(hw, blk, vsi,
4316								  &copy, &vsig,
4317								  &chg);
4318				if (status)
4319					goto err_ice_rem_prof_id_flow;
4320
4321				/* Adjust priorities */
4322				status = ice_adj_prof_priorities(hw, blk, vsig,
4323								 &chg);
4324				if (status)
4325					goto err_ice_rem_prof_id_flow;
4326			}
4327		}
4328	} else {
4329		status = -ENOENT;
4330	}
4331
4332	/* update hardware tables */
4333	if (!status)
4334		status = ice_upd_prof_hw(hw, blk, &chg);
4335
4336err_ice_rem_prof_id_flow:
4337	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4338		list_del(&del->list_entry);
4339		devm_kfree(ice_hw_to_dev(hw), del);
4340	}
4341
4342	list_for_each_entry_safe(del1, tmp1, &copy, list) {
4343		list_del(&del1->list);
4344		devm_kfree(ice_hw_to_dev(hw), del1);
4345	}
4346
4347	return status;
4348}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2019, Intel Corporation. */
   3
   4#include "ice_common.h"
   5#include "ice_flex_pipe.h"
   6#include "ice_flow.h"
   7
   8/* To support tunneling entries by PF, the package will append the PF number to
   9 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
  10 */
  11static const struct ice_tunnel_type_scan tnls[] = {
  12	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
  13	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
  14	{ TNL_LAST,		"" }
  15};
  16
  17static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
  18	/* SWITCH */
  19	{
  20		ICE_SID_XLT0_SW,
  21		ICE_SID_XLT_KEY_BUILDER_SW,
  22		ICE_SID_XLT1_SW,
  23		ICE_SID_XLT2_SW,
  24		ICE_SID_PROFID_TCAM_SW,
  25		ICE_SID_PROFID_REDIR_SW,
  26		ICE_SID_FLD_VEC_SW,
  27		ICE_SID_CDID_KEY_BUILDER_SW,
  28		ICE_SID_CDID_REDIR_SW
  29	},
  30
  31	/* ACL */
  32	{
  33		ICE_SID_XLT0_ACL,
  34		ICE_SID_XLT_KEY_BUILDER_ACL,
  35		ICE_SID_XLT1_ACL,
  36		ICE_SID_XLT2_ACL,
  37		ICE_SID_PROFID_TCAM_ACL,
  38		ICE_SID_PROFID_REDIR_ACL,
  39		ICE_SID_FLD_VEC_ACL,
  40		ICE_SID_CDID_KEY_BUILDER_ACL,
  41		ICE_SID_CDID_REDIR_ACL
  42	},
  43
  44	/* FD */
  45	{
  46		ICE_SID_XLT0_FD,
  47		ICE_SID_XLT_KEY_BUILDER_FD,
  48		ICE_SID_XLT1_FD,
  49		ICE_SID_XLT2_FD,
  50		ICE_SID_PROFID_TCAM_FD,
  51		ICE_SID_PROFID_REDIR_FD,
  52		ICE_SID_FLD_VEC_FD,
  53		ICE_SID_CDID_KEY_BUILDER_FD,
  54		ICE_SID_CDID_REDIR_FD
  55	},
  56
  57	/* RSS */
  58	{
  59		ICE_SID_XLT0_RSS,
  60		ICE_SID_XLT_KEY_BUILDER_RSS,
  61		ICE_SID_XLT1_RSS,
  62		ICE_SID_XLT2_RSS,
  63		ICE_SID_PROFID_TCAM_RSS,
  64		ICE_SID_PROFID_REDIR_RSS,
  65		ICE_SID_FLD_VEC_RSS,
  66		ICE_SID_CDID_KEY_BUILDER_RSS,
  67		ICE_SID_CDID_REDIR_RSS
  68	},
  69
  70	/* PE */
  71	{
  72		ICE_SID_XLT0_PE,
  73		ICE_SID_XLT_KEY_BUILDER_PE,
  74		ICE_SID_XLT1_PE,
  75		ICE_SID_XLT2_PE,
  76		ICE_SID_PROFID_TCAM_PE,
  77		ICE_SID_PROFID_REDIR_PE,
  78		ICE_SID_FLD_VEC_PE,
  79		ICE_SID_CDID_KEY_BUILDER_PE,
  80		ICE_SID_CDID_REDIR_PE
  81	}
  82};
  83
  84/**
  85 * ice_sect_id - returns section ID
  86 * @blk: block type
  87 * @sect: section type
  88 *
  89 * This helper function returns the proper section ID given a block type and a
  90 * section type.
  91 */
  92static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
  93{
  94	return ice_sect_lkup[blk][sect];
  95}
  96
  97/**
  98 * ice_pkg_val_buf
  99 * @buf: pointer to the ice buffer
 100 *
 101 * This helper function validates a buffer's header.
 102 */
 103static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
 104{
 105	struct ice_buf_hdr *hdr;
 106	u16 section_count;
 107	u16 data_end;
 108
 109	hdr = (struct ice_buf_hdr *)buf->buf;
 110	/* verify data */
 111	section_count = le16_to_cpu(hdr->section_count);
 112	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
 113		return NULL;
 114
 115	data_end = le16_to_cpu(hdr->data_end);
 116	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
 117		return NULL;
 118
 119	return hdr;
 120}
 121
 122/**
 123 * ice_find_buf_table
 124 * @ice_seg: pointer to the ice segment
 125 *
 126 * Returns the address of the buffer table within the ice segment.
 127 */
 128static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
 129{
 130	struct ice_nvm_table *nvms;
 131
 132	nvms = (struct ice_nvm_table *)
 133		(ice_seg->device_table +
 134		 le32_to_cpu(ice_seg->device_table_count));
 135
 136	return (__force struct ice_buf_table *)
 137		(nvms->vers + le32_to_cpu(nvms->table_count));
 138}
 139
 140/**
 141 * ice_pkg_enum_buf
 142 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 143 * @state: pointer to the enum state
 144 *
 145 * This function will enumerate all the buffers in the ice segment. The first
 146 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 147 * ice_seg is set to NULL which continues the enumeration. When the function
 148 * returns a NULL pointer, then the end of the buffers has been reached, or an
 149 * unexpected value has been detected (for example an invalid section count or
 150 * an invalid buffer end value).
 151 */
 152static struct ice_buf_hdr *
 153ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
 154{
 155	if (ice_seg) {
 156		state->buf_table = ice_find_buf_table(ice_seg);
 157		if (!state->buf_table)
 158			return NULL;
 159
 160		state->buf_idx = 0;
 161		return ice_pkg_val_buf(state->buf_table->buf_array);
 162	}
 163
 164	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
 165		return ice_pkg_val_buf(state->buf_table->buf_array +
 166				       state->buf_idx);
 167	else
 168		return NULL;
 169}
 170
 171/**
 172 * ice_pkg_advance_sect
 173 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 174 * @state: pointer to the enum state
 175 *
 176 * This helper function will advance the section within the ice segment,
 177 * also advancing the buffer if needed.
 178 */
 179static bool
 180ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
 181{
 182	if (!ice_seg && !state->buf)
 183		return false;
 184
 185	if (!ice_seg && state->buf)
 186		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
 187			return true;
 188
 189	state->buf = ice_pkg_enum_buf(ice_seg, state);
 190	if (!state->buf)
 191		return false;
 192
 193	/* start of new buffer, reset section index */
 194	state->sect_idx = 0;
 195	return true;
 196}
 197
 198/**
 199 * ice_pkg_enum_section
 200 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 201 * @state: pointer to the enum state
 202 * @sect_type: section type to enumerate
 203 *
 204 * This function will enumerate all the sections of a particular type in the
 205 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 206 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 207 * When the function returns a NULL pointer, then the end of the matching
 208 * sections has been reached.
 209 */
 210static void *
 211ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
 212		     u32 sect_type)
 213{
 214	u16 offset, size;
 215
 216	if (ice_seg)
 217		state->type = sect_type;
 218
 219	if (!ice_pkg_advance_sect(ice_seg, state))
 220		return NULL;
 221
 222	/* scan for next matching section */
 223	while (state->buf->section_entry[state->sect_idx].type !=
 224	       cpu_to_le32(state->type))
 225		if (!ice_pkg_advance_sect(NULL, state))
 226			return NULL;
 227
 228	/* validate section */
 229	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
 230	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
 231		return NULL;
 232
 233	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
 234	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
 235		return NULL;
 236
 237	/* make sure the section fits in the buffer */
 238	if (offset + size > ICE_PKG_BUF_SIZE)
 239		return NULL;
 240
 241	state->sect_type =
 242		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
 243
 244	/* calc pointer to this section */
 245	state->sect = ((u8 *)state->buf) +
 246		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
 247
 248	return state->sect;
 249}
 250
 251/**
 252 * ice_pkg_enum_entry
 253 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 254 * @state: pointer to the enum state
 255 * @sect_type: section type to enumerate
 256 * @offset: pointer to variable that receives the offset in the table (optional)
 257 * @handler: function that handles access to the entries in the section type
 258 *
 259 * This function will enumerate all the entries in a particular section type in
 260 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 261 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 262 * When the function returns a NULL pointer, then the end of the entries has
 263 * been reached.
 264 *
 265 * Since each section may have a different header and entry size, the handler
 266 * function is needed to determine the number and location of entries in each
 267 * section.
 268 *
 269 * The offset parameter is optional, but should be used for sections that
 270 * contain an offset for each section table. For such cases, the section handler
 271 * function must return the appropriate offset + index to give the absolute
 272 * offset for each entry. For example, if the base for a section's header
 273 * indicates a base offset of 10, and the index for the entry is 2, then
 274 * the section handler function should set the offset to 10 + 2 = 12.
 275 */
 276static void *
 277ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
 278		   u32 sect_type, u32 *offset,
 279		   void *(*handler)(u32 sect_type, void *section,
 280				    u32 index, u32 *offset))
 281{
 282	void *entry;
 283
 284	if (ice_seg) {
 285		if (!handler)
 286			return NULL;
 287
 288		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
 289			return NULL;
 290
 291		state->entry_idx = 0;
 292		state->handler = handler;
 293	} else {
 294		state->entry_idx++;
 295	}
 296
 297	if (!state->handler)
 298		return NULL;
 299
 300	/* get entry */
 301	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
 302			       offset);
 303	if (!entry) {
 304		/* end of a section, look for another section of this type */
 305		if (!ice_pkg_enum_section(NULL, state, 0))
 306			return NULL;
 307
 308		state->entry_idx = 0;
 309		entry = state->handler(state->sect_type, state->sect,
 310				       state->entry_idx, offset);
 311	}
 312
 313	return entry;
 314}
 315
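/* Hedged usage sketch (illustrative only): entries of one section type are
 * walked by passing ice_seg on the first call and NULL afterwards, which is
 * exactly the pattern ice_find_boost_entry() below uses. "my_handler" stands
 * for any handler with the signature ice_pkg_enum_entry() expects.
 *
 *	memset(&state, 0, sizeof(state));
 *	entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, NULL,
 *				   my_handler);
 *	while (entry) {
 *		... use entry ...
 *		entry = ice_pkg_enum_entry(NULL, &state, sect_type, NULL,
 *					   my_handler);
 *	}
 */
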
 316/**
 317 * ice_boost_tcam_handler
 318 * @sect_type: section type
 319 * @section: pointer to section
 320 * @index: index of the boost TCAM entry to be returned
 321 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 322 *
 323 * This is a callback function that can be passed to ice_pkg_enum_entry.
 324 * Handles enumeration of individual boost TCAM entries.
 325 */
 326static void *
 327ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
 328{
 329	struct ice_boost_tcam_section *boost;
 330
 331	if (!section)
 332		return NULL;
 333
 334	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
 335		return NULL;
 336
 337	/* cppcheck-suppress nullPointer */
 338	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
 339		return NULL;
 340
 341	if (offset)
 342		*offset = 0;
 343
 344	boost = section;
 345	if (index >= le16_to_cpu(boost->count))
 346		return NULL;
 347
 348	return boost->tcam + index;
 349}
 350
 351/**
 352 * ice_find_boost_entry
 353 * @ice_seg: pointer to the ice segment (non-NULL)
 354 * @addr: Boost TCAM address of entry to search for
 355 * @entry: returns pointer to the entry
 356 *
 357 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 358 * if it is found. The ice_seg parameter must not be NULL since the first call
 359 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 360 */
 361static enum ice_status
 362ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
 363		     struct ice_boost_tcam_entry **entry)
 364{
 365	struct ice_boost_tcam_entry *tcam;
 366	struct ice_pkg_enum state;
 367
 368	memset(&state, 0, sizeof(state));
 369
 370	if (!ice_seg)
 371		return ICE_ERR_PARAM;
 372
 373	do {
 374		tcam = ice_pkg_enum_entry(ice_seg, &state,
 375					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
 376					  ice_boost_tcam_handler);
 377		if (tcam && le16_to_cpu(tcam->addr) == addr) {
 378			*entry = tcam;
 379			return 0;
 380		}
 381
 382		ice_seg = NULL;
 383	} while (tcam);
 384
 385	*entry = NULL;
 386	return ICE_ERR_CFG;
 387}
 388
 389/**
 390 * ice_label_enum_handler
 391 * @sect_type: section type
 392 * @section: pointer to section
 393 * @index: index of the label entry to be returned
 394 * @offset: pointer to receive absolute offset, always zero for label sections
 395 *
 396 * This is a callback function that can be passed to ice_pkg_enum_entry.
 397 * Handles enumeration of individual label entries.
 398 */
 399static void *
 400ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
 401		       u32 *offset)
 402{
 403	struct ice_label_section *labels;
 404
 405	if (!section)
 406		return NULL;
 407
 408	/* cppcheck-suppress nullPointer */
 409	if (index > ICE_MAX_LABELS_IN_BUF)
 410		return NULL;
 411
 412	if (offset)
 413		*offset = 0;
 414
 415	labels = section;
 416	if (index >= le16_to_cpu(labels->count))
 417		return NULL;
 418
 419	return labels->label + index;
 420}
 421
 422/**
 423 * ice_enum_labels
 424 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 425 * @type: the section type that will contain the label (0 on subsequent calls)
 426 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 427 * @value: pointer to a value that will return the label's value if found
 428 *
 429 * Enumerates a list of labels in the package. The caller will call
 430 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 431 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 432 * the end of the list has been reached.
 433 */
 434static char *
 435ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
 436		u16 *value)
 437{
 438	struct ice_label *label;
 439
 440	/* Check for valid label section on first call */
 441	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
 442		return NULL;
 443
 444	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
 445				   ice_label_enum_handler);
 446	if (!label)
 447		return NULL;
 448
 449	*value = le16_to_cpu(label->value);
 450	return label->name;
 451}
 452
 453/**
 454 * ice_init_pkg_hints
 455 * @hw: pointer to the HW structure
 456 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 457 *
 458 * This function will scan the package and save off relevant information
 459 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 460 * since the first call to ice_enum_labels requires a pointer to an actual
 461 * ice_seg structure.
 462 */
 463static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
 464{
 465	struct ice_pkg_enum state;
 466	char *label_name;
 467	u16 val;
 468	int i;
 469
 470	memset(&hw->tnl, 0, sizeof(hw->tnl));
 471	memset(&state, 0, sizeof(state));
 472
 473	if (!ice_seg)
 474		return;
 475
 476	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
 477				     &val);
 478
 479	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
 480		for (i = 0; tnls[i].type != TNL_LAST; i++) {
 481			size_t len = strlen(tnls[i].label_prefix);
 482
 483			/* Look for matching label start, before continuing */
 484			if (strncmp(label_name, tnls[i].label_prefix, len))
 485				continue;
 486
 487			/* Make sure this label matches our PF. Note that the PF
 488			 * character ('0' - '7') will be located where our
 489			 * prefix string's null terminator is located.
 490			 */
 491			if ((label_name[len] - '0') == hw->pf_id) {
 492				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
 493				hw->tnl.tbl[hw->tnl.count].valid = false;
 494				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
 495				hw->tnl.tbl[hw->tnl.count].port = 0;
 496				hw->tnl.count++;
 497				break;
 498			}
 499		}
 500
 501		label_name = ice_enum_labels(NULL, 0, &state, &val);
 502	}
 503
 504	/* Cache the appropriate boost TCAM entry pointers */
 505	for (i = 0; i < hw->tnl.count; i++) {
 506		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
 507				     &hw->tnl.tbl[i].boost_entry);
 508		if (hw->tnl.tbl[i].boost_entry) {
 509			hw->tnl.tbl[i].valid = true;
 510			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
 511				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
 512		}
 513	}
 514}
 515
 516/* Key creation */
 517
 518#define ICE_DC_KEY	0x1	/* don't care */
 519#define ICE_DC_KEYINV	0x1
 520#define ICE_NM_KEY	0x0	/* never match */
 521#define ICE_NM_KEYINV	0x0
 522#define ICE_0_KEY	0x1	/* match 0 */
 523#define ICE_0_KEYINV	0x0
 524#define ICE_1_KEY	0x0	/* match 1 */
 525#define ICE_1_KEYINV	0x1
 526
 527/**
 528 * ice_gen_key_word - generate 16-bits of a key/mask word
 529 * @val: the value
 530 * @valid: valid bits mask (change only the valid bits)
 531 * @dont_care: don't care mask
 532 * @nvr_mtch: never match mask
 533 * @key: pointer to the array where the resulting key portion will be stored
 534 * @key_inv: pointer to the array where the resulting key invert portion will be stored
 535 *
 536 * This function generates 16-bits from an 8-bit value, an 8-bit don't care mask
 537 * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits
 538 * of key and 8 bits of key invert.
 539 *
 540 *     '0' =    b01, always match a 0 bit
 541 *     '1' =    b10, always match a 1 bit
 542 *     '?' =    b11, don't care bit (always matches)
 543 *     '~' =    b00, never match bit
 544 *
 545 * Input:
 546 *          val:         b0  1  0  1  0  1
 547 *          dont_care:   b0  0  1  1  0  0
 548 *          never_mtch:  b0  0  0  0  1  1
 549 *          ------------------------------
 550 * Result:  key:        b01 10 11 11 00 00
 551 */
 552static enum ice_status
 553ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
 554		 u8 *key_inv)
 555{
 556	u8 in_key = *key, in_key_inv = *key_inv;
 557	u8 i;
 558
 559	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
 560	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
 561		return ICE_ERR_CFG;
 562
 563	*key = 0;
 564	*key_inv = 0;
 565
 566	/* encode the 8 bits into 8-bit key and 8-bit key invert */
 567	for (i = 0; i < 8; i++) {
 568		*key >>= 1;
 569		*key_inv >>= 1;
 570
 571		if (!(valid & 0x1)) { /* change only valid bits */
 572			*key |= (in_key & 0x1) << 7;
 573			*key_inv |= (in_key_inv & 0x1) << 7;
 574		} else if (dont_care & 0x1) { /* don't care bit */
 575			*key |= ICE_DC_KEY << 7;
 576			*key_inv |= ICE_DC_KEYINV << 7;
 577		} else if (nvr_mtch & 0x1) { /* never match bit */
 578			*key |= ICE_NM_KEY << 7;
 579			*key_inv |= ICE_NM_KEYINV << 7;
 580		} else if (val & 0x01) { /* exact 1 match */
 581			*key |= ICE_1_KEY << 7;
 582			*key_inv |= ICE_1_KEYINV << 7;
 583		} else { /* exact 0 match */
 584			*key |= ICE_0_KEY << 7;
 585			*key_inv |= ICE_0_KEYINV << 7;
 586		}
 587
 588		dont_care >>= 1;
 589		nvr_mtch >>= 1;
 590		valid >>= 1;
 591		val >>= 1;
 592		in_key >>= 1;
 593		in_key_inv >>= 1;
 594	}
 595
 596	return 0;
 597}
 598
 599/**
 600 * ice_bits_max_set - determine if the number of bits set is within a maximum
 601 * @mask: pointer to the byte array which is the mask
 602 * @size: the number of bytes in the mask
 603 * @max: the max number of set bits
 604 *
 605 * This function determines if there are at most 'max' number of bits set in an
 606 * array. Returns true if the number of bits set is <= max, false
 607 * otherwise.
 608 */
 609static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
 610{
 611	u16 count = 0;
 612	u16 i;
 613
 614	/* check each byte */
 615	for (i = 0; i < size; i++) {
 616		/* if 0, go to next byte */
 617		if (!mask[i])
 618			continue;
 619
 620		/* We know there is at least one set bit in this byte because of
 621		 * the above check; if we already have found 'max' number of
 622		 * bits set, then we can return failure now.
 623		 */
 624		if (count == max)
 625			return false;
 626
 627		/* count the bits in this byte, checking threshold */
 628		count += hweight8(mask[i]);
 629		if (count > max)
 630			return false;
 631	}
 632
 633	return true;
 634}
 635
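/* Worked example (illustrative): for mask = { 0x00, 0x05 } and max = 1, the
 * zero byte is skipped, then hweight8(0x05) = 2 pushes the count above max,
 * so ice_bits_max_set() returns false; with max = 2 it would return true.
 */
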
 636/**
 637 * ice_set_key - generate a variable sized key with multiples of 16-bits
 638 * @key: pointer to where the key will be stored
 639 * @size: the size of the complete key in bytes (must be even)
 640 * @val: array of 8-bit values that makes up the value portion of the key
 641 * @upd: array of 8-bit masks that determine what key portion to update
 642 * @dc: array of 8-bit masks that make up the don't care mask
 643 * @nm: array of 8-bit masks that make up the never match mask
 644 * @off: the offset of the first byte in the key to update
 645 * @len: the number of bytes in the key update
 646 *
 647 * This function generates a key from a value, a don't care mask and a never
 648 * match mask.
 649 * upd, dc, and nm are optional parameters, and can be NULL:
 650 *	upd == NULL --> upd mask is all 1's (update all bits)
 651 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 652 *	nm == NULL --> nm mask is all 0's (no never match bits)
 653 */
 654static enum ice_status
 655ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
 656	    u16 len)
 657{
 658	u16 half_size;
 659	u16 i;
 660
 661	/* size must be a multiple of 2 bytes. */
 662	if (size % 2)
 663		return ICE_ERR_CFG;
 664
 665	half_size = size / 2;
 666	if (off + len > half_size)
 667		return ICE_ERR_CFG;
 668
 669	/* Make sure at most one bit is set in the never match mask. Having more
 670	 * than one never match mask bit set will cause HW to consume excessive
 671	 * power; this is a power management efficiency check.
 672	 */
 673#define ICE_NVR_MTCH_BITS_MAX	1
 674	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
 675		return ICE_ERR_CFG;
 676
 677	for (i = 0; i < len; i++)
 678		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
 679				     dc ? dc[i] : 0, nm ? nm[i] : 0,
 680				     key + off + i, key + half_size + off + i))
 681			return ICE_ERR_CFG;
 682
 683	return 0;
 684}
 685
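/* Hedged usage sketch (illustrative only): for a 4-byte key the first two
 * bytes hold the key and the last two hold the key invert, so
 *
 *	u8 key[4] = { 0 };
 *	u8 val[2] = { 0x12, 0x34 };
 *
 *	ice_set_key(key, sizeof(key), val, NULL, NULL, NULL, 0, 2);
 *
 * updates bytes 0-1 with the encoded value and bytes 2-3 with the invert.
 * Passing NULL for upd/dc/nm means update every bit, with no don't care and
 * no never match bits.
 */
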
 686/**
 687 * ice_acquire_global_cfg_lock
 688 * @hw: pointer to the HW structure
 689 * @access: access type (read or write)
 690 *
 691 * This function will request ownership of the global config lock for reading
 692 * or writing of the package. When attempting to obtain write access, the
 693 * caller must check for the following two return values:
 694 *
 695 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 696 *                      and can perform writing of the package.
 697 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 698 *                      package or has found that no update was necessary; in
 699 *                      this case, the caller can just skip performing any
 700 *                      update of the package.
 701 */
 702static enum ice_status
 703ice_acquire_global_cfg_lock(struct ice_hw *hw,
 704			    enum ice_aq_res_access_type access)
 705{
 706	enum ice_status status;
 707
 708	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
 709				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 710
 711	if (!status)
 712		mutex_lock(&ice_global_cfg_lock_sw);
 713	else if (status == ICE_ERR_AQ_NO_WORK)
 714		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
 715
 716	return status;
 717}
 718
 719/**
 720 * ice_release_global_cfg_lock
 721 * @hw: pointer to the HW structure
 722 *
 723 * This function will release the global config lock.
 724 */
 725static void ice_release_global_cfg_lock(struct ice_hw *hw)
 726{
 727	mutex_unlock(&ice_global_cfg_lock_sw);
 728	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 729}
 730
 731/**
 732 * ice_acquire_change_lock
 733 * @hw: pointer to the HW structure
 734 * @access: access type (read or write)
 735 *
 736 * This function will request ownership of the change lock.
 737 */
 738static enum ice_status
 739ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
 740{
 741	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
 742			       ICE_CHANGE_LOCK_TIMEOUT);
 743}
 744
 745/**
 746 * ice_release_change_lock
 747 * @hw: pointer to the HW structure
 748 *
 749 * This function will release the change lock using the proper Admin Command.
 750 */
 751static void ice_release_change_lock(struct ice_hw *hw)
 752{
 753	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
 754}
 755
 756/**
 757 * ice_aq_download_pkg
 758 * @hw: pointer to the hardware structure
 759 * @pkg_buf: the package buffer to transfer
 760 * @buf_size: the size of the package buffer
 761 * @last_buf: last buffer indicator
 762 * @error_offset: returns error offset
 763 * @error_info: returns error information
 764 * @cd: pointer to command details structure or NULL
 765 *
 766 * Download Package (0x0C40)
 767 */
 768static enum ice_status
 769ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
 770		    u16 buf_size, bool last_buf, u32 *error_offset,
 771		    u32 *error_info, struct ice_sq_cd *cd)
 772{
 773	struct ice_aqc_download_pkg *cmd;
 774	struct ice_aq_desc desc;
 775	enum ice_status status;
 776
 777	if (error_offset)
 778		*error_offset = 0;
 779	if (error_info)
 780		*error_info = 0;
 781
 782	cmd = &desc.params.download_pkg;
 783	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
 784	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 785
 786	if (last_buf)
 787		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
 788
 789	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
 790	if (status == ICE_ERR_AQ_ERROR) {
 791		/* Read error from buffer only when the FW returned an error */
 792		struct ice_aqc_download_pkg_resp *resp;
 793
 794		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
 795		if (error_offset)
 796			*error_offset = le32_to_cpu(resp->error_offset);
 797		if (error_info)
 798			*error_info = le32_to_cpu(resp->error_info);
 799	}
 800
 801	return status;
 802}
 803
 804/**
 805 * ice_aq_update_pkg
 806 * @hw: pointer to the hardware structure
 807 * @pkg_buf: the package cmd buffer
 808 * @buf_size: the size of the package cmd buffer
 809 * @last_buf: last buffer indicator
 810 * @error_offset: returns error offset
 811 * @error_info: returns error information
 812 * @cd: pointer to command details structure or NULL
 813 *
 814 * Update Package (0x0C42)
 815 */
 816static enum ice_status
 817ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
 818		  bool last_buf, u32 *error_offset, u32 *error_info,
 819		  struct ice_sq_cd *cd)
 820{
 821	struct ice_aqc_download_pkg *cmd;
 822	struct ice_aq_desc desc;
 823	enum ice_status status;
 824
 825	if (error_offset)
 826		*error_offset = 0;
 827	if (error_info)
 828		*error_info = 0;
 829
 830	cmd = &desc.params.download_pkg;
 831	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
 832	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 833
 834	if (last_buf)
 835		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
 836
 837	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
 838	if (status == ICE_ERR_AQ_ERROR) {
 839		/* Read error from buffer only when the FW returned an error */
 840		struct ice_aqc_download_pkg_resp *resp;
 841
 842		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
 843		if (error_offset)
 844			*error_offset = le32_to_cpu(resp->error_offset);
 845		if (error_info)
 846			*error_info = le32_to_cpu(resp->error_info);
 847	}
 848
 849	return status;
 850}
 851
 852/**
 853 * ice_find_seg_in_pkg
 854 * @hw: pointer to the hardware structure
 855 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
 856 * @pkg_hdr: pointer to the package header to be searched
 857 *
 858 * This function searches a package file for a particular segment type. On
 859 * success it returns a pointer to the segment header, otherwise it will
 860 * return NULL.
 861 */
 862static struct ice_generic_seg_hdr *
 863ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
 864		    struct ice_pkg_hdr *pkg_hdr)
 865{
 866	u32 i;
 867
 868	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
 869		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
 870		  pkg_hdr->pkg_format_ver.update,
 871		  pkg_hdr->pkg_format_ver.draft);
 872
 873	/* Search all package segments for the requested segment type */
 874	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
 875		struct ice_generic_seg_hdr *seg;
 876
 877		seg = (struct ice_generic_seg_hdr *)
 878			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
 879
 880		if (le32_to_cpu(seg->seg_type) == seg_type)
 881			return seg;
 882	}
 883
 884	return NULL;
 885}
 886
 887/**
 888 * ice_update_pkg
 889 * @hw: pointer to the hardware structure
 890 * @bufs: pointer to an array of buffers
 891 * @count: the number of buffers in the array
 892 *
 893 * Obtains change lock and updates package.
 894 */
 895static enum ice_status
 896ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
 897{
 898	enum ice_status status;
 899	u32 offset, info, i;
 900
 901	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
 902	if (status)
 903		return status;
 904
 905	for (i = 0; i < count; i++) {
 906		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
 907		bool last = ((i + 1) == count);
 908
 909		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
 910					   last, &offset, &info, NULL);
 911
 912		if (status) {
 913			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
 914				  status, offset, info);
 915			break;
 916		}
 917	}
 918
 919	ice_release_change_lock(hw);
 920
 921	return status;
 922}
 923
 924/**
 925 * ice_dwnld_cfg_bufs
 926 * @hw: pointer to the hardware structure
 927 * @bufs: pointer to an array of buffers
 928 * @count: the number of buffers in the array
 929 *
 930 * Obtains global config lock and downloads the package configuration buffers
 931 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 932 * found indicates that the rest of the buffers are all metadata buffers.
 933 */
 934static enum ice_status
 935ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
 936{
 937	enum ice_status status;
 938	struct ice_buf_hdr *bh;
 939	u32 offset, info, i;
 940
 941	if (!bufs || !count)
 942		return ICE_ERR_PARAM;
 943
 944	/* If the first buffer's first section has its metadata bit set
 945	 * then there are no buffers to be downloaded, and the operation is
 946	 * considered a success.
 947	 */
 948	bh = (struct ice_buf_hdr *)bufs;
 949	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
 950		return 0;
 951
 952	/* reset pkg_dwnld_status in case this function is called in the
 953	 * reset/rebuild flow
 954	 */
 955	hw->pkg_dwnld_status = ICE_AQ_RC_OK;
 956
 957	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 958	if (status) {
 959		if (status == ICE_ERR_AQ_NO_WORK)
 960			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
 961		else
 962			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
 963		return status;
 964	}
 965
 966	for (i = 0; i < count; i++) {
 967		bool last = ((i + 1) == count);
 968
 969		if (!last) {
 970			/* check next buffer for metadata flag */
 971			bh = (struct ice_buf_hdr *)(bufs + i + 1);
 972
 973			/* A set metadata flag in the next buffer will signal
 974			 * that the current buffer will be the last buffer
 975			 * downloaded
 976			 */
 977			if (le16_to_cpu(bh->section_count))
 978				if (le32_to_cpu(bh->section_entry[0].type) &
 979				    ICE_METADATA_BUF)
 980					last = true;
 981		}
 982
 983		bh = (struct ice_buf_hdr *)(bufs + i);
 984
 985		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
 986					     &offset, &info, NULL);
 987
 988		/* Save AQ status from download package */
 989		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
 990		if (status) {
 991			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
 992				  status, offset, info);
 993
 994			break;
 995		}
 996
 997		if (last)
 998			break;
 999	}
1000
1001	ice_release_global_cfg_lock(hw);
1002
1003	return status;
1004}
1005
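/* Worked example (illustrative): for a buffer array [cfg0, cfg1, meta], the
 * metadata flag seen while looking ahead from cfg1 marks cfg1 as the last
 * buffer, so only cfg0 and cfg1 are handed to ice_aq_download_pkg() (cfg1
 * with last = true) and the metadata buffer itself is never downloaded.
 */
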
1006/**
1007 * ice_aq_get_pkg_info_list
1008 * @hw: pointer to the hardware structure
1009 * @pkg_info: the buffer which will receive the information list
1010 * @buf_size: the size of the pkg_info information buffer
1011 * @cd: pointer to command details structure or NULL
1012 *
1013 * Get Package Info List (0x0C43)
1014 */
1015static enum ice_status
1016ice_aq_get_pkg_info_list(struct ice_hw *hw,
1017			 struct ice_aqc_get_pkg_info_resp *pkg_info,
1018			 u16 buf_size, struct ice_sq_cd *cd)
1019{
1020	struct ice_aq_desc desc;
1021
1022	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1023
1024	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1025}
1026
1027/**
1028 * ice_download_pkg
1029 * @hw: pointer to the hardware structure
1030 * @ice_seg: pointer to the segment of the package to be downloaded
1031 *
1032 * Handles the download of a complete package.
1033 */
1034static enum ice_status
1035ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1036{
1037	struct ice_buf_table *ice_buf_tbl;
1038
1039	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1040		  ice_seg->hdr.seg_format_ver.major,
1041		  ice_seg->hdr.seg_format_ver.minor,
1042		  ice_seg->hdr.seg_format_ver.update,
1043		  ice_seg->hdr.seg_format_ver.draft);
1044
1045	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1046		  le32_to_cpu(ice_seg->hdr.seg_type),
1047		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1048
1049	ice_buf_tbl = ice_find_buf_table(ice_seg);
1050
1051	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1052		  le32_to_cpu(ice_buf_tbl->buf_count));
1053
1054	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1055				  le32_to_cpu(ice_buf_tbl->buf_count));
1056}
1057
1058/**
1059 * ice_init_pkg_info
1060 * @hw: pointer to the hardware structure
1061 * @pkg_hdr: pointer to the driver's package hdr
1062 *
1063 * Saves off the package details into the HW structure.
1064 */
1065static enum ice_status
1066ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1067{
1068	struct ice_generic_seg_hdr *seg_hdr;
1069
1070	if (!pkg_hdr)
1071		return ICE_ERR_PARAM;
1072
1073	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1074	if (seg_hdr) {
1075		struct ice_meta_sect *meta;
1076		struct ice_pkg_enum state;
1077
1078		memset(&state, 0, sizeof(state));
1079
1080		/* Get package information from the Metadata Section */
1081		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1082					    ICE_SID_METADATA);
1083		if (!meta) {
1084			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1085			return ICE_ERR_CFG;
1086		}
1087
1088		hw->pkg_ver = meta->ver;
1089		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
1090
1091		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1092			  meta->ver.major, meta->ver.minor, meta->ver.update,
1093			  meta->ver.draft, meta->name);
1094
1095		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1096		memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1097		       sizeof(hw->ice_seg_id));
1098
1099		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1100			  seg_hdr->seg_format_ver.major,
1101			  seg_hdr->seg_format_ver.minor,
1102			  seg_hdr->seg_format_ver.update,
1103			  seg_hdr->seg_format_ver.draft,
1104			  seg_hdr->seg_id);
1105	} else {
1106		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1107		return ICE_ERR_CFG;
1108	}
1109
1110	return 0;
1111}
1112
1113/**
1114 * ice_get_pkg_info
1115 * @hw: pointer to the hardware structure
1116 *
1117 * Store details of the package currently loaded in HW into the HW structure.
1118 */
1119static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1120{
1121	struct ice_aqc_get_pkg_info_resp *pkg_info;
1122	enum ice_status status;
1123	u16 size;
1124	u32 i;
1125
1126	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1127	pkg_info = kzalloc(size, GFP_KERNEL);
1128	if (!pkg_info)
1129		return ICE_ERR_NO_MEMORY;
1130
1131	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1132	if (status)
1133		goto init_pkg_free_alloc;
1134
1135	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
1136#define ICE_PKG_FLAG_COUNT	4
1137		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1138		u8 place = 0;
1139
1140		if (pkg_info->pkg_info[i].is_active) {
1141			flags[place++] = 'A';
1142			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1143			hw->active_track_id =
1144				le32_to_cpu(pkg_info->pkg_info[i].track_id);
1145			memcpy(hw->active_pkg_name,
1146			       pkg_info->pkg_info[i].name,
1147			       sizeof(pkg_info->pkg_info[i].name));
1148			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1149		}
1150		if (pkg_info->pkg_info[i].is_active_at_boot)
1151			flags[place++] = 'B';
1152		if (pkg_info->pkg_info[i].is_modified)
1153			flags[place++] = 'M';
1154		if (pkg_info->pkg_info[i].is_in_nvm)
1155			flags[place++] = 'N';
1156
1157		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1158			  i, pkg_info->pkg_info[i].ver.major,
1159			  pkg_info->pkg_info[i].ver.minor,
1160			  pkg_info->pkg_info[i].ver.update,
1161			  pkg_info->pkg_info[i].ver.draft,
1162			  pkg_info->pkg_info[i].name, flags);
1163	}
1164
1165init_pkg_free_alloc:
1166	kfree(pkg_info);
1167
1168	return status;
1169}
1170
1171/**
1172 * ice_verify_pkg - verify package
1173 * @pkg: pointer to the package buffer
1174 * @len: size of the package buffer
1175 *
1176 * Verifies various attributes of the package file, including length, format
1177 * version, and the requirement of at least one segment.
1178 */
1179static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1180{
1181	u32 seg_count;
1182	u32 i;
1183
1184	if (len < struct_size(pkg, seg_offset, 1))
1185		return ICE_ERR_BUF_TOO_SHORT;
1186
1187	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1188	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1189	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1190	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1191		return ICE_ERR_CFG;
1192
1193	/* pkg must have at least one segment */
1194	seg_count = le32_to_cpu(pkg->seg_count);
1195	if (seg_count < 1)
1196		return ICE_ERR_CFG;
1197
1198	/* make sure segment array fits in package length */
1199	if (len < struct_size(pkg, seg_offset, seg_count))
1200		return ICE_ERR_BUF_TOO_SHORT;
1201
1202	/* all segments must fit within length */
1203	for (i = 0; i < seg_count; i++) {
1204		u32 off = le32_to_cpu(pkg->seg_offset[i]);
1205		struct ice_generic_seg_hdr *seg;
1206
1207		/* segment header must fit */
1208		if (len < off + sizeof(*seg))
1209			return ICE_ERR_BUF_TOO_SHORT;
1210
1211		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1212
1213		/* segment body must fit */
1214		if (len < off + le32_to_cpu(seg->seg_size))
1215			return ICE_ERR_BUF_TOO_SHORT;
1216	}
1217
1218	return 0;
1219}
1220
1221/**
1222 * ice_free_seg - free package segment pointer
1223 * @hw: pointer to the hardware structure
1224 *
1225	 * Frees the package segment pointer in the proper manner, depending on whether
1226	 * the segment was allocated or just the passed-in pointer was stored.
1227 */
1228void ice_free_seg(struct ice_hw *hw)
1229{
1230	if (hw->pkg_copy) {
1231		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
1232		hw->pkg_copy = NULL;
1233		hw->pkg_size = 0;
1234	}
1235	hw->seg = NULL;
1236}
1237
1238/**
1239 * ice_init_pkg_regs - initialize additional package registers
1240 * @hw: pointer to the hardware structure
1241 */
1242static void ice_init_pkg_regs(struct ice_hw *hw)
1243{
1244#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1245#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1246#define ICE_SW_BLK_IDX	0
1247
1248	/* setup Switch block input mask, which is 48-bits in two parts */
1249	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1250	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1251}
1252
1253/**
1254 * ice_chk_pkg_version - check package version for compatibility with driver
1255 * @pkg_ver: pointer to a version structure to check
1256 *
1257 * Check to make sure that the package about to be downloaded is compatible with
1258 * the driver. To be compatible, the major and minor components of the package
1259 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1260 * definitions.
1261 */
1262static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1263{
1264	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1265	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1266		return ICE_ERR_NOT_SUPPORTED;
1267
1268	return 0;
1269}
1270
1271/**
1272 * ice_chk_pkg_compat
1273 * @hw: pointer to the hardware structure
1274 * @ospkg: pointer to the package hdr
1275 * @seg: pointer to the package segment hdr
1276 *
1277 * This function checks the package version compatibility with driver and NVM
1278 */
1279static enum ice_status
1280ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1281		   struct ice_seg **seg)
1282{
1283	struct ice_aqc_get_pkg_info_resp *pkg;
1284	enum ice_status status;
1285	u16 size;
1286	u32 i;
1287
1288	/* Check package version compatibility */
1289	status = ice_chk_pkg_version(&hw->pkg_ver);
1290	if (status) {
1291		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1292		return status;
1293	}
1294
1295	/* find ICE segment in given package */
1296	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1297						     ospkg);
1298	if (!*seg) {
1299		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1300		return ICE_ERR_CFG;
1301	}
1302
1303	/* Check if FW is compatible with the OS package */
1304	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
1305	pkg = kzalloc(size, GFP_KERNEL);
1306	if (!pkg)
1307		return ICE_ERR_NO_MEMORY;
1308
1309	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1310	if (status)
1311		goto fw_ddp_compat_free_alloc;
1312
1313	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
1314		/* loop till we find the NVM package */
1315		if (!pkg->pkg_info[i].is_in_nvm)
1316			continue;
1317		if ((*seg)->hdr.seg_format_ver.major !=
1318			pkg->pkg_info[i].ver.major ||
1319		    (*seg)->hdr.seg_format_ver.minor >
1320			pkg->pkg_info[i].ver.minor) {
1321			status = ICE_ERR_FW_DDP_MISMATCH;
1322			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1323		}
1324		/* done processing NVM package so break */
1325		break;
1326	}
1327fw_ddp_compat_free_alloc:
1328	kfree(pkg);
1329	return status;
1330}
1331
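/* Illustrative reading of the check above: the OS package segment is deemed
 * compatible with the NVM-resident package when the major versions match
 * exactly and the segment's minor version is not newer than the NVM
 * package's minor version. With hypothetical version numbers:
 *
 *	seg 1.3 vs. NVM pkg 1.5  ->  compatible
 *	seg 1.6 vs. NVM pkg 1.5  ->  ICE_ERR_FW_DDP_MISMATCH
 *	seg 2.3 vs. NVM pkg 1.5  ->  ICE_ERR_FW_DDP_MISMATCH
 */
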
1332/**
1333 * ice_init_pkg - initialize/download package
1334 * @hw: pointer to the hardware structure
1335 * @buf: pointer to the package buffer
1336 * @len: size of the package buffer
1337 *
1338 * This function initializes a package. The package contains HW tables
1339 * required to do packet processing. First, the function extracts package
1340 * information such as version. Then it finds the ice configuration segment
1341 * within the package; this function then saves a copy of the segment pointer
1342 * within the supplied package buffer. Next, the function will cache any hints
1343	 * from the package, followed by downloading the package itself. Note that if
1344 * a previous PF driver has already downloaded the package successfully, then
1345 * the current driver will not have to download the package again.
1346 *
1347 * The local package contents will be used to query default behavior and to
1348 * update specific sections of the HW's version of the package (e.g. to update
1349 * the parse graph to understand new protocols).
1350 *
1351 * This function stores a pointer to the package buffer memory, and it is
1352 * expected that the supplied buffer will not be freed immediately. If the
1353 * package buffer needs to be freed, such as when read from a file, use
1354 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1355 * case.
1356 */
1357enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1358{
1359	struct ice_pkg_hdr *pkg;
1360	enum ice_status status;
1361	struct ice_seg *seg;
1362
1363	if (!buf || !len)
1364		return ICE_ERR_PARAM;
1365
1366	pkg = (struct ice_pkg_hdr *)buf;
1367	status = ice_verify_pkg(pkg, len);
1368	if (status) {
1369		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1370			  status);
1371		return status;
1372	}
1373
1374	/* initialize package info */
1375	status = ice_init_pkg_info(hw, pkg);
1376	if (status)
1377		return status;
1378
1379	/* before downloading the package, check package version for
1380	 * compatibility with driver
1381	 */
1382	status = ice_chk_pkg_compat(hw, pkg, &seg);
1383	if (status)
1384		return status;
1385
1386	/* initialize package hints and then download package */
1387	ice_init_pkg_hints(hw, seg);
1388	status = ice_download_pkg(hw, seg);
1389	if (status == ICE_ERR_AQ_NO_WORK) {
1390		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1391		status = 0;
1392	}
1393
1394	/* Get information on the package currently loaded in HW, then make sure
1395	 * the driver is compatible with this version.
1396	 */
1397	if (!status) {
1398		status = ice_get_pkg_info(hw);
1399		if (!status)
1400			status = ice_chk_pkg_version(&hw->active_pkg_ver);
1401	}
1402
1403	if (!status) {
1404		hw->seg = seg;
1405		/* on successful package download update other required
1406		 * registers to support the package and fill HW tables
1407		 * with package content.
1408		 */
1409		ice_init_pkg_regs(hw);
1410		ice_fill_blk_tbls(hw);
1411	} else {
1412		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1413			  status);
1414	}
1415
1416	return status;
1417}
1418
1419/**
1420 * ice_copy_and_init_pkg - initialize/download a copy of the package
1421 * @hw: pointer to the hardware structure
1422 * @buf: pointer to the package buffer
1423 * @len: size of the package buffer
1424 *
1425 * This function copies the package buffer, and then calls ice_init_pkg() to
1426 * initialize the copied package contents.
1427 *
1428 * The copying is necessary if the package buffer supplied is constant, or if
1429 * the memory may disappear shortly after calling this function.
1430 *
1431 * If the package buffer resides in the data segment and can be modified, the
1432 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1433 *
1434 * However, if the package buffer needs to be copied first, such as when being
1435 * read from a file, the caller should use ice_copy_and_init_pkg().
1436 *
1437 * This function will first copy the package buffer, before calling
1438 * ice_init_pkg(). The caller is free to immediately destroy the original
1439 * package buffer, as the new copy will be managed by this function and
1440 * related routines.
1441 */
1442enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1443{
1444	enum ice_status status;
1445	u8 *buf_copy;
1446
1447	if (!buf || !len)
1448		return ICE_ERR_PARAM;
1449
1450	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
1451
1452	status = ice_init_pkg(hw, buf_copy, len);
1453	if (status) {
1454		/* Free the copy, since we failed to initialize the package */
1455		devm_kfree(ice_hw_to_dev(hw), buf_copy);
1456	} else {
1457		/* Track the copied pkg so we can free it later */
1458		hw->pkg_copy = buf_copy;
1459		hw->pkg_size = len;
1460	}
1461
1462	return status;
1463}
1464
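/* Illustrative sketch of an assumed caller (not part of this file): a PF
 * driver that fetches the DDP package with request_firmware() only holds a
 * const buffer that it releases right after loading, so it would use
 * ice_copy_and_init_pkg() rather than ice_init_pkg():
 *
 *	const struct firmware *fw;
 *
 *	if (!request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev)) {
 *		ice_copy_and_init_pkg(hw, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 *
 * The firmware path above is only an example; the copy made by
 * ice_copy_and_init_pkg() is tracked via hw->pkg_copy and freed later by
 * ice_free_seg().
 */
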
1465/**
1466 * ice_pkg_buf_alloc
1467 * @hw: pointer to the HW structure
1468 *
1469 * Allocates a package buffer and returns a pointer to the buffer header.
1470 * Note: all package contents must be in Little Endian form.
1471 */
1472static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1473{
1474	struct ice_buf_build *bld;
1475	struct ice_buf_hdr *buf;
1476
1477	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
1478	if (!bld)
1479		return NULL;
1480
1481	buf = (struct ice_buf_hdr *)bld;
1482	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
1483					     section_entry));
1484	return bld;
1485}
1486
1487/**
1488 * ice_pkg_buf_free
1489 * @hw: pointer to the HW structure
1490 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1491 *
1492 * Frees a package buffer
1493 */
1494static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1495{
1496	devm_kfree(ice_hw_to_dev(hw), bld);
1497}
1498
1499/**
1500 * ice_pkg_buf_reserve_section
1501 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1502 * @count: the number of sections to reserve
1503 *
1504	 * Reserves one or more section table entries in a package buffer. This routine
1505	 * can be called multiple times as long as all calls are made before calling
1506	 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
1507	 * called, the number of sections that can be allocated can no longer be
1508	 * increased; not using all reserved sections is fine, but this will
1509	 * result in some wasted space in the buffer.
1510 * Note: all package contents must be in Little Endian form.
1511 */
1512static enum ice_status
1513ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1514{
1515	struct ice_buf_hdr *buf;
1516	u16 section_count;
1517	u16 data_end;
1518
1519	if (!bld)
1520		return ICE_ERR_PARAM;
1521
1522	buf = (struct ice_buf_hdr *)&bld->buf;
1523
1524	/* already an active section, can't increase table size */
1525	section_count = le16_to_cpu(buf->section_count);
1526	if (section_count > 0)
1527		return ICE_ERR_CFG;
1528
1529	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1530		return ICE_ERR_CFG;
1531	bld->reserved_section_table_entries += count;
1532
1533	data_end = le16_to_cpu(buf->data_end) +
1534		flex_array_size(buf, section_entry, count);
1535	buf->data_end = cpu_to_le16(data_end);
1536
1537	return 0;
1538}
1539
1540/**
1541 * ice_pkg_buf_alloc_section
1542 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1543 * @type: the section type value
1544 * @size: the size of the section to reserve (in bytes)
1545 *
1546 * Reserves memory in the buffer for a section's content and updates the
1547	 * buffer's status accordingly. This routine returns a pointer to the first
1548 * byte of the section start within the buffer, which is used to fill in the
1549 * section contents.
1550 * Note: all package contents must be in Little Endian form.
1551 */
1552static void *
1553ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1554{
1555	struct ice_buf_hdr *buf;
1556	u16 sect_count;
1557	u16 data_end;
1558
1559	if (!bld || !type || !size)
1560		return NULL;
1561
1562	buf = (struct ice_buf_hdr *)&bld->buf;
1563
1564	/* check for enough space left in buffer */
1565	data_end = le16_to_cpu(buf->data_end);
1566
1567	/* section start must align on 4 byte boundary */
1568	data_end = ALIGN(data_end, 4);
1569
1570	if ((data_end + size) > ICE_MAX_S_DATA_END)
1571		return NULL;
1572
1573	/* check for more available section table entries */
1574	sect_count = le16_to_cpu(buf->section_count);
1575	if (sect_count < bld->reserved_section_table_entries) {
1576		void *section_ptr = ((u8 *)buf) + data_end;
1577
1578		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
1579		buf->section_entry[sect_count].size = cpu_to_le16(size);
1580		buf->section_entry[sect_count].type = cpu_to_le32(type);
1581
1582		data_end += size;
1583		buf->data_end = cpu_to_le16(data_end);
1584
1585		buf->section_count = cpu_to_le16(sect_count + 1);
1586		return section_ptr;
1587	}
1588
1589	/* no free section table entries */
1590	return NULL;
1591}
1592
1593/**
1594 * ice_pkg_buf_get_active_sections
1595 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1596 *
1597 * Returns the number of active sections. Before using the package buffer
1598 * in an update package command, the caller should make sure that there is at
1599 * least one active section - otherwise, the buffer is not legal and should
1600 * not be used.
1601 * Note: all package contents must be in Little Endian form.
1602 */
1603static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1604{
1605	struct ice_buf_hdr *buf;
1606
1607	if (!bld)
1608		return 0;
1609
1610	buf = (struct ice_buf_hdr *)&bld->buf;
1611	return le16_to_cpu(buf->section_count);
1612}
1613
1614/**
1615 * ice_pkg_buf
1616 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1617 *
1618 * Return a pointer to the buffer's header
1619 */
1620static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1621{
1622	if (!bld)
1623		return NULL;
1624
1625	return &bld->buf;
1626}
1627
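/* Illustrative sketch of how the ice_pkg_buf_* helpers compose (the tunnel
 * routines below follow this same pattern); sect_type and sect_size are
 * placeholders:
 *
 *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
 *	void *sect;
 *
 *	if (!bld)
 *		return ICE_ERR_NO_MEMORY;
 *	if (ice_pkg_buf_reserve_section(bld, 1))	// reserve first...
 *		goto err;
 *	sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
 *	if (!sect)					// ...then allocate
 *		goto err;
 *	// fill the section with little-endian content here
 *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 * err:
 *	ice_pkg_buf_free(hw, bld);
 *
 * All reservations must be made before the first ice_pkg_buf_alloc_section()
 * call: that call makes section_count non-zero, after which
 * ice_pkg_buf_reserve_section() returns ICE_ERR_CFG.
 */
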
1628/**
1629 * ice_get_open_tunnel_port - retrieve an open tunnel port
1630 * @hw: pointer to the HW structure
1631 * @port: returns open port
1632 */
1633bool
1634ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
1635{
1636	bool res = false;
1637	u16 i;
1638
1639	mutex_lock(&hw->tnl_lock);
1640
1641	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1642		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
1643			*port = hw->tnl.tbl[i].port;
1644			res = true;
1645			break;
1646		}
1647
1648	mutex_unlock(&hw->tnl_lock);
1649
1650	return res;
1651}
1652
1653/**
1654 * ice_tunnel_idx_to_entry - convert linear index to the sparse one
1655 * @hw: pointer to the HW structure
1656 * @type: type of tunnel
1657 * @idx: linear index
1658 *
1659 * Stack assumes we have 2 linear tables with indexes [0, count_valid),
1660	 * but really the port table may be sparse, and types are mixed, so convert
1661 * the stack index into the device index.
1662 */
1663static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
1664				   u16 idx)
1665{
1666	u16 i;
1667
1668	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
1669		if (hw->tnl.tbl[i].valid &&
1670		    hw->tnl.tbl[i].type == type &&
1671		    idx-- == 0)
1672			return i;
1673
1674	WARN_ON_ONCE(1);
1675	return 0;
1676}
1677
1678/**
1679 * ice_create_tunnel
1680 * @hw: pointer to the HW structure
1681 * @index: device table entry
1682 * @type: type of tunnel
1683 * @port: port of tunnel to create
1684 *
1685 * Create a tunnel by updating the parse graph in the parser. We do that by
1686 * creating a package buffer with the tunnel info and issuing an update package
1687 * command.
1688 */
1689static enum ice_status
1690ice_create_tunnel(struct ice_hw *hw, u16 index,
1691		  enum ice_tunnel_type type, u16 port)
1692{
1693	struct ice_boost_tcam_section *sect_rx, *sect_tx;
1694	enum ice_status status = ICE_ERR_MAX_LIMIT;
1695	struct ice_buf_build *bld;
1696
1697	mutex_lock(&hw->tnl_lock);
1698
1699	bld = ice_pkg_buf_alloc(hw);
1700	if (!bld) {
1701		status = ICE_ERR_NO_MEMORY;
1702		goto ice_create_tunnel_end;
1703	}
1704
1705	/* allocate 2 sections, one for Rx parser, one for Tx parser */
1706	if (ice_pkg_buf_reserve_section(bld, 2))
1707		goto ice_create_tunnel_err;
1708
1709	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1710					    struct_size(sect_rx, tcam, 1));
1711	if (!sect_rx)
1712		goto ice_create_tunnel_err;
1713	sect_rx->count = cpu_to_le16(1);
1714
1715	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1716					    struct_size(sect_tx, tcam, 1));
1717	if (!sect_tx)
1718		goto ice_create_tunnel_err;
1719	sect_tx->count = cpu_to_le16(1);
1720
1721	/* copy original boost entry to update package buffer */
1722	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
1723	       sizeof(*sect_rx->tcam));
1724
1725	/* over-write the never-match dest port key bits with the encoded port
1726	 * bits
1727	 */
1728	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
1729		    (u8 *)&port, NULL, NULL, NULL,
1730		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
1731		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
1732
1733	/* exact copy of entry to Tx section entry */
1734	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
1735
1736	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
1737	if (!status)
1738		hw->tnl.tbl[index].port = port;
1739
1740ice_create_tunnel_err:
1741	ice_pkg_buf_free(hw, bld);
1742
1743ice_create_tunnel_end:
1744	mutex_unlock(&hw->tnl_lock);
1745
1746	return status;
1747}
1748
1749/**
1750 * ice_destroy_tunnel
1751 * @hw: pointer to the HW structure
1752 * @index: device table entry
1753 * @type: type of tunnel
1754	 * @port: port of tunnel to destroy
1755	 *
1756	 * Destroys a tunnel by creating an update package buffer targeting the
1757	 * specific update requested and then issuing an update package
1758	 * command.
1759 */
1760static enum ice_status
1761ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
1762		   u16 port)
1763{
1764	struct ice_boost_tcam_section *sect_rx, *sect_tx;
1765	enum ice_status status = ICE_ERR_MAX_LIMIT;
1766	struct ice_buf_build *bld;
1767
1768	mutex_lock(&hw->tnl_lock);
1769
1770	if (WARN_ON(!hw->tnl.tbl[index].valid ||
1771		    hw->tnl.tbl[index].type != type ||
1772		    hw->tnl.tbl[index].port != port)) {
1773		status = ICE_ERR_OUT_OF_RANGE;
1774		goto ice_destroy_tunnel_end;
1775	}
1776
1777	bld = ice_pkg_buf_alloc(hw);
1778	if (!bld) {
1779		status = ICE_ERR_NO_MEMORY;
1780		goto ice_destroy_tunnel_end;
1781	}
1782
1783	/* allocate 2 sections, one for Rx parser, one for Tx parser */
1784	if (ice_pkg_buf_reserve_section(bld, 2))
1785		goto ice_destroy_tunnel_err;
1786
1787	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1788					    struct_size(sect_rx, tcam, 1));
1789	if (!sect_rx)
1790		goto ice_destroy_tunnel_err;
1791	sect_rx->count = cpu_to_le16(1);
1792
1793	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1794					    struct_size(sect_tx, tcam, 1));
1795	if (!sect_tx)
1796		goto ice_destroy_tunnel_err;
1797	sect_tx->count = cpu_to_le16(1);
1798
1799	/* copy original boost entry to update package buffer, one copy to Rx
1800	 * section, another copy to the Tx section
1801	 */
1802	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
1803	       sizeof(*sect_rx->tcam));
1804	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
1805	       sizeof(*sect_tx->tcam));
1806
1807	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
1808	if (!status)
1809		hw->tnl.tbl[index].port = 0;
1810
1811ice_destroy_tunnel_err:
1812	ice_pkg_buf_free(hw, bld);
1813
1814ice_destroy_tunnel_end:
1815	mutex_unlock(&hw->tnl_lock);
1816
1817	return status;
1818}
1819
1820int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
1821			    unsigned int idx, struct udp_tunnel_info *ti)
1822{
1823	struct ice_netdev_priv *np = netdev_priv(netdev);
1824	struct ice_vsi *vsi = np->vsi;
1825	struct ice_pf *pf = vsi->back;
1826	enum ice_tunnel_type tnl_type;
1827	enum ice_status status;
1828	u16 index;
1829
1830	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
1831	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
1832
1833	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
1834	if (status) {
1835		netdev_err(netdev, "Error adding UDP tunnel - %s\n",
1836			   ice_stat_str(status));
1837		return -EIO;
1838	}
1839
1840	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
1841	return 0;
1842}
1843
1844int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
1845			      unsigned int idx, struct udp_tunnel_info *ti)
1846{
1847	struct ice_netdev_priv *np = netdev_priv(netdev);
1848	struct ice_vsi *vsi = np->vsi;
1849	struct ice_pf *pf = vsi->back;
1850	enum ice_tunnel_type tnl_type;
1851	enum ice_status status;
1852
1853	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
1854
1855	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
1856				    ntohs(ti->port));
1857	if (status) {
1858		netdev_err(netdev, "Error removing UDP tunnel - %s\n",
1859			   ice_stat_str(status));
1860		return -EIO;
1861	}
1862
1863	return 0;
1864}
1865
1866/* PTG Management */
1867
1868/**
1869 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
1870 * @hw: pointer to the hardware structure
1871 * @blk: HW block
1872 * @ptype: the ptype to search for
1873 * @ptg: pointer to variable that receives the PTG
1874 *
1875 * This function will search the PTGs for a particular ptype, returning the
1876 * PTG ID that contains it through the PTG parameter, with the value of
1877	 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
1878 */
1879static enum ice_status
1880ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
1881{
1882	if (ptype >= ICE_XLT1_CNT || !ptg)
1883		return ICE_ERR_PARAM;
1884
1885	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
1886	return 0;
1887}
1888
1889/**
1890 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
1891 * @hw: pointer to the hardware structure
1892 * @blk: HW block
1893 * @ptg: the PTG to allocate
1894 *
1895 * This function allocates a given packet type group ID specified by the PTG
1896 * parameter.
1897 */
1898static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
1899{
1900	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
1901}
1902
1903/**
1904 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
1905 * @hw: pointer to the hardware structure
1906 * @blk: HW block
1907 * @ptype: the ptype to remove
1908 * @ptg: the PTG to remove the ptype from
1909 *
1910 * This function will remove the ptype from the specific PTG, and move it to
1911 * the default PTG (ICE_DEFAULT_PTG).
1912 */
1913static enum ice_status
1914ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
1915{
1916	struct ice_ptg_ptype **ch;
1917	struct ice_ptg_ptype *p;
1918
1919	if (ptype > ICE_XLT1_CNT - 1)
1920		return ICE_ERR_PARAM;
1921
1922	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
1923		return ICE_ERR_DOES_NOT_EXIST;
1924
1925	/* Should not happen if .in_use is set, bad config */
1926	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
1927		return ICE_ERR_CFG;
1928
1929	/* find the ptype within this PTG, and bypass the link over it */
1930	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
1931	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
1932	while (p) {
1933		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
1934			*ch = p->next_ptype;
1935			break;
1936		}
1937
1938		ch = &p->next_ptype;
1939		p = p->next_ptype;
1940	}
1941
1942	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
1943	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
1944
1945	return 0;
1946}
1947
1948/**
1949 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
1950 * @hw: pointer to the hardware structure
1951 * @blk: HW block
1952 * @ptype: the ptype to add or move
1953 * @ptg: the PTG to add or move the ptype to
1954 *
1955 * This function will either add or move a ptype to a particular PTG depending
1956	 * on whether the ptype is already part of another group. Note that using a
1957	 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
1958 * default PTG.
1959 */
1960static enum ice_status
1961ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
1962{
1963	enum ice_status status;
1964	u8 original_ptg;
1965
1966	if (ptype > ICE_XLT1_CNT - 1)
1967		return ICE_ERR_PARAM;
1968
1969	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
1970		return ICE_ERR_DOES_NOT_EXIST;
1971
1972	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
1973	if (status)
1974		return status;
1975
1976	/* Is ptype already in the correct PTG? */
1977	if (original_ptg == ptg)
1978		return 0;
1979
1980	/* Remove from original PTG and move back to the default PTG */
1981	if (original_ptg != ICE_DEFAULT_PTG)
1982		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
1983
1984	/* Moving to default PTG? Then we're done with this request */
1985	if (ptg == ICE_DEFAULT_PTG)
1986		return 0;
1987
1988	/* Add ptype to PTG at beginning of list */
1989	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
1990		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
1991	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
1992		&hw->blk[blk].xlt1.ptypes[ptype];
1993
1994	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
1995	hw->blk[blk].xlt1.t[ptype] = ptg;
1996
1997	return 0;
1998}
1999
2000/* Block / table size info */
2001struct ice_blk_size_details {
2002	u16 xlt1;			/* # XLT1 entries */
2003	u16 xlt2;			/* # XLT2 entries */
2004	u16 prof_tcam;			/* # profile ID TCAM entries */
2005	u16 prof_id;			/* # profile IDs */
2006	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
2007	u16 prof_redir;			/* # profile redirection entries */
2008	u16 es;				/* # extraction sequence entries */
2009	u16 fvw;			/* # field vector words */
2010	u8 overwrite;			/* overwrite existing entries allowed */
2011	u8 reverse;			/* reverse FV order */
2012};
2013
2014static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2015	/**
2016	 * Table Definitions
2017	 * XLT1 - Number of entries in XLT1 table
2018	 * XLT2 - Number of entries in XLT2 table
2019	 * TCAM - Number of entries Profile ID TCAM table
2020	 * CDID - Control Domain ID of the hardware block
2021	 * PRED - Number of entries in the Profile Redirection Table
2022	 * FV   - Number of entries in the Field Vector
2023	 * FVW  - Width (in WORDs) of the Field Vector
2024	 * OVR  - Overwrite existing table entries
2025	 * REV  - Reverse FV
2026	 */
2027	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
2028	/*          Overwrite   , Reverse FV */
2029	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
2030		    false, false },
2031	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
2032		    false, false },
2033	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2034		    false, true  },
2035	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2036		    true,  true  },
2037	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
2038		    false, false },
2039};
2040
2041enum ice_sid_all {
2042	ICE_SID_XLT1_OFF = 0,
2043	ICE_SID_XLT2_OFF,
2044	ICE_SID_PR_OFF,
2045	ICE_SID_PR_REDIR_OFF,
2046	ICE_SID_ES_OFF,
2047	ICE_SID_OFF_COUNT,
2048};
2049
2050/* Characteristic handling */
2051
2052/**
2053 * ice_match_prop_lst - determine if properties of two lists match
2054 * @list1: first properties list
2055 * @list2: second properties list
2056 *
2057 * Count, cookies and the order must match in order to be considered equivalent.
2058 */
2059static bool
2060ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
2061{
2062	struct ice_vsig_prof *tmp1;
2063	struct ice_vsig_prof *tmp2;
2064	u16 chk_count = 0;
2065	u16 count = 0;
2066
2067	/* compare counts */
2068	list_for_each_entry(tmp1, list1, list)
2069		count++;
2070	list_for_each_entry(tmp2, list2, list)
2071		chk_count++;
2072	/* cppcheck-suppress knownConditionTrueFalse */
2073	if (!count || count != chk_count)
2074		return false;
2075
2076	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
2077	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
2078
2079	/* profile cookies must match, and in the exact same order, to take
2080	 * priority into account
2081	 */
2082	while (count--) {
2083		if (tmp2->profile_cookie != tmp1->profile_cookie)
2084			return false;
2085
2086		tmp1 = list_next_entry(tmp1, list);
2087		tmp2 = list_next_entry(tmp2, list);
2088	}
2089
2090	return true;
2091}
2092
2093/* VSIG Management */
2094
2095/**
2096 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2097 * @hw: pointer to the hardware structure
2098 * @blk: HW block
2099 * @vsi: VSI of interest
2100 * @vsig: pointer to receive the VSI group
2101 *
2102	 * This function will look up the VSI entry in the XLT2 list and return
2103	 * the VSI group it is associated with.
2104 */
2105static enum ice_status
2106ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2107{
2108	if (!vsig || vsi >= ICE_MAX_VSI)
2109		return ICE_ERR_PARAM;
2110
2111	/* As long as there's a default or valid VSIG associated with the input
2112	 * VSI, the function returns success. Any handling of VSIG will be
2113	 * done by the following add, update or remove functions.
2114	 */
2115	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2116
2117	return 0;
2118}
2119
2120/**
2121 * ice_vsig_alloc_val - allocate a new VSIG by value
2122 * @hw: pointer to the hardware structure
2123 * @blk: HW block
2124 * @vsig: the VSIG to allocate
2125 *
2126 * This function will allocate a given VSIG specified by the VSIG parameter.
2127 */
2128static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2129{
2130	u16 idx = vsig & ICE_VSIG_IDX_M;
2131
2132	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2133		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2134		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2135	}
2136
2137	return ICE_VSIG_VALUE(idx, hw->pf_id);
2138}
2139
2140/**
2141 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2142 * @hw: pointer to the hardware structure
2143 * @blk: HW block
2144 *
2145 * This function will iterate through the VSIG list and mark the first
2146 * unused entry for the new VSIG entry as used and return that value.
2147 */
2148static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2149{
2150	u16 i;
2151
2152	for (i = 1; i < ICE_MAX_VSIGS; i++)
2153		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2154			return ice_vsig_alloc_val(hw, blk, i);
2155
2156	return ICE_DEFAULT_VSIG;
2157}
2158
2159/**
2160 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2161 * @hw: pointer to the hardware structure
2162 * @blk: HW block
2163 * @chs: characteristic list
2164 * @vsig: returns the VSIG with the matching profiles, if found
2165 *
2166 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2167 * a group have the same characteristic set. To check if there exists a VSIG
2168	 * which has the same characteristics as the input characteristics, this
2169 * function will iterate through the XLT2 list and return the VSIG that has a
2170 * matching configuration. In order to make sure that priorities are accounted
2171 * for, the list must match exactly, including the order in which the
2172 * characteristics are listed.
2173 */
2174static enum ice_status
2175ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2176			struct list_head *chs, u16 *vsig)
2177{
2178	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2179	u16 i;
2180
2181	for (i = 0; i < xlt2->count; i++)
2182		if (xlt2->vsig_tbl[i].in_use &&
2183		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2184			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2185			return 0;
2186		}
2187
2188	return ICE_ERR_DOES_NOT_EXIST;
2189}
2190
2191/**
2192 * ice_vsig_free - free VSI group
2193 * @hw: pointer to the hardware structure
2194 * @blk: HW block
2195 * @vsig: VSIG to remove
2196 *
2197 * The function will remove all VSIs associated with the input VSIG and move
2198 * them to the DEFAULT_VSIG and mark the VSIG available.
2199 */
2200static enum ice_status
2201ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2202{
2203	struct ice_vsig_prof *dtmp, *del;
2204	struct ice_vsig_vsi *vsi_cur;
2205	u16 idx;
2206
2207	idx = vsig & ICE_VSIG_IDX_M;
2208	if (idx >= ICE_MAX_VSIGS)
2209		return ICE_ERR_PARAM;
2210
2211	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2212		return ICE_ERR_DOES_NOT_EXIST;
2213
2214	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2215
2216	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2217	/* If the VSIG has at least 1 VSI then iterate through the
2218	 * list and remove the VSIs before deleting the group.
2219	 */
2220	if (vsi_cur) {
2221		/* remove all vsis associated with this VSIG XLT2 entry */
2222		do {
2223			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2224
2225			vsi_cur->vsig = ICE_DEFAULT_VSIG;
2226			vsi_cur->changed = 1;
2227			vsi_cur->next_vsi = NULL;
2228			vsi_cur = tmp;
2229		} while (vsi_cur);
2230
2231		/* NULL terminate head of VSI list */
2232		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2233	}
2234
2235	/* free characteristic list */
2236	list_for_each_entry_safe(del, dtmp,
2237				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2238				 list) {
2239		list_del(&del->list);
2240		devm_kfree(ice_hw_to_dev(hw), del);
2241	}
2242
2243	/* if VSIG characteristic list was cleared for reset
2244	 * re-initialize the list head
2245	 */
2246	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2247
2248	return 0;
2249}
2250
2251/**
2252 * ice_vsig_remove_vsi - remove VSI from VSIG
2253 * @hw: pointer to the hardware structure
2254 * @blk: HW block
2255 * @vsi: VSI to remove
2256 * @vsig: VSI group to remove from
2257 *
2258 * The function will remove the input VSI from its VSI group and move it
2259 * to the DEFAULT_VSIG.
2260 */
2261static enum ice_status
2262ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2263{
2264	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2265	u16 idx;
2266
2267	idx = vsig & ICE_VSIG_IDX_M;
2268
2269	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2270		return ICE_ERR_PARAM;
2271
2272	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2273		return ICE_ERR_DOES_NOT_EXIST;
2274
2275	/* entry already in default VSIG, don't have to remove */
2276	if (idx == ICE_DEFAULT_VSIG)
2277		return 0;
2278
2279	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2280	if (!(*vsi_head))
2281		return ICE_ERR_CFG;
2282
2283	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2284	vsi_cur = (*vsi_head);
2285
2286	/* iterate the VSI list, skip over the entry to be removed */
2287	while (vsi_cur) {
2288		if (vsi_tgt == vsi_cur) {
2289			(*vsi_head) = vsi_cur->next_vsi;
2290			break;
2291		}
2292		vsi_head = &vsi_cur->next_vsi;
2293		vsi_cur = vsi_cur->next_vsi;
2294	}
2295
2296	/* verify if VSI was removed from group list */
2297	if (!vsi_cur)
2298		return ICE_ERR_DOES_NOT_EXIST;
2299
2300	vsi_cur->vsig = ICE_DEFAULT_VSIG;
2301	vsi_cur->changed = 1;
2302	vsi_cur->next_vsi = NULL;
2303
2304	return 0;
2305}
2306
2307/**
2308 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2309 * @hw: pointer to the hardware structure
2310 * @blk: HW block
2311 * @vsi: VSI to move
2312 * @vsig: destination VSI group
2313 *
2314 * This function will move or add the input VSI to the target VSIG.
2315 * The function will find the original VSIG the VSI belongs to and
2316 * move the entry to the DEFAULT_VSIG, update the original VSIG and
2317 * then move entry to the new VSIG.
2318 */
2319static enum ice_status
2320ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2321{
2322	struct ice_vsig_vsi *tmp;
2323	enum ice_status status;
2324	u16 orig_vsig, idx;
2325
2326	idx = vsig & ICE_VSIG_IDX_M;
2327
2328	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2329		return ICE_ERR_PARAM;
2330
2331	/* if the VSIG is not in use and is not the default VSIG, then this
2332	 * VSIG doesn't exist.
2333	 */
2334	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2335	    vsig != ICE_DEFAULT_VSIG)
2336		return ICE_ERR_DOES_NOT_EXIST;
2337
2338	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2339	if (status)
2340		return status;
2341
2342	/* no update required if vsigs match */
2343	if (orig_vsig == vsig)
2344		return 0;
2345
2346	if (orig_vsig != ICE_DEFAULT_VSIG) {
2347		/* remove entry from orig_vsig and add to default VSIG */
2348		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2349		if (status)
2350			return status;
2351	}
2352
2353	if (idx == ICE_DEFAULT_VSIG)
2354		return 0;
2355
2356	/* Create VSI entry and add VSIG and prop_mask values */
2357	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2358	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2359
2360	/* Add new entry to the head of the VSIG list */
2361	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2362	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2363		&hw->blk[blk].xlt2.vsis[vsi];
2364	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2365	hw->blk[blk].xlt2.t[vsi] = vsig;
2366
2367	return 0;
2368}
2369
2370/**
2371 * ice_prof_has_mask_idx - determine if profile index masking is identical
2372 * @hw: pointer to the hardware structure
2373 * @blk: HW block
2374 * @prof: profile to check
2375 * @idx: profile index to check
2376 * @mask: mask to match
2377 */
2378static bool
2379ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
2380		      u16 mask)
2381{
2382	bool expect_no_mask = false;
2383	bool found = false;
2384	bool match = false;
2385	u16 i;
2386
2387	/* If mask is 0x0000 or 0xffff, then there is no masking */
2388	if (mask == 0 || mask == 0xffff)
2389		expect_no_mask = true;
2390
2391	/* Scan the enabled masks on this profile, for the specified idx */
2392	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
2393	     hw->blk[blk].masks.count; i++)
2394		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
2395			if (hw->blk[blk].masks.masks[i].in_use &&
2396			    hw->blk[blk].masks.masks[i].idx == idx) {
2397				found = true;
2398				if (hw->blk[blk].masks.masks[i].mask == mask)
2399					match = true;
2400				break;
2401			}
2402
2403	if (expect_no_mask) {
2404		if (found)
2405			return false;
2406	} else {
2407		if (!match)
2408			return false;
2409	}
2410
2411	return true;
2412}
2413
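/* Illustrative reading of the check above (values are hypothetical): a
 * requested mask of 0x0000 or 0xffff means "no HW mask should exist for this
 * word", so finding any enabled mask entry for that index is a mismatch. Any
 * other value, e.g. masks[3] == 0x00ff, matches only if an in-use entry with
 * idx == 3 and mask == 0x00ff is enabled for the profile.
 */
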
2414/**
2415 * ice_prof_has_mask - determine if profile masking is identical
2416 * @hw: pointer to the hardware structure
2417 * @blk: HW block
2418 * @prof: profile to check
2419 * @masks: masks to match
2420 */
2421static bool
2422ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2423{
2424	u16 i;
2425
2426	/* es->mask_ena[prof] will have the mask */
2427	for (i = 0; i < hw->blk[blk].es.fvw; i++)
2428		if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2429			return false;
2430
2431	return true;
2432}
2433
2434/**
2435 * ice_find_prof_id_with_mask - find profile ID for a given field vector
2436 * @hw: pointer to the hardware structure
2437 * @blk: HW block
2438 * @fv: field vector to search for
2439 * @masks: masks for FV
2440 * @prof_id: receives the profile ID
2441 */
2442static enum ice_status
2443ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
2444			   struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
2445{
2446	struct ice_es *es = &hw->blk[blk].es;
2447	u8 i;
2448
2449	/* For FD, we don't want to re-use an existing profile with the same
2450	 * field vector and mask. This will cause rule interference.
2451	 */
2452	if (blk == ICE_BLK_FD)
2453		return ICE_ERR_DOES_NOT_EXIST;
2454
2455	for (i = 0; i < (u8)es->count; i++) {
2456		u16 off = i * es->fvw;
2457
2458		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2459			continue;
2460
2461		/* check if masks settings are the same for this profile */
2462		if (masks && !ice_prof_has_mask(hw, blk, i, masks))
2463			continue;
2464
2465		*prof_id = i;
2466		return 0;
2467	}
2468
2469	return ICE_ERR_DOES_NOT_EXIST;
2470}
2471
2472/**
2473 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2474 * @blk: the block type
2475 * @rsrc_type: pointer to variable to receive the resource type
2476 */
2477static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2478{
2479	switch (blk) {
2480	case ICE_BLK_FD:
2481		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2482		break;
2483	case ICE_BLK_RSS:
2484		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2485		break;
2486	default:
2487		return false;
2488	}
2489	return true;
2490}
2491
2492/**
2493 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2494 * @blk: the block type
2495 * @rsrc_type: pointer to variable to receive the resource type
2496 */
2497static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2498{
2499	switch (blk) {
2500	case ICE_BLK_FD:
2501		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2502		break;
2503	case ICE_BLK_RSS:
2504		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2505		break;
2506	default:
2507		return false;
2508	}
2509	return true;
2510}
2511
2512/**
2513 * ice_alloc_tcam_ent - allocate hardware TCAM entry
2514 * @hw: pointer to the HW struct
2515 * @blk: the block to allocate the TCAM for
2516 * @btm: true to allocate from bottom of table, false to allocate from top
2517 * @tcam_idx: pointer to variable to receive the TCAM entry
2518 *
2519 * This function allocates a new entry in a Profile ID TCAM for a specific
2520 * block.
2521 */
2522static enum ice_status
2523ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
2524		   u16 *tcam_idx)
2525{
2526	u16 res_type;
2527
2528	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2529		return ICE_ERR_PARAM;
2530
2531	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
2532}
2533
2534/**
2535 * ice_free_tcam_ent - free hardware TCAM entry
2536 * @hw: pointer to the HW struct
2537 * @blk: the block from which to free the TCAM entry
2538 * @tcam_idx: the TCAM entry to free
2539 *
2540 * This function frees an entry in a Profile ID TCAM for a specific block.
2541 */
2542static enum ice_status
2543ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2544{
2545	u16 res_type;
2546
2547	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2548		return ICE_ERR_PARAM;
2549
2550	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2551}
2552
2553/**
2554 * ice_alloc_prof_id - allocate profile ID
2555 * @hw: pointer to the HW struct
2556 * @blk: the block to allocate the profile ID for
2557 * @prof_id: pointer to variable to receive the profile ID
2558 *
2559 * This function allocates a new profile ID, which also corresponds to a Field
2560 * Vector (Extraction Sequence) entry.
2561 */
2562static enum ice_status
2563ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2564{
2565	enum ice_status status;
2566	u16 res_type;
2567	u16 get_prof;
2568
2569	if (!ice_prof_id_rsrc_type(blk, &res_type))
2570		return ICE_ERR_PARAM;
2571
2572	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2573	if (!status)
2574		*prof_id = (u8)get_prof;
2575
2576	return status;
2577}
2578
2579/**
2580 * ice_free_prof_id - free profile ID
2581 * @hw: pointer to the HW struct
2582 * @blk: the block from which to free the profile ID
2583 * @prof_id: the profile ID to free
2584 *
2585 * This function frees a profile ID, which also corresponds to a Field Vector.
2586 */
2587static enum ice_status
2588ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2589{
2590	u16 tmp_prof_id = (u16)prof_id;
2591	u16 res_type;
2592
2593	if (!ice_prof_id_rsrc_type(blk, &res_type))
2594		return ICE_ERR_PARAM;
2595
2596	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2597}
2598
2599/**
2600 * ice_prof_inc_ref - increment reference count for profile
2601 * @hw: pointer to the HW struct
2602 * @blk: the block from which to free the profile ID
2603 * @prof_id: the profile ID for which to increment the reference count
2604 */
2605static enum ice_status
2606ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2607{
2608	if (prof_id > hw->blk[blk].es.count)
2609		return ICE_ERR_PARAM;
2610
2611	hw->blk[blk].es.ref_count[prof_id]++;
2612
2613	return 0;
2614}
2615
2616/**
2617 * ice_write_prof_mask_reg - write profile mask register
2618 * @hw: pointer to the HW struct
2619 * @blk: hardware block
2620 * @mask_idx: mask index
2621 * @idx: index of the FV which will use the mask
2622 * @mask: the 16-bit mask
2623 */
2624static void
2625ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
2626			u16 idx, u16 mask)
2627{
2628	u32 offset;
2629	u32 val;
2630
2631	switch (blk) {
2632	case ICE_BLK_RSS:
2633		offset = GLQF_HMASK(mask_idx);
2634		val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
2635		val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
2636		break;
2637	case ICE_BLK_FD:
2638		offset = GLQF_FDMASK(mask_idx);
2639		val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
2640		val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
2641		break;
2642	default:
2643		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2644			  blk);
2645		return;
2646	}
2647
2648	wr32(hw, offset, val);
2649	ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
2650		  blk, idx, offset, val);
2651}
2652
2653/**
2654 * ice_write_prof_mask_enable_res - write profile mask enable register
2655 * @hw: pointer to the HW struct
2656 * @blk: hardware block
2657 * @prof_id: profile ID
2658 * @enable_mask: enable mask
2659 */
2660static void
2661ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
2662			       u16 prof_id, u32 enable_mask)
2663{
2664	u32 offset;
2665
2666	switch (blk) {
2667	case ICE_BLK_RSS:
2668		offset = GLQF_HMASK_SEL(prof_id);
2669		break;
2670	case ICE_BLK_FD:
2671		offset = GLQF_FDMASK_SEL(prof_id);
2672		break;
2673	default:
2674		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
2675			  blk);
2676		return;
2677	}
2678
2679	wr32(hw, offset, enable_mask);
2680	ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
2681		  blk, prof_id, offset, enable_mask);
2682}
2683
2684/**
2685	 * ice_init_prof_masks - initialize prof masks
2686 * @hw: pointer to the HW struct
2687 * @blk: hardware block
2688 */
2689static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
2690{
2691	u16 per_pf;
2692	u16 i;
2693
2694	mutex_init(&hw->blk[blk].masks.lock);
2695
2696	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
2697
2698	hw->blk[blk].masks.count = per_pf;
2699	hw->blk[blk].masks.first = hw->pf_id * per_pf;
2700
2701	memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
2702
2703	for (i = hw->blk[blk].masks.first;
2704	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
2705		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
2706}
2707
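/* Worked example for the partitioning above (the total count is assumed, not
 * taken from the headers): with ICE_PROF_MASK_COUNT == 32 and 8 PFs,
 * per_pf == 4, so PF 0 owns mask indices 0-3, PF 1 owns 4-7, PF 2 owns 8-11,
 * and so on; each PF only ever allocates from and clears its own slice of the
 * GLQF_HMASK()/GLQF_FDMASK() register arrays.
 */
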
2708/**
2709 * ice_init_all_prof_masks - initialize all prof masks
2710 * @hw: pointer to the HW struct
2711 */
2712static void ice_init_all_prof_masks(struct ice_hw *hw)
2713{
2714	ice_init_prof_masks(hw, ICE_BLK_RSS);
2715	ice_init_prof_masks(hw, ICE_BLK_FD);
2716}
2717
2718/**
2719 * ice_alloc_prof_mask - allocate profile mask
2720 * @hw: pointer to the HW struct
2721 * @blk: hardware block
2722 * @idx: index of FV which will use the mask
2723 * @mask: the 16-bit mask
2724 * @mask_idx: variable to receive the mask index
2725 */
2726static enum ice_status
2727ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
2728		    u16 *mask_idx)
2729{
2730	bool found_unused = false, found_copy = false;
2731	enum ice_status status = ICE_ERR_MAX_LIMIT;
2732	u16 unused_idx = 0, copy_idx = 0;
2733	u16 i;
2734
2735	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2736		return ICE_ERR_PARAM;
2737
2738	mutex_lock(&hw->blk[blk].masks.lock);
2739
2740	for (i = hw->blk[blk].masks.first;
2741	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
2742		if (hw->blk[blk].masks.masks[i].in_use) {
2743			/* if mask is in use and it exactly duplicates the
2744	 * desired mask and index, then it can be reused
2745			 */
2746			if (hw->blk[blk].masks.masks[i].mask == mask &&
2747			    hw->blk[blk].masks.masks[i].idx == idx) {
2748				found_copy = true;
2749				copy_idx = i;
2750				break;
2751			}
2752		} else {
2753			/* save off unused index, but keep searching in case
2754			 * there is an exact match later on
2755			 */
2756			if (!found_unused) {
2757				found_unused = true;
2758				unused_idx = i;
2759			}
2760		}
2761
2762	if (found_copy)
2763		i = copy_idx;
2764	else if (found_unused)
2765		i = unused_idx;
2766	else
2767		goto err_ice_alloc_prof_mask;
2768
2769	/* update mask for a new entry */
2770	if (found_unused) {
2771		hw->blk[blk].masks.masks[i].in_use = true;
2772		hw->blk[blk].masks.masks[i].mask = mask;
2773		hw->blk[blk].masks.masks[i].idx = idx;
2774		hw->blk[blk].masks.masks[i].ref = 0;
2775		ice_write_prof_mask_reg(hw, blk, i, idx, mask);
2776	}
2777
2778	hw->blk[blk].masks.masks[i].ref++;
2779	*mask_idx = i;
2780	status = 0;
2781
2782err_ice_alloc_prof_mask:
2783	mutex_unlock(&hw->blk[blk].masks.lock);
2784
2785	return status;
2786}
2787
2788/**
2789 * ice_free_prof_mask - free profile mask
2790 * @hw: pointer to the HW struct
2791 * @blk: hardware block
2792 * @mask_idx: index of mask
2793 */
2794static enum ice_status
2795ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
2796{
2797	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2798		return ICE_ERR_PARAM;
2799
2800	if (!(mask_idx >= hw->blk[blk].masks.first &&
2801	      mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
2802		return ICE_ERR_DOES_NOT_EXIST;
2803
2804	mutex_lock(&hw->blk[blk].masks.lock);
2805
2806	if (!hw->blk[blk].masks.masks[mask_idx].in_use)
2807		goto exit_ice_free_prof_mask;
2808
2809	if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
2810		hw->blk[blk].masks.masks[mask_idx].ref--;
2811		goto exit_ice_free_prof_mask;
2812	}
2813
2814	/* remove mask */
2815	hw->blk[blk].masks.masks[mask_idx].in_use = false;
2816	hw->blk[blk].masks.masks[mask_idx].mask = 0;
2817	hw->blk[blk].masks.masks[mask_idx].idx = 0;
2818
2819	/* update mask as unused entry */
2820	ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
2821		  mask_idx);
2822	ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
2823
2824exit_ice_free_prof_mask:
2825	mutex_unlock(&hw->blk[blk].masks.lock);
2826
2827	return 0;
2828}
2829
2830/**
2831 * ice_free_prof_masks - free all profile masks for a profile
2832 * @hw: pointer to the HW struct
2833 * @blk: hardware block
2834 * @prof_id: profile ID
2835 */
2836static enum ice_status
2837ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
2838{
2839	u32 mask_bm;
2840	u16 i;
2841
2842	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2843		return ICE_ERR_PARAM;
2844
2845	mask_bm = hw->blk[blk].es.mask_ena[prof_id];
2846	for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
2847		if (mask_bm & BIT(i))
2848			ice_free_prof_mask(hw, blk, i);
2849
2850	return 0;
2851}
2852
2853/**
2854	 * ice_shutdown_prof_masks - clear masks and release the masking lock
2855 * @hw: pointer to the HW struct
2856 * @blk: hardware block
2857 *
2858 * This should be called before unloading the driver
2859 */
2860static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
2861{
2862	u16 i;
2863
2864	mutex_lock(&hw->blk[blk].masks.lock);
2865
2866	for (i = hw->blk[blk].masks.first;
2867	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
2868		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
2869
2870		hw->blk[blk].masks.masks[i].in_use = false;
2871		hw->blk[blk].masks.masks[i].idx = 0;
2872		hw->blk[blk].masks.masks[i].mask = 0;
2873	}
2874
2875	mutex_unlock(&hw->blk[blk].masks.lock);
2876	mutex_destroy(&hw->blk[blk].masks.lock);
2877}
2878
2879/**
2880 * ice_shutdown_all_prof_masks - releases all locks for masking
2881 * @hw: pointer to the HW struct
2882 *
2883 * This should be called before unloading the driver
2884 */
2885static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
2886{
2887	ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
2888	ice_shutdown_prof_masks(hw, ICE_BLK_FD);
2889}
2890
2891/**
2892 * ice_update_prof_masking - set registers according to masking
2893 * @hw: pointer to the HW struct
2894 * @blk: hardware block
2895 * @prof_id: profile ID
2896 * @masks: masks
2897 */
2898static enum ice_status
2899ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
2900			u16 *masks)
2901{
2902	bool err = false;
2903	u32 ena_mask = 0;
2904	u16 idx;
2905	u16 i;
2906
2907	/* Only support FD and RSS masking, otherwise nothing to be done */
2908	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
2909		return 0;
2910
2911	for (i = 0; i < hw->blk[blk].es.fvw; i++)
2912		if (masks[i] && masks[i] != 0xFFFF) {
2913			if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
2914				ena_mask |= BIT(idx);
2915			} else {
2916				/* not enough bitmaps */
2917				err = true;
2918				break;
2919			}
2920		}
2921
2922	if (err) {
2923		/* free any bitmaps we have allocated */
2924		for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
2925			if (ena_mask & BIT(i))
2926				ice_free_prof_mask(hw, blk, i);
2927
2928		return ICE_ERR_OUT_OF_RANGE;
2929	}
2930
2931	/* enable the masks for this profile */
2932	ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
2933
2934	/* store enabled masks with profile so that they can be freed later */
2935	hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
2936
2937	return 0;
2938}
2939
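/* Illustrative example for the loop above (index values are hypothetical):
 * if masks[] requests 0x00ff for FV word 2 and 0xffff everywhere else, only
 * word 2 needs a HW mask. If ice_alloc_prof_mask() hands back mask index 9,
 * ena_mask becomes BIT(9), and only that bit is written to the profile's
 * GLQF_HMASK_SEL/GLQF_FDMASK_SEL register and stored in es.mask_ena.
 */
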
2940/**
2941 * ice_write_es - write an extraction sequence to hardware
2942 * @hw: pointer to the HW struct
2943 * @blk: the block in which to write the extraction sequence
2944 * @prof_id: the profile ID to write
2945 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
2946 */
2947static void
2948ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
2949	     struct ice_fv_word *fv)
2950{
2951	u16 off;
2952
2953	off = prof_id * hw->blk[blk].es.fvw;
2954	if (!fv) {
2955		memset(&hw->blk[blk].es.t[off], 0,
2956		       hw->blk[blk].es.fvw * sizeof(*fv));
2957		hw->blk[blk].es.written[prof_id] = false;
2958	} else {
2959		memcpy(&hw->blk[blk].es.t[off], fv,
2960		       hw->blk[blk].es.fvw * sizeof(*fv));
2961	}
2962}
2963
2964/**
2965 * ice_prof_dec_ref - decrement reference count for profile
2966 * @hw: pointer to the HW struct
2967 * @blk: the block from which to free the profile ID
2968 * @prof_id: the profile ID for which to decrement the reference count
2969 */
2970static enum ice_status
2971ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2972{
2973	if (prof_id > hw->blk[blk].es.count)
2974		return ICE_ERR_PARAM;
2975
2976	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
2977		if (!--hw->blk[blk].es.ref_count[prof_id]) {
2978			ice_write_es(hw, blk, prof_id, NULL);
2979			ice_free_prof_masks(hw, blk, prof_id);
2980			return ice_free_prof_id(hw, blk, prof_id);
2981		}
2982	}
2983
2984	return 0;
2985}
2986
2987/* Block / table section IDs */
2988static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
2989	/* SWITCH */
2990	{	ICE_SID_XLT1_SW,
2991		ICE_SID_XLT2_SW,
2992		ICE_SID_PROFID_TCAM_SW,
2993		ICE_SID_PROFID_REDIR_SW,
2994		ICE_SID_FLD_VEC_SW
2995	},
2996
2997	/* ACL */
2998	{	ICE_SID_XLT1_ACL,
2999		ICE_SID_XLT2_ACL,
3000		ICE_SID_PROFID_TCAM_ACL,
3001		ICE_SID_PROFID_REDIR_ACL,
3002		ICE_SID_FLD_VEC_ACL
3003	},
3004
3005	/* FD */
3006	{	ICE_SID_XLT1_FD,
3007		ICE_SID_XLT2_FD,
3008		ICE_SID_PROFID_TCAM_FD,
3009		ICE_SID_PROFID_REDIR_FD,
3010		ICE_SID_FLD_VEC_FD
3011	},
3012
3013	/* RSS */
3014	{	ICE_SID_XLT1_RSS,
3015		ICE_SID_XLT2_RSS,
3016		ICE_SID_PROFID_TCAM_RSS,
3017		ICE_SID_PROFID_REDIR_RSS,
3018		ICE_SID_FLD_VEC_RSS
3019	},
3020
3021	/* PE */
3022	{	ICE_SID_XLT1_PE,
3023		ICE_SID_XLT2_PE,
3024		ICE_SID_PROFID_TCAM_PE,
3025		ICE_SID_PROFID_REDIR_PE,
3026		ICE_SID_FLD_VEC_PE
3027	}
3028};
3029
3030/**
3031 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3032 * @hw: pointer to the hardware structure
3033 * @blk: the HW block to initialize
3034 */
3035static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3036{
3037	u16 pt;
3038
3039	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3040		u8 ptg;
3041
3042		ptg = hw->blk[blk].xlt1.t[pt];
3043		if (ptg != ICE_DEFAULT_PTG) {
3044			ice_ptg_alloc_val(hw, blk, ptg);
3045			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3046		}
3047	}
3048}
3049
3050/**
3051 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3052 * @hw: pointer to the hardware structure
3053 * @blk: the HW block to initialize
3054 */
3055static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3056{
3057	u16 vsi;
3058
3059	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3060		u16 vsig;
3061
3062		vsig = hw->blk[blk].xlt2.t[vsi];
3063		if (vsig) {
3064			ice_vsig_alloc_val(hw, blk, vsig);
3065			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3066			/* no changes at this time, since this has been
3067			 * initialized from the original package
3068			 */
3069			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3070		}
3071	}
3072}
3073
3074/**
3075 * ice_init_sw_db - init software database from HW tables
3076 * @hw: pointer to the hardware structure
3077 */
3078static void ice_init_sw_db(struct ice_hw *hw)
3079{
3080	u16 i;
3081
3082	for (i = 0; i < ICE_BLK_COUNT; i++) {
3083		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3084		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3085	}
3086}
3087
3088/**
3089 * ice_fill_tbl - Reads content of a single table type into database
3090 * @hw: pointer to the hardware structure
3091 * @block_id: Block ID of the table to copy
3092 * @sid: Section ID of the table to copy
3093 *
3094 * Will attempt to read the entire content of a given table of a single block
3095 * into the driver database. We assume that the buffer will always
3096 * be as large as or larger than the data contained in the package. If
3097 * this condition is not met, there is most likely an error in the package
3098 * contents.
3099 */
3100static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3101{
3102	u32 dst_len, sect_len, offset = 0;
3103	struct ice_prof_redir_section *pr;
3104	struct ice_prof_id_section *pid;
3105	struct ice_xlt1_section *xlt1;
3106	struct ice_xlt2_section *xlt2;
3107	struct ice_sw_fv_section *es;
3108	struct ice_pkg_enum state;
3109	u8 *src, *dst;
3110	void *sect;
3111
3112	/* if the HW segment pointer is null then the first iteration of
3113	 * ice_pkg_enum_section() will fail. In this case the HW tables will
3114	 * not be filled and we simply return.
3115	 */
3116	if (!hw->seg) {
3117		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3118		return;
3119	}
3120
3121	memset(&state, 0, sizeof(state));
3122
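	/* walk every section in the package carrying this section ID; the
	 * first call primes the enumeration state, and later calls pass NULL
	 * to continue from where the previous call left off
	 */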
3123	sect = ice_pkg_enum_section(hw->seg, &state, sid);
3124
3125	while (sect) {
3126		switch (sid) {
3127		case ICE_SID_XLT1_SW:
3128		case ICE_SID_XLT1_FD:
3129		case ICE_SID_XLT1_RSS:
3130		case ICE_SID_XLT1_ACL:
3131		case ICE_SID_XLT1_PE:
3132			xlt1 = sect;
3133			src = xlt1->value;
3134			sect_len = le16_to_cpu(xlt1->count) *
3135				sizeof(*hw->blk[block_id].xlt1.t);
3136			dst = hw->blk[block_id].xlt1.t;
3137			dst_len = hw->blk[block_id].xlt1.count *
3138				sizeof(*hw->blk[block_id].xlt1.t);
3139			break;
3140		case ICE_SID_XLT2_SW:
3141		case ICE_SID_XLT2_FD:
3142		case ICE_SID_XLT2_RSS:
3143		case ICE_SID_XLT2_ACL:
3144		case ICE_SID_XLT2_PE:
3145			xlt2 = sect;
3146			src = (__force u8 *)xlt2->value;
3147			sect_len = le16_to_cpu(xlt2->count) *
3148				sizeof(*hw->blk[block_id].xlt2.t);
3149			dst = (u8 *)hw->blk[block_id].xlt2.t;
3150			dst_len = hw->blk[block_id].xlt2.count *
3151				sizeof(*hw->blk[block_id].xlt2.t);
3152			break;
3153		case ICE_SID_PROFID_TCAM_SW:
3154		case ICE_SID_PROFID_TCAM_FD:
3155		case ICE_SID_PROFID_TCAM_RSS:
3156		case ICE_SID_PROFID_TCAM_ACL:
3157		case ICE_SID_PROFID_TCAM_PE:
3158			pid = sect;
3159			src = (u8 *)pid->entry;
3160			sect_len = le16_to_cpu(pid->count) *
3161				sizeof(*hw->blk[block_id].prof.t);
3162			dst = (u8 *)hw->blk[block_id].prof.t;
3163			dst_len = hw->blk[block_id].prof.count *
3164				sizeof(*hw->blk[block_id].prof.t);
3165			break;
3166		case ICE_SID_PROFID_REDIR_SW:
3167		case ICE_SID_PROFID_REDIR_FD:
3168		case ICE_SID_PROFID_REDIR_RSS:
3169		case ICE_SID_PROFID_REDIR_ACL:
3170		case ICE_SID_PROFID_REDIR_PE:
3171			pr = sect;
3172			src = pr->redir_value;
3173			sect_len = le16_to_cpu(pr->count) *
3174				sizeof(*hw->blk[block_id].prof_redir.t);
3175			dst = hw->blk[block_id].prof_redir.t;
3176			dst_len = hw->blk[block_id].prof_redir.count *
3177				sizeof(*hw->blk[block_id].prof_redir.t);
3178			break;
3179		case ICE_SID_FLD_VEC_SW:
3180		case ICE_SID_FLD_VEC_FD:
3181		case ICE_SID_FLD_VEC_RSS:
3182		case ICE_SID_FLD_VEC_ACL:
3183		case ICE_SID_FLD_VEC_PE:
3184			es = sect;
3185			src = (u8 *)es->fv;
3186			sect_len = (u32)(le16_to_cpu(es->count) *
3187					 hw->blk[block_id].es.fvw) *
3188				sizeof(*hw->blk[block_id].es.t);
3189			dst = (u8 *)hw->blk[block_id].es.t;
3190			dst_len = (u32)(hw->blk[block_id].es.count *
3191					hw->blk[block_id].es.fvw) *
3192				sizeof(*hw->blk[block_id].es.t);
3193			break;
3194		default:
3195			return;
3196		}
3197
3198		/* if the section offset exceeds destination length, terminate
3199		 * table fill.
3200		 */
3201		if (offset > dst_len)
3202			return;
3203
3204		/* if the sum of section size and offset exceeds destination size
3205		 * then we are out of bounds of the HW table size for that PF.
3206		 * Change the section length to fill the remaining table space
3207		 * of that PF.
3208		 */
3209		if ((offset + sect_len) > dst_len)
3210			sect_len = dst_len - offset;
3211
3212		memcpy(dst + offset, src, sect_len);
3213		offset += sect_len;
3214		sect = ice_pkg_enum_section(NULL, &state, sid);
3215	}
3216}
3217
3218/**
3219 * ice_fill_blk_tbls - Read package context for tables
3220 * @hw: pointer to the hardware structure
3221 *
3222 * Reads the current package contents and populates the driver
3223 * database with the data iteratively for all advanced feature
3224 * blocks. Assume that the HW tables have been allocated.
3225 */
3226void ice_fill_blk_tbls(struct ice_hw *hw)
3227{
3228	u8 i;
3229
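	/* each block keeps five shadow tables (XLT1, XLT2, profile TCAM,
	 * profile redirection and extraction sequence); fill each one from
	 * the section ID recorded for it at init time
	 */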
3230	for (i = 0; i < ICE_BLK_COUNT; i++) {
3231		enum ice_block blk_id = (enum ice_block)i;
3232
3233		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3234		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3235		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3236		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3237		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3238	}
3239
3240	ice_init_sw_db(hw);
3241}
3242
3243/**
3244 * ice_free_prof_map - free profile map
3245 * @hw: pointer to the hardware structure
3246 * @blk_idx: HW block index
3247 */
3248static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3249{
3250	struct ice_es *es = &hw->blk[blk_idx].es;
3251	struct ice_prof_map *del, *tmp;
3252
3253	mutex_lock(&es->prof_map_lock);
3254	list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
3255		list_del(&del->list);
3256		devm_kfree(ice_hw_to_dev(hw), del);
3257	}
3258	INIT_LIST_HEAD(&es->prof_map);
3259	mutex_unlock(&es->prof_map_lock);
3260}
3261
3262/**
3263 * ice_free_flow_profs - free flow profile entries
3264 * @hw: pointer to the hardware structure
3265 * @blk_idx: HW block index
3266 */
3267static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3268{
3269	struct ice_flow_prof *p, *tmp;
3270
3271	mutex_lock(&hw->fl_profs_locks[blk_idx]);
3272	list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
3273		struct ice_flow_entry *e, *t;
3274
3275		list_for_each_entry_safe(e, t, &p->entries, l_entry)
3276			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
3277					   ICE_FLOW_ENTRY_HNDL(e));
3278
3279		list_del(&p->l_entry);
3280
3281		mutex_destroy(&p->entries_lock);
3282		devm_kfree(ice_hw_to_dev(hw), p);
3283	}
3284	mutex_unlock(&hw->fl_profs_locks[blk_idx]);
3285
3286	/* if driver is in reset and tables are being cleared
3287	 * re-initialize the flow profile list heads
3288	 */
3289	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3290}
3291
3292/**
3293 * ice_free_vsig_tbl - free complete VSIG table entries
3294 * @hw: pointer to the hardware structure
3295 * @blk: the HW block on which to free the VSIG table entries
3296 */
3297static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3298{
3299	u16 i;
3300
3301	if (!hw->blk[blk].xlt2.vsig_tbl)
3302		return;
3303
3304	for (i = 1; i < ICE_MAX_VSIGS; i++)
3305		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3306			ice_vsig_free(hw, blk, i);
3307}
3308
3309/**
3310 * ice_free_hw_tbls - free hardware table memory
3311 * @hw: pointer to the hardware structure
3312 */
3313void ice_free_hw_tbls(struct ice_hw *hw)
3314{
3315	struct ice_rss_cfg *r, *rt;
3316	u8 i;
3317
3318	for (i = 0; i < ICE_BLK_COUNT; i++) {
3319		if (hw->blk[i].is_list_init) {
3320			struct ice_es *es = &hw->blk[i].es;
3321
3322			ice_free_prof_map(hw, i);
3323			mutex_destroy(&es->prof_map_lock);
3324
3325			ice_free_flow_profs(hw, i);
3326			mutex_destroy(&hw->fl_profs_locks[i]);
3327
3328			hw->blk[i].is_list_init = false;
3329		}
3330		ice_free_vsig_tbl(hw, (enum ice_block)i);
3331		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
3332		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
3333		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
3334		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
3335		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
3336		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
3337		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
3338		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
3339		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
3340		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
3341		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
3342		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
3343	}
3344
3345	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
3346		list_del(&r->l_entry);
3347		devm_kfree(ice_hw_to_dev(hw), r);
3348	}
3349	mutex_destroy(&hw->rss_locks);
3350	ice_shutdown_all_prof_masks(hw);
3351	memset(hw->blk, 0, sizeof(hw->blk));
3352}
3353
3354/**
3355 * ice_init_flow_profs - init flow profile locks and list heads
3356 * @hw: pointer to the hardware structure
3357 * @blk_idx: HW block index
3358 */
3359static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3360{
3361	mutex_init(&hw->fl_profs_locks[blk_idx]);
3362	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3363}
3364
3365/**
3366 * ice_clear_hw_tbls - clear HW tables and flow profiles
3367 * @hw: pointer to the hardware structure
3368 */
3369void ice_clear_hw_tbls(struct ice_hw *hw)
3370{
3371	u8 i;
3372
3373	for (i = 0; i < ICE_BLK_COUNT; i++) {
3374		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3375		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3376		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3377		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3378		struct ice_es *es = &hw->blk[i].es;
3379
3380		if (hw->blk[i].is_list_init) {
3381			ice_free_prof_map(hw, i);
3382			ice_free_flow_profs(hw, i);
3383		}
3384
3385		ice_free_vsig_tbl(hw, (enum ice_block)i);
3386
3387		memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
3388		memset(xlt1->ptg_tbl, 0,
3389		       ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
3390		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
3391
3392		memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
3393		memset(xlt2->vsig_tbl, 0,
3394		       xlt2->count * sizeof(*xlt2->vsig_tbl));
3395		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
3396
3397		memset(prof->t, 0, prof->count * sizeof(*prof->t));
3398		memset(prof_redir->t, 0,
3399		       prof_redir->count * sizeof(*prof_redir->t));
3400
3401		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
3402		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
3403		memset(es->written, 0, es->count * sizeof(*es->written));
3404		memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
3405	}
3406}
3407
3408/**
3409 * ice_init_hw_tbls - init hardware table memory
3410 * @hw: pointer to the hardware structure
3411 */
3412enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3413{
3414	u8 i;
3415
3416	mutex_init(&hw->rss_locks);
3417	INIT_LIST_HEAD(&hw->rss_list_head);
3418	ice_init_all_prof_masks(hw);
3419	for (i = 0; i < ICE_BLK_COUNT; i++) {
3420		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3421		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3422		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3423		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3424		struct ice_es *es = &hw->blk[i].es;
3425		u16 j;
3426
3427		if (hw->blk[i].is_list_init)
3428			continue;
3429
3430		ice_init_flow_profs(hw, i);
3431		mutex_init(&es->prof_map_lock);
3432		INIT_LIST_HEAD(&es->prof_map);
3433		hw->blk[i].is_list_init = true;
3434
3435		hw->blk[i].overwrite = blk_sizes[i].overwrite;
3436		es->reverse = blk_sizes[i].reverse;
3437
3438		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3439		xlt1->count = blk_sizes[i].xlt1;
3440
3441		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3442					    sizeof(*xlt1->ptypes), GFP_KERNEL);
3443
3444		if (!xlt1->ptypes)
3445			goto err;
3446
3447		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
3448					     sizeof(*xlt1->ptg_tbl),
3449					     GFP_KERNEL);
3450
3451		if (!xlt1->ptg_tbl)
3452			goto err;
3453
3454		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3455				       sizeof(*xlt1->t), GFP_KERNEL);
3456		if (!xlt1->t)
3457			goto err;
3458
3459		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3460		xlt2->count = blk_sizes[i].xlt2;
3461
3462		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3463					  sizeof(*xlt2->vsis), GFP_KERNEL);
3464
3465		if (!xlt2->vsis)
3466			goto err;
3467
3468		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3469					      sizeof(*xlt2->vsig_tbl),
3470					      GFP_KERNEL);
3471		if (!xlt2->vsig_tbl)
3472			goto err;
3473
3474		for (j = 0; j < xlt2->count; j++)
3475			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3476
3477		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3478				       sizeof(*xlt2->t), GFP_KERNEL);
3479		if (!xlt2->t)
3480			goto err;
3481
3482		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3483		prof->count = blk_sizes[i].prof_tcam;
3484		prof->max_prof_id = blk_sizes[i].prof_id;
3485		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3486		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
3487				       sizeof(*prof->t), GFP_KERNEL);
3488
3489		if (!prof->t)
3490			goto err;
3491
3492		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3493		prof_redir->count = blk_sizes[i].prof_redir;
3494		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
3495					     prof_redir->count,
3496					     sizeof(*prof_redir->t),
3497					     GFP_KERNEL);
3498
3499		if (!prof_redir->t)
3500			goto err;
3501
3502		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3503		es->count = blk_sizes[i].es;
3504		es->fvw = blk_sizes[i].fvw;
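		/* the ES shadow table stores es->fvw field-vector words for
		 * every profile, hence the count * fvw sizing below
		 */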
3505		es->t = devm_kcalloc(ice_hw_to_dev(hw),
3506				     (u32)(es->count * es->fvw),
3507				     sizeof(*es->t), GFP_KERNEL);
3508		if (!es->t)
3509			goto err;
3510
3511		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3512					     sizeof(*es->ref_count),
3513					     GFP_KERNEL);
3514		if (!es->ref_count)
3515			goto err;
3516
3517		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3518					   sizeof(*es->written), GFP_KERNEL);
3519		if (!es->written)
3520			goto err;
3521
3522		es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3523					    sizeof(*es->mask_ena), GFP_KERNEL);
3524		if (!es->mask_ena)
3525			goto err;
3526	}
3527	return 0;
3528
3529err:
3530	ice_free_hw_tbls(hw);
3531	return ICE_ERR_NO_MEMORY;
3532}
3533
3534/**
3535 * ice_prof_gen_key - generate profile ID key
3536 * @hw: pointer to the HW struct
3537 * @blk: the block in which to write the profile ID
3538 * @ptg: packet type group (PTG) portion of key
3539 * @vsig: VSIG portion of key
3540 * @cdid: CDID portion of key
3541 * @flags: flag portion of key
3542 * @vl_msk: valid mask
3543 * @dc_msk: don't care mask
3544 * @nm_msk: never match mask
3545 * @key: output of profile ID key
3546 */
3547static enum ice_status
3548ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3549		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3550		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3551		 u8 key[ICE_TCAM_KEY_SZ])
3552{
3553	struct ice_prof_id_key inkey;
3554
3555	inkey.xlt1 = ptg;
3556	inkey.xlt2_cdid = cpu_to_le16(vsig);
3557	inkey.flags = cpu_to_le16(flags);
3558
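	/* The CDID portion, when used, occupies the upper bits of the
	 * XLT2/CDID half of the key; the per-block cdid_bits value selects
	 * which of the layouts below applies.
	 */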
3559	switch (hw->blk[blk].prof.cdid_bits) {
3560	case 0:
3561		break;
3562	case 2:
3563#define ICE_CD_2_M 0xC000U
3564#define ICE_CD_2_S 14
3565		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
3566		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
3567		break;
3568	case 4:
3569#define ICE_CD_4_M 0xF000U
3570#define ICE_CD_4_S 12
3571		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
3572		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
3573		break;
3574	case 8:
3575#define ICE_CD_8_M 0xFF00U
3576#define ICE_CD_8_S 16
3577		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
3578		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
3579		break;
3580	default:
3581		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3582		break;
3583	}
3584
3585	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3586			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3587}
3588
3589/**
3590 * ice_tcam_write_entry - write TCAM entry
3591 * @hw: pointer to the HW struct
3592 * @blk: the block in which to write the profile ID
3593 * @idx: the entry index to write to
3594 * @prof_id: profile ID
3595 * @ptg: packet type group (PTG) portion of key
3596 * @vsig: VSIG portion of key
3597 * @cdid: CDID portion of key
3598 * @flags: flag portion of key
3599 * @vl_msk: valid mask
3600 * @dc_msk: don't care mask
3601 * @nm_msk: never match mask
3602 */
3603static enum ice_status
3604ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3605		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3606		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3607		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3608		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3609{
3610	struct ice_prof_tcam_entry;
3611	enum ice_status status;
3612
3613	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3614				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3615	if (!status) {
3616		hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
3617		hw->blk[blk].prof.t[idx].prof_id = prof_id;
3618	}
3619
3620	return status;
3621}
3622
3623/**
3624 * ice_vsig_get_ref - returns number of VSIs belonging to a VSIG
3625 * @hw: pointer to the hardware structure
3626 * @blk: HW block
3627 * @vsig: VSIG to query
3628 * @refs: pointer to variable to receive the reference count
3629 */
3630static enum ice_status
3631ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3632{
3633	u16 idx = vsig & ICE_VSIG_IDX_M;
3634	struct ice_vsig_vsi *ptr;
3635
3636	*refs = 0;
3637
3638	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3639		return ICE_ERR_DOES_NOT_EXIST;
3640
3641	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3642	while (ptr) {
3643		(*refs)++;
3644		ptr = ptr->next_vsi;
3645	}
3646
3647	return 0;
3648}
3649
3650/**
3651 * ice_has_prof_vsig - check to see if VSIG has a specific profile
3652 * @hw: pointer to the hardware structure
3653 * @blk: HW block
3654 * @vsig: VSIG to check against
3655 * @hdl: profile handle
3656 */
3657static bool
3658ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3659{
3660	u16 idx = vsig & ICE_VSIG_IDX_M;
3661	struct ice_vsig_prof *ent;
3662
3663	list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3664			    list)
3665		if (ent->profile_cookie == hdl)
3666			return true;
3667
3668	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
3669		  vsig);
3670	return false;
3671}
3672
3673/**
3674 * ice_prof_bld_es - build profile ID extraction sequence changes
3675 * @hw: pointer to the HW struct
3676 * @blk: hardware block
3677 * @bld: the update package buffer build to add to
3678 * @chgs: the list of changes to make in hardware
3679 */
3680static enum ice_status
3681ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3682		struct ice_buf_build *bld, struct list_head *chgs)
3683{
3684	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3685	struct ice_chs_chg *tmp;
3686
3687	list_for_each_entry(tmp, chgs, list_entry)
3688		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3689			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3690			struct ice_pkg_es *p;
3691			u32 id;
3692
3693			id = ice_sect_id(blk, ICE_VEC_TBL);
3694			p = ice_pkg_buf_alloc_section(bld, id,
3695						      struct_size(p, es, 1) +
3696						      vec_size -
3697						      sizeof(p->es[0]));
3698
3699			if (!p)
3700				return ICE_ERR_MAX_LIMIT;
3701
3702			p->count = cpu_to_le16(1);
3703			p->offset = cpu_to_le16(tmp->prof_id);
3704
3705			memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
3706		}
3707
3708	return 0;
3709}
3710
3711/**
3712 * ice_prof_bld_tcam - build profile ID TCAM changes
3713 * @hw: pointer to the HW struct
3714 * @blk: hardware block
3715 * @bld: the update package buffer build to add to
3716 * @chgs: the list of changes to make in hardware
3717 */
3718static enum ice_status
3719ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
3720		  struct ice_buf_build *bld, struct list_head *chgs)
3721{
3722	struct ice_chs_chg *tmp;
3723
3724	list_for_each_entry(tmp, chgs, list_entry)
3725		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
3726			struct ice_prof_id_section *p;
3727			u32 id;
3728
3729			id = ice_sect_id(blk, ICE_PROF_TCAM);
3730			p = ice_pkg_buf_alloc_section(bld, id,
3731						      struct_size(p, entry, 1));
3732
3733			if (!p)
3734				return ICE_ERR_MAX_LIMIT;
3735
3736			p->count = cpu_to_le16(1);
3737			p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
3738			p->entry[0].prof_id = tmp->prof_id;
3739
3740			memcpy(p->entry[0].key,
3741			       &hw->blk[blk].prof.t[tmp->tcam_idx].key,
3742			       sizeof(hw->blk[blk].prof.t->key));
3743		}
3744
3745	return 0;
3746}
3747
3748/**
3749 * ice_prof_bld_xlt1 - build XLT1 changes
3750 * @blk: hardware block
3751 * @bld: the update package buffer build to add to
3752 * @chgs: the list of changes to make in hardware
3753 */
3754static enum ice_status
3755ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
3756		  struct list_head *chgs)
3757{
3758	struct ice_chs_chg *tmp;
3759
3760	list_for_each_entry(tmp, chgs, list_entry)
3761		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
3762			struct ice_xlt1_section *p;
3763			u32 id;
3764
3765			id = ice_sect_id(blk, ICE_XLT1);
3766			p = ice_pkg_buf_alloc_section(bld, id,
3767						      struct_size(p, value, 1));
3768
3769			if (!p)
3770				return ICE_ERR_MAX_LIMIT;
3771
3772			p->count = cpu_to_le16(1);
3773			p->offset = cpu_to_le16(tmp->ptype);
3774			p->value[0] = tmp->ptg;
3775		}
3776
3777	return 0;
3778}
3779
3780/**
3781 * ice_prof_bld_xlt2 - build XLT2 changes
3782 * @blk: hardware block
3783 * @bld: the update package buffer build to add to
3784 * @chgs: the list of changes to make in hardware
3785 */
3786static enum ice_status
3787ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
3788		  struct list_head *chgs)
3789{
3790	struct ice_chs_chg *tmp;
3791
3792	list_for_each_entry(tmp, chgs, list_entry) {
3793		struct ice_xlt2_section *p;
3794		u32 id;
3795
3796		switch (tmp->type) {
3797		case ICE_VSIG_ADD:
3798		case ICE_VSI_MOVE:
3799		case ICE_VSIG_REM:
3800			id = ice_sect_id(blk, ICE_XLT2);
3801			p = ice_pkg_buf_alloc_section(bld, id,
3802						      struct_size(p, value, 1));
3803
3804			if (!p)
3805				return ICE_ERR_MAX_LIMIT;
3806
3807			p->count = cpu_to_le16(1);
3808			p->offset = cpu_to_le16(tmp->vsi);
3809			p->value[0] = cpu_to_le16(tmp->vsig);
3810			break;
3811		default:
3812			break;
3813		}
3814	}
3815
3816	return 0;
3817}
3818
3819/**
3820 * ice_upd_prof_hw - update hardware using the change list
3821 * @hw: pointer to the HW struct
3822 * @blk: hardware block
3823 * @chgs: the list of changes to make in hardware
3824 */
3825static enum ice_status
3826ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
3827		struct list_head *chgs)
3828{
3829	struct ice_buf_build *b;
3830	struct ice_chs_chg *tmp;
3831	enum ice_status status;
3832	u16 pkg_sects;
3833	u16 xlt1 = 0;
3834	u16 xlt2 = 0;
3835	u16 tcam = 0;
3836	u16 es = 0;
3837	u16 sects;
3838
3839	/* count number of sections we need */
3840	list_for_each_entry(tmp, chgs, list_entry) {
3841		switch (tmp->type) {
3842		case ICE_PTG_ES_ADD:
3843			if (tmp->add_ptg)
3844				xlt1++;
3845			if (tmp->add_prof)
3846				es++;
3847			break;
3848		case ICE_TCAM_ADD:
3849			tcam++;
3850			break;
3851		case ICE_VSIG_ADD:
3852		case ICE_VSI_MOVE:
3853		case ICE_VSIG_REM:
3854			xlt2++;
3855			break;
3856		default:
3857			break;
3858		}
3859	}
3860	sects = xlt1 + xlt2 + tcam + es;
3861
3862	if (!sects)
3863		return 0;
3864
3865	/* Build update package buffer */
3866	b = ice_pkg_buf_alloc(hw);
3867	if (!b)
3868		return ICE_ERR_NO_MEMORY;
3869
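	/* reserve one buffer section per counted change; the active section
	 * count is checked against this same total once the build is done
	 */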
3870	status = ice_pkg_buf_reserve_section(b, sects);
3871	if (status)
3872		goto error_tmp;
3873
3874	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
3875	if (es) {
3876		status = ice_prof_bld_es(hw, blk, b, chgs);
3877		if (status)
3878			goto error_tmp;
3879	}
3880
3881	if (tcam) {
3882		status = ice_prof_bld_tcam(hw, blk, b, chgs);
3883		if (status)
3884			goto error_tmp;
3885	}
3886
3887	if (xlt1) {
3888		status = ice_prof_bld_xlt1(blk, b, chgs);
3889		if (status)
3890			goto error_tmp;
3891	}
3892
3893	if (xlt2) {
3894		status = ice_prof_bld_xlt2(blk, b, chgs);
3895		if (status)
3896			goto error_tmp;
3897	}
3898
3899	/* After package buffer build check if the section count in buffer is
3900	 * non-zero and matches the number of sections detected for package
3901	 * update.
3902	 */
3903	pkg_sects = ice_pkg_buf_get_active_sections(b);
3904	if (!pkg_sects || pkg_sects != sects) {
3905		status = ICE_ERR_INVAL_SIZE;
3906		goto error_tmp;
3907	}
3908
3909	/* update package */
3910	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
3911	if (status == ICE_ERR_AQ_ERROR)
3912		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
3913
3914error_tmp:
3915	ice_pkg_buf_free(hw, b);
3916	return status;
3917}
3918
3919/**
3920 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
3921 * @hw: pointer to the HW struct
3922 * @prof_id: profile ID
3923 * @mask_sel: mask select
3924 *
3925 * This function enables any of the masks selected by the mask select parameter
3926 * for the profile specified.
3927 */
3928static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
3929{
3930	wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
3931
3932	ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
3933		  GLQF_FDMASK_SEL(prof_id), mask_sel);
3934}
3935
3936struct ice_fd_src_dst_pair {
3937	u8 prot_id;
3938	u8 count;
3939	u16 off;
3940};
3941
3942static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
3943	/* These are defined in pairs */
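	/* Each pair gives the source and destination offsets of the same
	 * header type; count is the number of 2-byte field-vector words the
	 * field spans (e.g. 2 for an IPv4 address, 8 for an IPv6 address).
	 */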
3944	{ ICE_PROT_IPV4_OF_OR_S, 2, 12 },
3945	{ ICE_PROT_IPV4_OF_OR_S, 2, 16 },
3946
3947	{ ICE_PROT_IPV4_IL, 2, 12 },
3948	{ ICE_PROT_IPV4_IL, 2, 16 },
3949
3950	{ ICE_PROT_IPV6_OF_OR_S, 8, 8 },
3951	{ ICE_PROT_IPV6_OF_OR_S, 8, 24 },
3952
3953	{ ICE_PROT_IPV6_IL, 8, 8 },
3954	{ ICE_PROT_IPV6_IL, 8, 24 },
3955
3956	{ ICE_PROT_TCP_IL, 1, 0 },
3957	{ ICE_PROT_TCP_IL, 1, 2 },
3958
3959	{ ICE_PROT_UDP_OF, 1, 0 },
3960	{ ICE_PROT_UDP_OF, 1, 2 },
3961
3962	{ ICE_PROT_UDP_IL_OR_S, 1, 0 },
3963	{ ICE_PROT_UDP_IL_OR_S, 1, 2 },
3964
3965	{ ICE_PROT_SCTP_IL, 1, 0 },
3966	{ ICE_PROT_SCTP_IL, 1, 2 }
3967};
3968
3969#define ICE_FD_SRC_DST_PAIR_COUNT	ARRAY_SIZE(ice_fd_pairs)
3970
3971/**
3972 * ice_update_fd_swap - set register appropriately for an FD FV extraction
3973 * @hw: pointer to the HW struct
3974 * @prof_id: profile ID
3975 * @es: extraction sequence (length of array is determined by the block)
3976 */
3977static enum ice_status
3978ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
3979{
3980	DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3981	u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
3982#define ICE_FD_FV_NOT_FOUND (-2)
3983	s8 first_free = ICE_FD_FV_NOT_FOUND;
3984	u8 used[ICE_MAX_FV_WORDS] = { 0 };
3985	s8 orig_free, si;
3986	u32 mask_sel = 0;
3987	u8 i, j, k;
3988
3989	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3990
3991	/* This code assumes that the Flow Director field vectors are assigned
3992	 * from the end of the FV indexes working towards the zero index, that
3993	 * only complete fields will be included and will be consecutive, and
3994	 * that there are no gaps between valid indexes.
3995	 */
3996
3997	/* Determine swap fields present */
3998	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
3999		/* Find the first free entry, assuming right to left population.
4000		 * This is where we can start adding additional pairs if needed.
4001		 */
4002		if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4003		    ICE_PROT_INVALID)
4004			first_free = i - 1;
4005
4006		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4007			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4008			    es[i].off == ice_fd_pairs[j].off) {
4009				set_bit(j, pair_list);
4010				pair_start[j] = i;
4011			}
4012	}
4013
4014	orig_free = first_free;
4015
4016	/* determine missing swap fields that need to be added */
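	/* FD filters swap source and destination fields on Tx, so if only one
	 * half of a src/dst pair is present in the extraction sequence, the
	 * missing half must be added for the swap to have a target.
	 */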
4017	for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4018		u8 bit1 = test_bit(i + 1, pair_list);
4019		u8 bit0 = test_bit(i, pair_list);
4020
4021		if (bit0 ^ bit1) {
4022			u8 index;
4023
4024			/* add the appropriate 'paired' entry */
4025			if (!bit0)
4026				index = i;
4027			else
4028				index = i + 1;
4029
4030			/* check for room */
4031			if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4032				return ICE_ERR_MAX_LIMIT;
4033
4034			/* place in extraction sequence */
4035			for (k = 0; k < ice_fd_pairs[index].count; k++) {
4036				es[first_free - k].prot_id =
4037					ice_fd_pairs[index].prot_id;
4038				es[first_free - k].off =
4039					ice_fd_pairs[index].off + (k * 2);
4040
4041				if (k > first_free)
4042					return ICE_ERR_OUT_OF_RANGE;
4043
4044				/* keep track of non-relevant fields */
4045				mask_sel |= BIT(first_free - k);
4046			}
4047
4048			pair_start[index] = first_free;
4049			first_free -= ice_fd_pairs[index].count;
4050		}
4051	}
4052
4053	/* fill in the swap array */
4054	si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4055	while (si >= 0) {
4056		u8 indexes_used = 1;
4057
4058		/* assume flat at this index */
4059#define ICE_SWAP_VALID	0x80
4060		used[si] = si | ICE_SWAP_VALID;
4061
4062		if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4063			si -= indexes_used;
4064			continue;
4065		}
4066
4067		/* check for a swap location */
4068		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4069			if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4070			    es[si].off == ice_fd_pairs[j].off) {
4071				u8 idx;
4072
4073				/* determine the appropriate matching field */
4074				idx = j + ((j % 2) ? -1 : 1);
4075
4076				indexes_used = ice_fd_pairs[idx].count;
4077				for (k = 0; k < indexes_used; k++) {
4078					used[si - k] = (pair_start[idx] - k) |
4079						ICE_SWAP_VALID;
4080				}
4081
4082				break;
4083			}
4084
4085		si -= indexes_used;
4086	}
4087
4088	/* for each set of 4 swap and 4 inset indexes, write the appropriate
4089	 * register
4090	 */
4091	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4092		u32 raw_swap = 0;
4093		u32 raw_in = 0;
4094
4095		for (k = 0; k < 4; k++) {
4096			u8 idx;
4097
4098			idx = (j * 4) + k;
4099			if (used[idx] && !(mask_sel & BIT(idx))) {
4100				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4101#define ICE_INSET_DFLT 0x9f
4102				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4103			}
4104		}
4105
4106		/* write the appropriate swap register set */
4107		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4108
4109		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4110			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4111
4112		/* write the appropriate inset register set */
4113		wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4114
4115		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4116			  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4117	}
4118
4119	/* initially clear the mask select for this profile */
4120	ice_update_fd_mask(hw, prof_id, 0);
4121
4122	return 0;
4123}
4124
4125 /* The entries here need to match the order of enum ice_ptype_attrib */
4126static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4127	{ ICE_GTP_PDU_EH,	ICE_GTP_PDU_FLAG_MASK },
4128	{ ICE_GTP_SESSION,	ICE_GTP_FLAGS_MASK },
4129	{ ICE_GTP_DOWNLINK,	ICE_GTP_FLAGS_MASK },
4130	{ ICE_GTP_UPLINK,	ICE_GTP_FLAGS_MASK },
4131};
4132
4133/**
4134 * ice_get_ptype_attrib_info - get PTYPE attribute information
4135 * @type: attribute type
4136 * @info: pointer to variable to the attribute information
4137 */
4138static void
4139ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4140			  struct ice_ptype_attrib_info *info)
4141{
4142	*info = ice_ptype_attributes[type];
4143}
4144
4145/**
4146 * ice_add_prof_attrib - add any PTG with attributes to profile
4147 * @prof: pointer to the profile to which PTG entries will be added
4148 * @ptg: PTG to be added
4149 * @ptype: PTYPE that needs to be looked up
4150 * @attr: array of attributes that will be considered
4151 * @attr_cnt: number of elements in the attribute array
4152 */
4153static enum ice_status
4154ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4155		    const struct ice_ptype_attributes *attr, u16 attr_cnt)
4156{
4157	bool found = false;
4158	u16 i;
4159
4160	for (i = 0; i < attr_cnt; i++)
4161		if (attr[i].ptype == ptype) {
4162			found = true;
4163
4164			prof->ptg[prof->ptg_cnt] = ptg;
4165			ice_get_ptype_attrib_info(attr[i].attrib,
4166						  &prof->attr[prof->ptg_cnt]);
4167
4168			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4169				return ICE_ERR_MAX_LIMIT;
4170		}
4171
4172	if (!found)
4173		return ICE_ERR_DOES_NOT_EXIST;
4174
4175	return 0;
4176}
4177
4178/**
4179 * ice_add_prof - add profile
4180 * @hw: pointer to the HW struct
4181 * @blk: hardware block
4182 * @id: profile tracking ID
4183 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4184 * @attr: array of attributes
4185 * @attr_cnt: number of elements in attr array
4186 * @es: extraction sequence (length of array is determined by the block)
4187 * @masks: mask for extraction sequence
4188 *
4189 * This function registers a profile, which matches a set of PTYPES with a
4190 * particular extraction sequence. While the hardware profile is allocated
4191 * particular extraction sequence. While the hardware profile is allocated,
4192 * the ID value used here.
4193 */
4194enum ice_status
4195ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4196	     const struct ice_ptype_attributes *attr, u16 attr_cnt,
4197	     struct ice_fv_word *es, u16 *masks)
4198{
4199	u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4200	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4201	struct ice_prof_map *prof;
4202	enum ice_status status;
4203	u8 byte = 0;
4204	u8 prof_id;
4205
4206	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4207
4208	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4209
4210	/* search for existing profile */
4211	status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4212	if (status) {
4213		/* allocate profile ID */
4214		status = ice_alloc_prof_id(hw, blk, &prof_id);
4215		if (status)
4216			goto err_ice_add_prof;
4217		if (blk == ICE_BLK_FD) {
4218			/* For Flow Director block, the extraction sequence may
4219			 * need to be altered in the case where there are paired
4220			 * fields that have no match. This is necessary because
4221			 * for Flow Director, src and dest fields need to be paired
4222			 * for filter programming and these values are swapped
4223			 * during Tx.
4224			 */
4225			status = ice_update_fd_swap(hw, prof_id, es);
4226			if (status)
4227				goto err_ice_add_prof;
4228		}
4229		status = ice_update_prof_masking(hw, blk, prof_id, masks);
4230		if (status)
4231			goto err_ice_add_prof;
4232
4233		/* and write new es */
4234		ice_write_es(hw, blk, prof_id, es);
4235	}
4236
4237	ice_prof_inc_ref(hw, blk, prof_id);
4238
4239	/* add profile info */
4240	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
4241	if (!prof) {
4242		status = ICE_ERR_NO_MEMORY;
4243		goto err_ice_add_prof;
4244	}
4245
4246	prof->profile_cookie = id;
4247	prof->prof_id = prof_id;
4248	prof->ptg_cnt = 0;
4249	prof->context = 0;
4250
4251	/* build list of ptgs */
4252	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4253		u8 bit;
4254
4255		if (!ptypes[byte]) {
4256			bytes--;
4257			byte++;
4258			continue;
4259		}
4260
4261		/* Examine 8 bits per byte */
4262		for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
4263				 BITS_PER_BYTE) {
4264			u16 ptype;
4265			u8 ptg;
4266
4267			ptype = byte * BITS_PER_BYTE + bit;
4268
4269			/* The package should place all ptypes in a non-zero
4270			 * PTG, so the following call should never fail.
4271			 */
4272			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4273				continue;
4274
4275			/* If PTG is already added, skip and continue */
4276			if (test_bit(ptg, ptgs_used))
4277				continue;
4278
4279			set_bit(ptg, ptgs_used);
4280			/* Check to see there are any attributes for
4281			 * this PTYPE, and add them if found.
4282			 */
4283			status = ice_add_prof_attrib(prof, ptg, ptype,
4284						     attr, attr_cnt);
4285			if (status == ICE_ERR_MAX_LIMIT)
4286				break;
4287			if (status) {
4288				/* This is simply a PTYPE/PTG with no
4289				 * attribute
4290				 */
4291				prof->ptg[prof->ptg_cnt] = ptg;
4292				prof->attr[prof->ptg_cnt].flags = 0;
4293				prof->attr[prof->ptg_cnt].mask = 0;
4294
4295				if (++prof->ptg_cnt >=
4296				    ICE_MAX_PTG_PER_PROFILE)
4297					break;
4298			}
4299		}
4300
4301		bytes--;
4302		byte++;
4303	}
4304
4305	list_add(&prof->list, &hw->blk[blk].es.prof_map);
4306	status = 0;
4307
4308err_ice_add_prof:
4309	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4310	return status;
4311}
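
/* Illustrative (hypothetical) usage sketch, not a call site in this file:
 * a caller builds a ptype bitmap and an extraction sequence for a block and
 * registers them under its own tracking ID, for example:
 *
 *	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX) = { 0 };
 *	struct ice_fv_word es[ICE_MAX_FV_WORDS] = { 0 };
 *	u16 masks[ICE_MAX_FV_WORDS] = { 0 };
 *	enum ice_status status;
 *
 *	set_bit(my_ptype, ptypes);
 *	... fill es[] / masks[] with the fields to extract ...
 *	status = ice_add_prof(hw, ICE_BLK_RSS, my_prof_id, (u8 *)ptypes,
 *			      NULL, 0, es, masks);
 *
 * my_ptype and my_prof_id are placeholders; the real callers live in the
 * flow code (ice_flow.c).
 */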
4312
4313/**
4314 * ice_search_prof_id - Search for a profile tracking ID
4315 * @hw: pointer to the HW struct
4316 * @blk: hardware block
4317 * @id: profile tracking ID
4318 *
4319 * This will search for a profile tracking ID which was previously added.
4320 * The profile map lock should be held before calling this function.
4321 */
4322static struct ice_prof_map *
4323ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4324{
4325	struct ice_prof_map *entry = NULL;
4326	struct ice_prof_map *map;
4327
4328	list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
4329		if (map->profile_cookie == id) {
4330			entry = map;
4331			break;
4332		}
4333
4334	return entry;
4335}
4336
4337/**
4338 * ice_vsig_prof_id_count - count profiles in a VSIG
4339 * @hw: pointer to the HW struct
4340 * @blk: hardware block
4341 * @vsig: VSIG to remove the profile from
4342 */
4343static u16
4344ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4345{
4346	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4347	struct ice_vsig_prof *p;
4348
4349	list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4350			    list)
4351		count++;
4352
4353	return count;
4354}
4355
4356/**
4357 * ice_rel_tcam_idx - release a TCAM index
4358 * @hw: pointer to the HW struct
4359 * @blk: hardware block
4360 * @idx: the index to release
4361 */
4362static enum ice_status
4363ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4364{
4365	/* Masks to invoke a never match entry */
4366	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4367	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4368	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4369	enum ice_status status;
4370
4371	/* write the TCAM entry */
4372	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4373				      dc_msk, nm_msk);
4374	if (status)
4375		return status;
4376
4377	/* release the TCAM entry */
4378	status = ice_free_tcam_ent(hw, blk, idx);
4379
4380	return status;
4381}
4382
4383/**
4384 * ice_rem_prof_id - remove one profile from a VSIG
4385 * @hw: pointer to the HW struct
4386 * @blk: hardware block
4387 * @prof: pointer to profile structure to remove
4388 */
4389static enum ice_status
4390ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4391		struct ice_vsig_prof *prof)
4392{
4393	enum ice_status status;
4394	u16 i;
4395
4396	for (i = 0; i < prof->tcam_count; i++)
4397		if (prof->tcam[i].in_use) {
4398			prof->tcam[i].in_use = false;
4399			status = ice_rel_tcam_idx(hw, blk,
4400						  prof->tcam[i].tcam_idx);
4401			if (status)
4402				return ICE_ERR_HW_TABLE;
4403		}
4404
4405	return 0;
4406}
4407
4408/**
4409 * ice_rem_vsig - remove VSIG
4410 * @hw: pointer to the HW struct
4411 * @blk: hardware block
4412 * @vsig: the VSIG to remove
4413 * @chg: the change list
4414 */
4415static enum ice_status
4416ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4417	     struct list_head *chg)
4418{
4419	u16 idx = vsig & ICE_VSIG_IDX_M;
4420	struct ice_vsig_vsi *vsi_cur;
4421	struct ice_vsig_prof *d, *t;
4422	enum ice_status status;
4423
4424	/* remove TCAM entries */
4425	list_for_each_entry_safe(d, t,
4426				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4427				 list) {
4428		status = ice_rem_prof_id(hw, blk, d);
4429		if (status)
4430			return status;
4431
4432		list_del(&d->list);
4433		devm_kfree(ice_hw_to_dev(hw), d);
4434	}
4435
4436	/* Move all VSIs associated with this VSIG to the default VSIG */
4437	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4438	/* If the VSIG has at least 1 VSI then iterate through the list
4439	 * and remove the VSIs before deleting the group.
4440	 */
4441	if (vsi_cur)
4442		do {
4443			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4444			struct ice_chs_chg *p;
4445
4446			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4447					 GFP_KERNEL);
4448			if (!p)
4449				return ICE_ERR_NO_MEMORY;
4450
4451			p->type = ICE_VSIG_REM;
4452			p->orig_vsig = vsig;
4453			p->vsig = ICE_DEFAULT_VSIG;
4454			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4455
4456			list_add(&p->list_entry, chg);
4457
4458			vsi_cur = tmp;
4459		} while (vsi_cur);
4460
4461	return ice_vsig_free(hw, blk, vsig);
4462}
4463
4464/**
4465 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4466 * @hw: pointer to the HW struct
4467 * @blk: hardware block
4468 * @vsig: VSIG to remove the profile from
4469 * @hdl: profile handle indicating which profile to remove
4470 * @chg: list to receive a record of changes
4471 */
4472static enum ice_status
4473ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4474		     struct list_head *chg)
4475{
4476	u16 idx = vsig & ICE_VSIG_IDX_M;
4477	struct ice_vsig_prof *p, *t;
4478	enum ice_status status;
4479
4480	list_for_each_entry_safe(p, t,
4481				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4482				 list)
4483		if (p->profile_cookie == hdl) {
4484			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4485				/* this is the last profile, remove the VSIG */
4486				return ice_rem_vsig(hw, blk, vsig, chg);
4487
4488			status = ice_rem_prof_id(hw, blk, p);
4489			if (!status) {
4490				list_del(&p->list);
4491				devm_kfree(ice_hw_to_dev(hw), p);
4492			}
4493			return status;
4494		}
4495
4496	return ICE_ERR_DOES_NOT_EXIST;
4497}
4498
4499/**
4500 * ice_rem_flow_all - remove all flows with a particular profile
4501 * @hw: pointer to the HW struct
4502 * @blk: hardware block
4503 * @id: profile tracking ID
4504 */
4505static enum ice_status
4506ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4507{
4508	struct ice_chs_chg *del, *tmp;
4509	enum ice_status status;
4510	struct list_head chg;
4511	u16 i;
4512
4513	INIT_LIST_HEAD(&chg);
4514
4515	for (i = 1; i < ICE_MAX_VSIGS; i++)
4516		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4517			if (ice_has_prof_vsig(hw, blk, i, id)) {
4518				status = ice_rem_prof_id_vsig(hw, blk, i, id,
4519							      &chg);
4520				if (status)
4521					goto err_ice_rem_flow_all;
4522			}
4523		}
4524
4525	status = ice_upd_prof_hw(hw, blk, &chg);
4526
4527err_ice_rem_flow_all:
4528	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4529		list_del(&del->list_entry);
4530		devm_kfree(ice_hw_to_dev(hw), del);
4531	}
4532
4533	return status;
4534}
4535
4536/**
4537 * ice_rem_prof - remove profile
4538 * @hw: pointer to the HW struct
4539 * @blk: hardware block
4540 * @id: profile tracking ID
4541 *
4542 * This will remove the profile specified by the ID parameter, which was
4543 * previously created through ice_add_prof. If any existing entries
4544 * are associated with this profile, they will be removed as well.
4545 */
4546enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4547{
4548	struct ice_prof_map *pmap;
4549	enum ice_status status;
4550
4551	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4552
4553	pmap = ice_search_prof_id(hw, blk, id);
4554	if (!pmap) {
4555		status = ICE_ERR_DOES_NOT_EXIST;
4556		goto err_ice_rem_prof;
4557	}
4558
4559	/* remove all flows with this profile */
4560	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4561	if (status)
4562		goto err_ice_rem_prof;
4563
4564	/* dereference profile, and possibly remove */
4565	ice_prof_dec_ref(hw, blk, pmap->prof_id);
4566
4567	list_del(&pmap->list);
4568	devm_kfree(ice_hw_to_dev(hw), pmap);
4569
4570err_ice_rem_prof:
4571	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4572	return status;
4573}
4574
4575/**
4576 * ice_get_prof - get profile
4577 * @hw: pointer to the HW struct
4578 * @blk: hardware block
4579 * @hdl: profile handle
4580 * @chg: change list
4581 */
4582static enum ice_status
4583ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4584	     struct list_head *chg)
4585{
4586	enum ice_status status = 0;
4587	struct ice_prof_map *map;
4588	struct ice_chs_chg *p;
4589	u16 i;
4590
4591	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4592	/* Get the details on the profile specified by the handle ID */
4593	map = ice_search_prof_id(hw, blk, hdl);
4594	if (!map) {
4595		status = ICE_ERR_DOES_NOT_EXIST;
4596		goto err_ice_get_prof;
4597	}
4598
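	/* the written[] flag ensures the extraction sequence for this profile
	 * is queued for writing at most once, regardless of how many PTGs the
	 * profile maps to
	 */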
4599	for (i = 0; i < map->ptg_cnt; i++)
4600		if (!hw->blk[blk].es.written[map->prof_id]) {
4601			/* add ES to change list */
4602			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4603					 GFP_KERNEL);
4604			if (!p) {
4605				status = ICE_ERR_NO_MEMORY;
4606				goto err_ice_get_prof;
4607			}
4608
4609			p->type = ICE_PTG_ES_ADD;
4610			p->ptype = 0;
4611			p->ptg = map->ptg[i];
4612			p->add_ptg = 0;
4613
4614			p->add_prof = 1;
4615			p->prof_id = map->prof_id;
4616
4617			hw->blk[blk].es.written[map->prof_id] = true;
4618
4619			list_add(&p->list_entry, chg);
4620		}
4621
4622err_ice_get_prof:
4623	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4624	/* let caller clean up the change list */
4625	return status;
4626}
4627
4628/**
4629 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4630 * @hw: pointer to the HW struct
4631 * @blk: hardware block
4632 * @vsig: VSIG from which to copy the list
4633 * @lst: output list
4634 *
4635 * This routine makes a copy of the list of profiles in the specified VSIG.
4636 */
4637static enum ice_status
4638ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4639		   struct list_head *lst)
4640{
4641	struct ice_vsig_prof *ent1, *ent2;
4642	u16 idx = vsig & ICE_VSIG_IDX_M;
4643
4644	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4645			    list) {
4646		struct ice_vsig_prof *p;
4647
4648		/* copy to the input list */
4649		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
4650				 GFP_KERNEL);
4651		if (!p)
4652			goto err_ice_get_profs_vsig;
4653
4654		list_add_tail(&p->list, lst);
4655	}
4656
4657	return 0;
4658
4659err_ice_get_profs_vsig:
4660	list_for_each_entry_safe(ent1, ent2, lst, list) {
4661		list_del(&ent1->list);
4662		devm_kfree(ice_hw_to_dev(hw), ent1);
4663	}
4664
4665	return ICE_ERR_NO_MEMORY;
4666}
4667
4668/**
4669 * ice_add_prof_to_lst - add profile entry to a list
4670 * @hw: pointer to the HW struct
4671 * @blk: hardware block
4672 * @lst: the list to be added to
4673 * @hdl: profile handle of entry to add
4674 */
4675static enum ice_status
4676ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4677		    struct list_head *lst, u64 hdl)
4678{
4679	enum ice_status status = 0;
4680	struct ice_prof_map *map;
4681	struct ice_vsig_prof *p;
4682	u16 i;
4683
4684	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4685	map = ice_search_prof_id(hw, blk, hdl);
4686	if (!map) {
4687		status = ICE_ERR_DOES_NOT_EXIST;
4688		goto err_ice_add_prof_to_lst;
4689	}
4690
4691	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4692	if (!p) {
4693		status = ICE_ERR_NO_MEMORY;
4694		goto err_ice_add_prof_to_lst;
4695	}
4696
4697	p->profile_cookie = map->profile_cookie;
4698	p->prof_id = map->prof_id;
4699	p->tcam_count = map->ptg_cnt;
4700
4701	for (i = 0; i < map->ptg_cnt; i++) {
4702		p->tcam[i].prof_id = map->prof_id;
4703		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
4704		p->tcam[i].ptg = map->ptg[i];
4705	}
4706
4707	list_add(&p->list, lst);
4708
4709err_ice_add_prof_to_lst:
4710	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4711	return status;
4712}
4713
4714/**
4715 * ice_move_vsi - move VSI to another VSIG
4716 * @hw: pointer to the HW struct
4717 * @blk: hardware block
4718 * @vsi: the VSI to move
4719 * @vsig: the VSIG to move the VSI to
4720 * @chg: the change list
4721 */
4722static enum ice_status
4723ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
4724	     struct list_head *chg)
4725{
4726	enum ice_status status;
4727	struct ice_chs_chg *p;
4728	u16 orig_vsig;
4729
4730	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4731	if (!p)
4732		return ICE_ERR_NO_MEMORY;
4733
4734	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
4735	if (!status)
4736		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
4737
4738	if (status) {
4739		devm_kfree(ice_hw_to_dev(hw), p);
4740		return status;
4741	}
4742
4743	p->type = ICE_VSI_MOVE;
4744	p->vsi = vsi;
4745	p->orig_vsig = orig_vsig;
4746	p->vsig = vsig;
4747
4748	list_add(&p->list_entry, chg);
4749
4750	return 0;
4751}
4752
4753/**
4754 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
4755 * @hw: pointer to the HW struct
4756 * @idx: the index of the TCAM entry to remove
4757 * @chg: the list of change structures to search
4758 */
4759static void
4760ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
4761{
4762	struct ice_chs_chg *pos, *tmp;
4763
4764	list_for_each_entry_safe(tmp, pos, chg, list_entry)
4765		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
4766			list_del(&tmp->list_entry);
4767			devm_kfree(ice_hw_to_dev(hw), tmp);
4768		}
4769}
4770
4771/**
4772 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
4773 * @hw: pointer to the HW struct
4774 * @blk: hardware block
4775 * @enable: true to enable, false to disable
4776 * @vsig: the VSIG of the TCAM entry
4777 * @tcam: pointer the TCAM info structure of the TCAM to disable
4778 * @chg: the change list
4779 *
4780 * This function appends an enable or disable TCAM entry in the change log
4781 */
4782static enum ice_status
4783ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
4784		      u16 vsig, struct ice_tcam_inf *tcam,
4785		      struct list_head *chg)
4786{
4787	enum ice_status status;
4788	struct ice_chs_chg *p;
4789
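	/* TCAM key masks that ignore the flags portion of the key */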
4790	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4791	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4792	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4793
4794	/* if disabling, free the TCAM */
4795	if (!enable) {
4796		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
4797
4798		/* if we have already created a change for this TCAM entry, then
4799		 * we need to remove that entry, in order to prevent writing to
4800		 * a TCAM entry we will no longer have ownership of.
4801		 */
4802		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
4803		tcam->tcam_idx = 0;
4804		tcam->in_use = 0;
4805		return status;
4806	}
4807
4808	/* for re-enabling, reallocate a TCAM */
4809	/* for entries with empty attribute masks, allocate entry from
4810	 * the bottom of the TCAM table; otherwise, allocate from the
4811	 * top of the table in order to give it higher priority
4812	 */
4813	status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
4814				    &tcam->tcam_idx);
4815	if (status)
4816		return status;
4817
4818	/* add TCAM to change list */
4819	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4820	if (!p)
4821		return ICE_ERR_NO_MEMORY;
4822
4823	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
4824				      tcam->ptg, vsig, 0, tcam->attr.flags,
4825				      vl_msk, dc_msk, nm_msk);
4826	if (status)
4827		goto err_ice_prof_tcam_ena_dis;
4828
4829	tcam->in_use = 1;
4830
4831	p->type = ICE_TCAM_ADD;
4832	p->add_tcam_idx = true;
4833	p->prof_id = tcam->prof_id;
4834	p->ptg = tcam->ptg;
4835	p->vsig = 0;
4836	p->tcam_idx = tcam->tcam_idx;
4837
4838	/* log change */
4839	list_add(&p->list_entry, chg);
4840
4841	return 0;
4842
4843err_ice_prof_tcam_ena_dis:
4844	devm_kfree(ice_hw_to_dev(hw), p);
4845	return status;
4846}
4847
4848/**
4849 * ice_adj_prof_priorities - adjust profile based on priorities
4850 * @hw: pointer to the HW struct
4851 * @blk: hardware block
4852 * @vsig: the VSIG for which to adjust profile priorities
4853 * @chg: the change list
4854 */
4855static enum ice_status
4856ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4857			struct list_head *chg)
4858{
4859	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4860	struct ice_vsig_prof *t;
4861	enum ice_status status;
4862	u16 idx;
4863
4864	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4865	idx = vsig & ICE_VSIG_IDX_M;
4866
4867	/* Priority is based on the order in which the profiles are added. The
4868	 * newest added profile has highest priority and the oldest added
4869	 * profile has the lowest priority. Since the profile property list for
4870	 * a VSIG is sorted from newest to oldest, this code traverses the list
4871	 * in order and enables the first of each PTG that it finds (that is not
4872	 * already enabled); it also disables any duplicate PTGs that it finds
4873	 * in the older profiles (that are currently enabled).
4874	 */
4875
4876	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4877			    list) {
4878		u16 i;
4879
4880		for (i = 0; i < t->tcam_count; i++) {
4881			/* Scan the priorities from newest to oldest.
4882			 * Make sure that the newest profiles take priority.
4883			 */
4884			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
4885			    t->tcam[i].in_use) {
4886				/* need to mark this PTG as never match, as it
4887				 * was already in use and is therefore a duplicate
4888				 * (and lower priority)
4889				 */
4890				status = ice_prof_tcam_ena_dis(hw, blk, false,
4891							       vsig,
4892							       &t->tcam[i],
4893							       chg);
4894				if (status)
4895					return status;
4896			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
4897				   !t->tcam[i].in_use) {
4898				/* need to enable this PTG, as it is not in use
4899				 * and not enabled (highest priority)
4900				 */
4901				status = ice_prof_tcam_ena_dis(hw, blk, true,
4902							       vsig,
4903							       &t->tcam[i],
4904							       chg);
4905				if (status)
4906					return status;
4907			}
4908
4909			/* keep track of used ptgs */
4910			set_bit(t->tcam[i].ptg, ptgs_used);
4911		}
4912	}
4913
4914	return 0;
4915}
4916
4917/**
4918 * ice_add_prof_id_vsig - add profile to VSIG
4919 * @hw: pointer to the HW struct
4920 * @blk: hardware block
4921 * @vsig: the VSIG to which this profile is to be added
4922 * @hdl: the profile handle indicating the profile to add
4923 * @rev: true to add entries to the end of the list
4924 * @chg: the change list
4925 */
4926static enum ice_status
4927ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4928		     bool rev, struct list_head *chg)
4929{
4930	/* Masks that ignore flags */
4931	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4932	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4933	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4934	enum ice_status status = 0;
4935	struct ice_prof_map *map;
4936	struct ice_vsig_prof *t;
4937	struct ice_chs_chg *p;
4938	u16 vsig_idx, i;
4939
4940	/* Error if this VSIG already has this profile */
4941	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
4942		return ICE_ERR_ALREADY_EXISTS;
4943
4944	/* new VSIG profile structure */
4945	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
4946	if (!t)
4947		return ICE_ERR_NO_MEMORY;
4948
4949	mutex_lock(&hw->blk[blk].es.prof_map_lock);
4950	/* Get the details on the profile specified by the handle ID */
4951	map = ice_search_prof_id(hw, blk, hdl);
4952	if (!map) {
4953		status = ICE_ERR_DOES_NOT_EXIST;
4954		goto err_ice_add_prof_id_vsig;
4955	}
4956
4957	t->profile_cookie = map->profile_cookie;
4958	t->prof_id = map->prof_id;
4959	t->tcam_count = map->ptg_cnt;
4960
4961	/* create TCAM entries */
4962	for (i = 0; i < map->ptg_cnt; i++) {
4963		u16 tcam_idx;
4964
4965		/* add TCAM to change list */
4966		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4967		if (!p) {
4968			status = ICE_ERR_NO_MEMORY;
4969			goto err_ice_add_prof_id_vsig;
4970		}
4971
4972		/* allocate the TCAM entry index */
4973		/* for entries with empty attribute masks, allocate entry from
4974		 * the bottom of the TCAM table; otherwise, allocate from the
4975		 * top of the table in order to give it higher priority
4976		 */
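		/* Editorial note (not from the original source): the boolean
		 * passed to ice_alloc_tcam_ent() below is true only when the
		 * attribute mask is zero, so plain entries are drawn from the
		 * bottom of the table while attribute-qualified entries get
		 * the higher-priority slots at the top.
		 */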
4977		status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
4978					    &tcam_idx);
4979		if (status) {
4980			devm_kfree(ice_hw_to_dev(hw), p);
4981			goto err_ice_add_prof_id_vsig;
4982		}
4983
4984		t->tcam[i].ptg = map->ptg[i];
4985		t->tcam[i].prof_id = map->prof_id;
4986		t->tcam[i].tcam_idx = tcam_idx;
4987		t->tcam[i].attr = map->attr[i];
4988		t->tcam[i].in_use = true;
4989
4990		p->type = ICE_TCAM_ADD;
4991		p->add_tcam_idx = true;
4992		p->prof_id = t->tcam[i].prof_id;
4993		p->ptg = t->tcam[i].ptg;
4994		p->vsig = vsig;
4995		p->tcam_idx = t->tcam[i].tcam_idx;
4996
4997		/* write the TCAM entry */
4998		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
4999					      t->tcam[i].prof_id,
5000					      t->tcam[i].ptg, vsig, 0, 0,
5001					      vl_msk, dc_msk, nm_msk);
5002		if (status) {
5003			devm_kfree(ice_hw_to_dev(hw), p);
5004			goto err_ice_add_prof_id_vsig;
5005		}
5006
5007		/* log change */
5008		list_add(&p->list_entry, chg);
5009	}
5010
5011	/* add profile to VSIG */
5012	vsig_idx = vsig & ICE_VSIG_IDX_M;
5013	if (rev)
5014		list_add_tail(&t->list,
5015			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5016	else
5017		list_add(&t->list,
5018			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5019
5020	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5021	return status;
5022
5023err_ice_add_prof_id_vsig:
5024	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5025	/* let caller clean up the change list */
5026	devm_kfree(ice_hw_to_dev(hw), t);
5027	return status;
5028}
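
/* Editorial note (not part of the original source): each TCAM entry created
 * above is tracked twice -- once in the VSIG's software copy (t->tcam[i])
 * and once as an ICE_TCAM_ADD record on the caller's change list, which
 * ice_upd_prof_hw() later uses to commit the update to hardware.  On failure
 * the function frees only 't'; entries already placed on 'chg' are left for
 * the caller to clean up, as noted above.
 */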
5029
5030/**
5031 * ice_create_prof_id_vsig - add a new VSIG with a single profile
5032 * @hw: pointer to the HW struct
5033 * @blk: hardware block
5034 * @vsi: the initial VSI that will be in VSIG
5035 * @hdl: the profile handle of the profile that will be added to the VSIG
5036 * @chg: the change list
5037 */
5038static enum ice_status
5039ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5040			struct list_head *chg)
5041{
5042	enum ice_status status;
5043	struct ice_chs_chg *p;
5044	u16 new_vsig;
5045
5046	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5047	if (!p)
5048		return ICE_ERR_NO_MEMORY;
5049
5050	new_vsig = ice_vsig_alloc(hw, blk);
5051	if (!new_vsig) {
5052		status = ICE_ERR_HW_TABLE;
5053		goto err_ice_create_prof_id_vsig;
5054	}
5055
5056	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5057	if (status)
5058		goto err_ice_create_prof_id_vsig;
5059
5060	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5061	if (status)
5062		goto err_ice_create_prof_id_vsig;
5063
5064	p->type = ICE_VSIG_ADD;
5065	p->vsi = vsi;
5066	p->orig_vsig = ICE_DEFAULT_VSIG;
5067	p->vsig = new_vsig;
5068
5069	list_add(&p->list_entry, chg);
5070
5071	return 0;
5072
5073err_ice_create_prof_id_vsig:
5074	/* let caller clean up the change list */
5075	devm_kfree(ice_hw_to_dev(hw), p);
5076	return status;
5077}
5078
5079/**
5080 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5081 * @hw: pointer to the HW struct
5082 * @blk: hardware block
5083 * @vsi: the initial VSI that will be in VSIG
5084 * @lst: the list of profiles that will be added to the VSIG
5085 * @new_vsig: return of new VSIG
5086 * @chg: the change list
5087 */
5088static enum ice_status
5089ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5090			 struct list_head *lst, u16 *new_vsig,
5091			 struct list_head *chg)
5092{
5093	struct ice_vsig_prof *t;
5094	enum ice_status status;
5095	u16 vsig;
5096
5097	vsig = ice_vsig_alloc(hw, blk);
5098	if (!vsig)
5099		return ICE_ERR_HW_TABLE;
5100
5101	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5102	if (status)
5103		return status;
5104
5105	list_for_each_entry(t, lst, list) {
5106		/* Reverse the order here since we are copying the list */
5107		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5108					      true, chg);
5109		if (status)
5110			return status;
5111	}
5112
5113	*new_vsig = vsig;
5114
5115	return 0;
5116}
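
/* Editorial note (not part of the original source): each entry copied from
 * 'lst' above is added with rev = true, i.e. via list_add_tail(), so the new
 * VSIG's property list ends up in the same order in which 'lst' was walked;
 * with rev = false (plain list_add()) every entry would be prepended and the
 * copied order would be reversed.
 */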
5117
5118/**
5119 * ice_find_prof_vsig - find a VSIG with a specific profile handle
5120 * @hw: pointer to the HW struct
5121 * @blk: hardware block
5122 * @hdl: the profile handle of the profile to search for
5123 * @vsig: returns the VSIG with the matching profile
5124 */
5125static bool
5126ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5127{
5128	struct ice_vsig_prof *t;
5129	enum ice_status status;
5130	struct list_head lst;
5131
5132	INIT_LIST_HEAD(&lst);
5133
5134	t = kzalloc(sizeof(*t), GFP_KERNEL);
5135	if (!t)
5136		return false;
5137
5138	t->profile_cookie = hdl;
5139	list_add(&t->list, &lst);
5140
5141	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5142
5143	list_del(&t->list);
5144	kfree(t);
5145
5146	return !status;
5147}
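
/* Editorial note (not part of the original source): ice_find_prof_vsig()
 * reuses ice_find_dup_props_vsig() by building a temporary one-entry
 * characteristic list, so the search succeeds only for a VSIG whose profile
 * list consists of exactly that one handle.
 */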
5148
5149/**
5150 * ice_add_prof_id_flow - add profile flow
5151 * @hw: pointer to the HW struct
5152 * @blk: hardware block
5153 * @vsi: the VSI to enable with the profile specified by ID
5154 * @hdl: profile handle
5155 *
5156 * Calling this function will update the hardware tables to enable the
5157 * profile indicated by @hdl for the VSI specified by @vsi. Once the call
5158 * succeeds, the flow is enabled.
5159 */
5160enum ice_status
5161ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5162{
5163	struct ice_vsig_prof *tmp1, *del1;
5164	struct ice_chs_chg *tmp, *del;
5165	struct list_head union_lst;
5166	enum ice_status status;
5167	struct list_head chg;
5168	u16 vsig;
5169
5170	INIT_LIST_HEAD(&union_lst);
5171	INIT_LIST_HEAD(&chg);
5172
5173	/* Get profile */
5174	status = ice_get_prof(hw, blk, hdl, &chg);
5175	if (status)
5176		return status;
5177
5178	/* determine if VSI is already part of a VSIG */
5179	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5180	if (!status && vsig) {
5181		bool only_vsi;
5182		u16 or_vsig;
5183		u16 ref;
5184
5185		/* found in VSIG */
5186		or_vsig = vsig;
5187
5188		/* make sure that there is no overlap/conflict between the new
5189		 * characteristics and the existing ones; we don't support that
5190		 * scenario
5191		 */
5192		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5193			status = ICE_ERR_ALREADY_EXISTS;
5194			goto err_ice_add_prof_id_flow;
5195		}
5196
5197		/* last VSI in the VSIG? */
5198		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5199		if (status)
5200			goto err_ice_add_prof_id_flow;
5201		only_vsi = (ref == 1);
5202
5203		/* create a union of the current profiles and the one being
5204		 * added
5205		 */
5206		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5207		if (status)
5208			goto err_ice_add_prof_id_flow;
5209
5210		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5211		if (status)
5212			goto err_ice_add_prof_id_flow;
5213
5214		/* search for an existing VSIG with matching characteristics */
5215		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5216		if (!status) {
5217			/* move VSI to the VSIG that matches */
5218			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5219			if (status)
5220				goto err_ice_add_prof_id_flow;
5221
5222			/* VSI has been moved out of or_vsig. If or_vsig held
5223			 * only that VSI, it is now empty and can be removed.
5224			 */
5225			if (only_vsi) {
5226				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5227				if (status)
5228					goto err_ice_add_prof_id_flow;
5229			}
5230		} else if (only_vsi) {
5231			/* If the original VSIG only contains one VSI, then it
5232			 * will be the requesting VSI. In this case the VSI is
5233			 * not sharing entries and we can simply add the new
5234			 * profile to the VSIG.
5235			 */
5236			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5237						      &chg);
5238			if (status)
5239				goto err_ice_add_prof_id_flow;
5240
5241			/* Adjust priorities */
5242			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5243			if (status)
5244				goto err_ice_add_prof_id_flow;
5245		} else {
5246			/* No match, so we need a new VSIG */
5247			status = ice_create_vsig_from_lst(hw, blk, vsi,
5248							  &union_lst, &vsig,
5249							  &chg);
5250			if (status)
5251				goto err_ice_add_prof_id_flow;
5252
5253			/* Adjust priorities */
5254			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5255			if (status)
5256				goto err_ice_add_prof_id_flow;
5257		}
5258	} else {
5259		/* need to find or add a VSIG */
5260		/* search for an existing VSIG with matching characteristics */
5261		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5262			/* found an exact match */
5263			/* add or move VSI to the VSIG that matches */
5264			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5265			if (status)
5266				goto err_ice_add_prof_id_flow;
5267		} else {
5268			/* we did not find an exact match */
5269			/* we need to add a VSIG */
5270			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5271							 &chg);
5272			if (status)
5273				goto err_ice_add_prof_id_flow;
5274		}
5275	}
5276
5277	/* update hardware */
5278	if (!status)
5279		status = ice_upd_prof_hw(hw, blk, &chg);
5280
5281err_ice_add_prof_id_flow:
5282	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5283		list_del(&del->list_entry);
5284		devm_kfree(ice_hw_to_dev(hw), del);
5285	}
5286
5287	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
5288		list_del(&del1->list);
5289		devm_kfree(ice_hw_to_dev(hw), del1);
5290	}
5291
5292	return status;
5293}
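
/* Illustrative usage sketch (editorial addition, not part of the driver): a
 * hypothetical caller that enables an already-registered profile for one VSI
 * in the RSS block.  The profile handle 'prof_hdl' is assumed to have been
 * created elsewhere (e.g. via ice_add_prof()), and 'vsi' is assumed to be
 * the hardware VSI number this file expects.
 */
static enum ice_status example_enable_prof(struct ice_hw *hw, u16 vsi,
					   u64 prof_hdl)
{
	/* associate the profile with the VSI: this finds or creates a
	 * suitable VSIG, writes the needed TCAM entries and commits the
	 * resulting change list to hardware
	 */
	return ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, prof_hdl);
}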
5294
5295/**
5296 * ice_rem_prof_from_list - remove a profile from list
5297 * @hw: pointer to the HW struct
5298 * @lst: list to remove the profile from
5299 * @hdl: the profile handle indicating the profile to remove
5300 */
5301static enum ice_status
5302ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
5303{
5304	struct ice_vsig_prof *ent, *tmp;
5305
5306	list_for_each_entry_safe(ent, tmp, lst, list)
5307		if (ent->profile_cookie == hdl) {
5308			list_del(&ent->list);
5309			devm_kfree(ice_hw_to_dev(hw), ent);
5310			return 0;
5311		}
5312
5313	return ICE_ERR_DOES_NOT_EXIST;
5314}
5315
5316/**
5317 * ice_rem_prof_id_flow - remove flow
5318 * @hw: pointer to the HW struct
5319 * @blk: hardware block
5320 * @vsi: the VSI from which to remove the profile specified by ID
5321 * @hdl: profile tracking handle
5322 *
5323 * Calling this function will update the hardware tables to remove the
5324 * profile indicated by @hdl from the VSI specified by @vsi. Once the call
5325 * succeeds, the flow is disabled.
5326 */
5327enum ice_status
5328ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5329{
5330	struct ice_vsig_prof *tmp1, *del1;
5331	struct ice_chs_chg *tmp, *del;
5332	struct list_head chg, copy;
5333	enum ice_status status;
5334	u16 vsig;
5335
5336	INIT_LIST_HEAD(&copy);
5337	INIT_LIST_HEAD(&chg);
5338
5339	/* determine if VSI is already part of a VSIG */
5340	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5341	if (!status && vsig) {
5342		bool last_profile;
5343		bool only_vsi;
5344		u16 ref;
5345
5346		/* found in VSIG */
5347		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
5348		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5349		if (status)
5350			goto err_ice_rem_prof_id_flow;
5351		only_vsi = (ref == 1);
5352
5353		if (only_vsi) {
5354			/* If the original VSIG only contains one reference,
5355			 * which will be the requesting VSI, then the VSI is not
5356			 * sharing entries and we can simply remove the specific
5357			 * characteristics from the VSIG.
5358			 */
5359
5360			if (last_profile) {
5361				/* If there are no profiles left for this VSIG,
5362				 * then simply remove the VSIG.
5363				 */
5364				status = ice_rem_vsig(hw, blk, vsig, &chg);
5365				if (status)
5366					goto err_ice_rem_prof_id_flow;
5367			} else {
5368				status = ice_rem_prof_id_vsig(hw, blk, vsig,
5369							      hdl, &chg);
5370				if (status)
5371					goto err_ice_rem_prof_id_flow;
5372
5373				/* Adjust priorities */
5374				status = ice_adj_prof_priorities(hw, blk, vsig,
5375								 &chg);
5376				if (status)
5377					goto err_ice_rem_prof_id_flow;
5378			}
5379
5380		} else {
5381			/* Make a copy of the VSIG's list of Profiles */
5382			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
5383			if (status)
5384				goto err_ice_rem_prof_id_flow;
5385
5386			/* Remove specified profile entry from the list */
5387			status = ice_rem_prof_from_list(hw, &copy, hdl);
5388			if (status)
5389				goto err_ice_rem_prof_id_flow;
5390
5391			if (list_empty(&copy)) {
5392				status = ice_move_vsi(hw, blk, vsi,
5393						      ICE_DEFAULT_VSIG, &chg);
5394				if (status)
5395					goto err_ice_rem_prof_id_flow;
5396
5397			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
5398							    &vsig)) {
5399				/* The duplicate search found an existing
5400				 * VSIG whose profile list exactly matches
5401				 * the remaining characteristics, so reuse
5402				 * it instead of creating a new VSIG.
5403				 */
5404
5405				/* move the VSI to the matching VSIG */
5406				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5407				if (status)
5408					goto err_ice_rem_prof_id_flow;
5409			} else {
5410				/* since no existing VSIG supports this
5411				 * characteristic pattern, we need to create a
5412				 * new VSIG and TCAM entries
5413				 */
5414				status = ice_create_vsig_from_lst(hw, blk, vsi,
5415								  &copy, &vsig,
5416								  &chg);
5417				if (status)
5418					goto err_ice_rem_prof_id_flow;
5419
5420				/* Adjust priorities */
5421				status = ice_adj_prof_priorities(hw, blk, vsig,
5422								 &chg);
5423				if (status)
5424					goto err_ice_rem_prof_id_flow;
5425			}
5426		}
5427	} else {
5428		status = ICE_ERR_DOES_NOT_EXIST;
5429	}
5430
5431	/* update hardware tables */
5432	if (!status)
5433		status = ice_upd_prof_hw(hw, blk, &chg);
5434
5435err_ice_rem_prof_id_flow:
5436	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5437		list_del(&del->list_entry);
5438		devm_kfree(ice_hw_to_dev(hw), del);
5439	}
5440
5441	list_for_each_entry_safe(del1, tmp1, &copy, list) {
5442		list_del(&del1->list);
5443		devm_kfree(ice_hw_to_dev(hw), del1);
5444	}
5445
5446	return status;
5447}
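
/* Illustrative usage sketch (editorial addition, not part of the driver):
 * the complementary teardown path for the example above, detaching the same
 * hypothetical profile handle from the VSI.  If the VSI is alone in its VSIG
 * and this was the VSIG's last profile, the VSIG itself is removed as well.
 */
static enum ice_status example_disable_prof(struct ice_hw *hw, u16 vsi,
					    u64 prof_hdl)
{
	return ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, prof_hdl);
}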