Linux Audio

Check our new training course

Loading...
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice_switch.h"
   5
/* Byte offsets into the dummy Ethernet header below */
#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
/* 12-bit VLAN ID field maximum (802.1Q TCI) */
#define ICE_MAX_VLAN_ID			0xFFF

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN id
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* Sizes of the variable-length switch-rule AQ buffers below are computed as:
 * descriptor header (elem minus its pdata union) + the specific rule struct,
 * plus any trailing variable-length payload.
 *
 * NOTE(review): the trailing "- 1" presumably compensates for a one-byte
 * placeholder array already counted inside ice_sw_rule_lkup_rx_tx — confirm
 * against the struct layout in ice_adminq_cmd.h.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Large-action rule sized for n 32-bit action entries */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule sized for n 16-bit VSI entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
  51
  52/**
  53 * ice_aq_alloc_free_res - command to allocate/free resources
  54 * @hw: pointer to the hw struct
  55 * @num_entries: number of resource entries in buffer
  56 * @buf: Indirect buffer to hold data parameters and response
  57 * @buf_size: size of buffer for indirect commands
  58 * @opc: pass in the command opcode
  59 * @cd: pointer to command details structure or NULL
  60 *
  61 * Helper function to allocate/free resources using the admin queue commands
 
  62 */
  63static enum ice_status
  64ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  65		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
  66		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  67{
  68	struct ice_aqc_alloc_free_res_cmd *cmd;
  69	struct ice_aq_desc desc;
  70
  71	cmd = &desc.params.sw_res_ctrl;
  72
  73	if (!buf)
  74		return ICE_ERR_PARAM;
  75
  76	if (buf_size < (num_entries * sizeof(buf->elem[0])))
  77		return ICE_ERR_PARAM;
  78
  79	ice_fill_dflt_direct_cmd_desc(&desc, opc);
  80
  81	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 
 
 
 
 
  82
  83	cmd->num_entries = cpu_to_le16(num_entries);
  84
  85	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  86}
  87
  88/**
  89 * ice_aq_get_sw_cfg - get switch configuration
  90 * @hw: pointer to the hardware structure
  91 * @buf: pointer to the result buffer
  92 * @buf_size: length of the buffer available for response
  93 * @req_desc: pointer to requested descriptor
  94 * @num_elems: pointer to number of elements
  95 * @cd: pointer to command details structure or NULL
  96 *
  97 * Get switch configuration (0x0200) to be placed in 'buff'.
  98 * This admin command returns information such as initial VSI/port number
  99 * and switch ID it belongs to.
 100 *
 101 * NOTE: *req_desc is both an input/output parameter.
 102 * The caller of this function first calls this function with *request_desc set
 103 * to 0.  If the response from f/w has *req_desc set to 0, all the switch
 104 * configuration information has been returned; if non-zero (meaning not all
 105 * the information was returned), the caller should call this function again
 106 * with *req_desc set to the previous value returned by f/w to get the
 107 * next block of switch configuration information.
 108 *
 109 * *num_elems is output only parameter. This reflects the number of elements
 110 * in response buffer. The caller of this function to use *num_elems while
 111 * parsing the response buffer.
 112 */
 113static enum ice_status
 114ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
 115		  u16 buf_size, u16 *req_desc, u16 *num_elems,
 116		  struct ice_sq_cd *cd)
 117{
 118	struct ice_aqc_get_sw_cfg *cmd;
 119	enum ice_status status;
 120	struct ice_aq_desc desc;
 
 121
 122	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
 123	cmd = &desc.params.get_sw_conf;
 124	cmd->element = cpu_to_le16(*req_desc);
 125
 126	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 127	if (!status) {
 128		*req_desc = le16_to_cpu(cmd->element);
 129		*num_elems = le16_to_cpu(cmd->num_elems);
 130	}
 131
 132	return status;
 133}
 134
 135/**
 136 * ice_aq_add_vsi
 137 * @hw: pointer to the hw struct
 138 * @vsi_ctx: pointer to a VSI context struct
 139 * @cd: pointer to command details structure or NULL
 140 *
 141 * Add a VSI context to the hardware (0x0210)
 142 */
 143enum ice_status
 144ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 145	       struct ice_sq_cd *cd)
 146{
 147	struct ice_aqc_add_update_free_vsi_resp *res;
 148	struct ice_aqc_add_get_update_free_vsi *cmd;
 149	enum ice_status status;
 150	struct ice_aq_desc desc;
 
 151
 152	cmd = &desc.params.vsi_cmd;
 153	res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
 154
 155	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
 156
 157	if (!vsi_ctx->alloc_from_pool)
 158		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
 159					   ICE_AQ_VSI_IS_VALID);
 
 160
 161	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
 162
 163	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 164
 165	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
 166				 sizeof(vsi_ctx->info), cd);
 167
 168	if (!status) {
 169		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
 170		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
 171		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
 172	}
 173
 174	return status;
 175}
 176
 177/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 178 * ice_aq_update_vsi
 179 * @hw: pointer to the hw struct
 180 * @vsi_ctx: pointer to a VSI context struct
 181 * @cd: pointer to command details structure or NULL
 182 *
 183 * Update VSI context in the hardware (0x0211)
 184 */
 185enum ice_status
 186ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 187		  struct ice_sq_cd *cd)
 188{
 189	struct ice_aqc_add_update_free_vsi_resp *resp;
 190	struct ice_aqc_add_get_update_free_vsi *cmd;
 191	struct ice_aq_desc desc;
 192	enum ice_status status;
 193
 194	cmd = &desc.params.vsi_cmd;
 195	resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
 196
 197	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
 198
 199	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
 200
 201	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 202
 203	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
 204				 sizeof(vsi_ctx->info), cd);
 205
 206	if (!status) {
 207		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
 208		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
 209	}
 210
 211	return status;
 212}
 213
/**
 * ice_aq_free_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	/* Response shares the descriptor's raw parameter area */
	resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* Optionally keep the allocation accounted to this PF */
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		/* Report back how many VSIs HW now counts as used/free */
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}
 249
 250/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 251 * ice_aq_alloc_free_vsi_list
 252 * @hw: pointer to the hw struct
 253 * @vsi_list_id: VSI list id returned or used for lookup
 254 * @lkup_type: switch rule filter lookup type
 255 * @opc: switch rules population command type - pass in the command opcode
 256 *
 257 * allocates or free a VSI list resource
 258 */
 259static enum ice_status
 260ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
 261			   enum ice_sw_lkup_type lkup_type,
 262			   enum ice_adminq_opc opc)
 263{
 264	struct ice_aqc_alloc_free_res_elem *sw_buf;
 265	struct ice_aqc_res_elem *vsi_ele;
 266	enum ice_status status;
 267	u16 buf_len;
 268
 269	buf_len = sizeof(*sw_buf);
 270	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
 271	if (!sw_buf)
 272		return ICE_ERR_NO_MEMORY;
 273	sw_buf->num_elems = cpu_to_le16(1);
 274
 275	if (lkup_type == ICE_SW_LKUP_MAC ||
 276	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
 277	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
 278	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
 279	    lkup_type == ICE_SW_LKUP_PROMISC ||
 280	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
 281		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
 282	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
 283		sw_buf->res_type =
 284			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
 285	} else {
 286		status = ICE_ERR_PARAM;
 287		goto ice_aq_alloc_free_vsi_list_exit;
 288	}
 289
 290	if (opc == ice_aqc_opc_free_res)
 291		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
 292
 293	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
 294	if (status)
 295		goto ice_aq_alloc_free_vsi_list_exit;
 296
 297	if (opc == ice_aqc_opc_alloc_res) {
 298		vsi_ele = &sw_buf->elem[0];
 299		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
 300	}
 301
 302ice_aq_alloc_free_vsi_list_exit:
 303	devm_kfree(ice_hw_to_dev(hw), sw_buf);
 304	return status;
 305}
 306
 307/**
 308 * ice_aq_sw_rules - add/update/remove switch rules
 309 * @hw: pointer to the hw struct
 310 * @rule_list: pointer to switch rule population list
 311 * @rule_list_sz: total size of the rule list in bytes
 312 * @num_rules: number of switch rules in the rule_list
 313 * @opc: switch rules population command type - pass in the command opcode
 314 * @cd: pointer to command details structure or NULL
 315 *
 316 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 317 */
 318static enum ice_status
 319ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
 320		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
 321{
 322	struct ice_aq_desc desc;
 
 323
 324	if (opc != ice_aqc_opc_add_sw_rules &&
 325	    opc != ice_aqc_opc_update_sw_rules &&
 326	    opc != ice_aqc_opc_remove_sw_rules)
 327		return ICE_ERR_PARAM;
 328
 329	ice_fill_dflt_direct_cmd_desc(&desc, opc);
 330
 331	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 332	desc.params.sw_rules.num_rules_fltr_entry_index =
 333		cpu_to_le16(num_rules);
 334	return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
 
 
 
 
 
 335}
 336
/**
 * ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		/* No default Tx/Rx VSI known yet */
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		/* Only physical ports are expected here; anything else is
		 * reported and ignored.
		 */
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
		break;
	}
}
 364
/**
 * ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 req_desc = 0;
	u16 num_elems;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 type;

			ele = rbuf[i].elements;
			/* Low bits carry the VSI/port number ... */
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			/* ... high bits encode the element type */
			type = le16_to_cpu(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
	return status;
}
 429
 430/**
 431 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 432 * @hw: pointer to the hardware structure
 433 * @f_info: filter info structure to fill/update
 434 *
 435 * This helper function populates the lb_en and lan_en elements of the provided
 436 * ice_fltr_info struct using the switch's type and characteristics of the
 437 * switch rule being configured.
 438 */
 439static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
 440{
 441	f_info->lb_en = false;
 442	f_info->lan_en = false;
 443	if ((f_info->flag & ICE_FLTR_TX) &&
 444	    (f_info->fltr_act == ICE_FWD_TO_VSI ||
 445	     f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
 446	     f_info->fltr_act == ICE_FWD_TO_Q ||
 447	     f_info->fltr_act == ICE_FWD_TO_QGRP)) {
 448		f_info->lb_en = true;
 449		if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
 450		      is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
 451			f_info->lan_en = true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 452	}
 453}
 454
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* Sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u8 eth_hdr[DUMMY_ETH_HDR_LEN];
	void *daddr = NULL;
	u32 act = 0;
	__be16 *off;

	/* A remove only needs the rule index; no header or action */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header));
	ice_fill_sw_info(hw, f_info);

	/* Build the action word from the forwarding target */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
		break;
	default:
		/* Unknown action: leave the rule unfilled */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Extract match data (DA, VLAN, ethertype) per lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		/* Program the ethertype into the dummy header */
		off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET];
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr);

	/* Program the VLAN TCI only if a valid 12-bit id was set above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET];
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr));

	memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr));
}
 579
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource id
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile id
	 * 3. GENERIC VALUE action to hold the software marker id
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u16 vsi_info;
	u32 act;

	/* Markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up tx rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* Second rule lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	vsi_info = (m_ent->vsi_count > 1) ?
		m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	/* NOTE(review): an OFFSET_S-shifted value is masked here with the
	 * VALUE mask — this looks like it should use a GENERIC offset mask
	 * instead; confirm against the ICE_LG_ACT_GENERIC_* definitions.
	 */
	act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	/* NOTE(review): (0 << shift) & mask is always 0 — this OR is a no-op;
	 * possibly intended to program a non-zero generic offset.
	 */
	act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup tx rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action id */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule id of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
 685
 686/**
 687 * ice_create_vsi_list_map
 688 * @hw: pointer to the hardware structure
 689 * @vsi_array: array of VSIs to form a VSI list
 690 * @num_vsi: num VSI in the array
 691 * @vsi_list_id: VSI list id generated as part of allocate resource
 692 *
 693 * Helper function to create a new entry of VSI list id to VSI mapping
 694 * using the given VSI list id
 695 */
 696static struct ice_vsi_list_map_info *
 697ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
 698			u16 vsi_list_id)
 699{
 700	struct ice_switch_info *sw = hw->switch_info;
 701	struct ice_vsi_list_map_info *v_map;
 702	int i;
 703
 704	v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
 705	if (!v_map)
 706		return NULL;
 707
 708	v_map->vsi_list_id = vsi_list_id;
 709
 710	for (i = 0; i < num_vsi; i++)
 711		set_bit(vsi_array[i], v_map->vsi_map);
 712
 713	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
 714	return v_map;
 715}
 716
 717/**
 718 * ice_update_vsi_list_rule
 719 * @hw: pointer to the hardware structure
 720 * @vsi_array: array of VSIs to form a VSI list
 721 * @num_vsi: num VSI in the array
 722 * @vsi_list_id: VSI list id generated as part of allocate resource
 723 * @remove: Boolean value to indicate if this is a remove action
 724 * @opc: switch rules population command type - pass in the command opcode
 725 * @lkup_type: lookup type of the filter
 726 *
 727 * Call AQ command to add a new switch rule or update existing switch rule
 728 * using the given VSI list id
 729 */
 730static enum ice_status
 731ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
 732			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
 733			 enum ice_sw_lkup_type lkup_type)
 734{
 735	struct ice_aqc_sw_rules_elem *s_rule;
 736	enum ice_status status;
 737	u16 s_rule_size;
 738	u16 type;
 739	int i;
 740
 741	if (!num_vsi)
 742		return ICE_ERR_PARAM;
 743
 744	if (lkup_type == ICE_SW_LKUP_MAC ||
 745	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
 746	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
 747	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
 748	    lkup_type == ICE_SW_LKUP_PROMISC ||
 749	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
 750		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
 751				ICE_AQC_SW_RULES_T_VSI_LIST_SET;
 752	else if (lkup_type == ICE_SW_LKUP_VLAN)
 753		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
 754				ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
 755	else
 756		return ICE_ERR_PARAM;
 757
 758	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
 759	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
 760	if (!s_rule)
 761		return ICE_ERR_NO_MEMORY;
 
 
 
 
 
 
 
 
 
 762
 763	for (i = 0; i < num_vsi; i++)
 764		s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
 765
 766	s_rule->type = cpu_to_le16(type);
 767	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
 768	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
 769
 770	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
 771
 
 772	devm_kfree(ice_hw_to_dev(hw), s_rule);
 773	return status;
 774}
 775
 776/**
 777 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 778 * @hw: pointer to the hw struct
 779 * @vsi_array: array of VSIs to form a VSI list
 780 * @num_vsi: number of VSIs in the array
 781 * @vsi_list_id: stores the ID of the VSI list to be created
 782 * @lkup_type: switch rule filter's lookup type
 783 */
 784static enum ice_status
 785ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
 786			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
 787{
 788	enum ice_status status;
 789	int i;
 790
 791	for (i = 0; i < num_vsi; i++)
 792		if (vsi_array[i] >= ICE_MAX_VSI)
 793			return ICE_ERR_OUT_OF_RANGE;
 794
 795	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
 796					    ice_aqc_opc_alloc_res);
 797	if (status)
 798		return status;
 799
 800	/* Update the newly created VSI list to include the specified VSIs */
 801	return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
 802					false, ice_aqc_opc_add_sw_rules,
 803					lkup_type);
 804}
 805
 806/**
 807 * ice_create_pkt_fwd_rule
 808 * @hw: pointer to the hardware structure
 809 * @f_entry: entry containing packet forwarding information
 810 *
 811 * Create switch rule with given filter information and add an entry
 812 * to the corresponding filter management list to track this switch rule
 813 * and VSI mapping
 814 */
 815static enum ice_status
 816ice_create_pkt_fwd_rule(struct ice_hw *hw,
 817			struct ice_fltr_list_entry *f_entry)
 818{
 819	struct ice_switch_info *sw = hw->switch_info;
 820	struct ice_fltr_mgmt_list_entry *fm_entry;
 821	struct ice_aqc_sw_rules_elem *s_rule;
 822	enum ice_sw_lkup_type l_type;
 
 823	enum ice_status status;
 824
 825	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
 826			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
 827	if (!s_rule)
 828		return ICE_ERR_NO_MEMORY;
 829	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
 830				GFP_KERNEL);
 831	if (!fm_entry) {
 832		status = ICE_ERR_NO_MEMORY;
 833		goto ice_create_pkt_fwd_rule_exit;
 834	}
 835
 836	fm_entry->fltr_info = f_entry->fltr_info;
 837
 838	/* Initialize all the fields for the management entry */
 839	fm_entry->vsi_count = 1;
 840	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
 841	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
 842	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
 843
 844	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
 845			 ice_aqc_opc_add_sw_rules);
 846
 847	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
 848				 ice_aqc_opc_add_sw_rules, NULL);
 849	if (status) {
 850		devm_kfree(ice_hw_to_dev(hw), fm_entry);
 851		goto ice_create_pkt_fwd_rule_exit;
 852	}
 853
 854	f_entry->fltr_info.fltr_rule_id =
 855		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
 856	fm_entry->fltr_info.fltr_rule_id =
 857		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
 858
 859	/* The book keeping entries will get removed when base driver
 860	 * calls remove filter AQ command
 861	 */
 862	l_type = fm_entry->fltr_info.lkup_type;
 863	if (l_type == ICE_SW_LKUP_MAC) {
 864		mutex_lock(&sw->mac_list_lock);
 865		list_add(&fm_entry->list_entry, &sw->mac_list_head);
 866		mutex_unlock(&sw->mac_list_lock);
 867	} else if (l_type == ICE_SW_LKUP_VLAN) {
 868		mutex_lock(&sw->vlan_list_lock);
 869		list_add(&fm_entry->list_entry, &sw->vlan_list_head);
 870		mutex_unlock(&sw->vlan_list_lock);
 871	} else if (l_type == ICE_SW_LKUP_ETHERTYPE ||
 872		   l_type == ICE_SW_LKUP_ETHERTYPE_MAC) {
 873		mutex_lock(&sw->eth_m_list_lock);
 874		list_add(&fm_entry->list_entry, &sw->eth_m_list_head);
 875		mutex_unlock(&sw->eth_m_list_lock);
 876	} else if (l_type == ICE_SW_LKUP_PROMISC ||
 877		   l_type == ICE_SW_LKUP_PROMISC_VLAN) {
 878		mutex_lock(&sw->promisc_list_lock);
 879		list_add(&fm_entry->list_entry, &sw->promisc_list_head);
 880		mutex_unlock(&sw->promisc_list_lock);
 881	} else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) {
 882		mutex_lock(&sw->mac_vlan_list_lock);
 883		list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head);
 884		mutex_unlock(&sw->mac_vlan_list_lock);
 885	} else {
 886		status = ICE_ERR_NOT_IMPL;
 887	}
 888ice_create_pkt_fwd_rule_exit:
 889	devm_kfree(ice_hw_to_dev(hw), s_rule);
 890	return status;
 891}
 892
 893/**
 894 * ice_update_pkt_fwd_rule
 895 * @hw: pointer to the hardware structure
 896 * @rule_id: rule of previously created switch rule to update
 897 * @vsi_list_id: VSI list id to be updated with
 898 * @f_info: ice_fltr_info to pull other information for switch rule
 899 *
 900 * Call AQ command to update a previously created switch rule with a
 901 * VSI list id
 902 */
 903static enum ice_status
 904ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
 905			struct ice_fltr_info f_info)
 906{
 907	struct ice_aqc_sw_rules_elem *s_rule;
 908	struct ice_fltr_info tmp_fltr;
 909	enum ice_status status;
 910
 911	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
 912			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
 913	if (!s_rule)
 914		return ICE_ERR_NO_MEMORY;
 915
 916	tmp_fltr = f_info;
 917	tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
 918	tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
 919
 920	ice_fill_sw_rule(hw, &tmp_fltr, s_rule,
 921			 ice_aqc_opc_update_sw_rules);
 922
 923	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
 924
 925	/* Update switch rule with new rule set to forward VSI list */
 926	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
 927				 ice_aqc_opc_update_sw_rules, NULL);
 928
 929	devm_kfree(ice_hw_to_dev(hw), s_rule);
 930	return status;
 931}
 932
 933/**
 934 * ice_handle_vsi_list_mgmt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 935 * @hw: pointer to the hardware structure
 936 * @m_entry: pointer to current filter management list entry
 937 * @cur_fltr: filter information from the book keeping entry
 938 * @new_fltr: filter information with the new VSI to be added
 939 *
 940 * Call AQ command to add or update previously created VSI list with new VSI.
 941 *
 942 * Helper function to do book keeping associated with adding filter information
 943 * The algorithm to do the booking keeping is described below :
 944 * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
 945 *	if only one VSI has been added till now
 946 *		Allocate a new VSI list and add two VSIs
 947 *		to this list using switch rule command
 948 *		Update the previously created switch rule with the
 949 *		newly created VSI list id
 950 *	if a VSI list was previously created
 951 *		Add the new VSI to the previously created VSI list set
 952 *		using the update switch rule command
 953 */
 954static enum ice_status
 955ice_handle_vsi_list_mgmt(struct ice_hw *hw,
 956			 struct ice_fltr_mgmt_list_entry *m_entry,
 957			 struct ice_fltr_info *cur_fltr,
 958			 struct ice_fltr_info *new_fltr)
 959{
 960	enum ice_status status = 0;
 961	u16 vsi_list_id = 0;
 962
 963	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
 964	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
 965		return ICE_ERR_NOT_IMPL;
 966
 967	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
 968	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
 969	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
 970	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
 971		return ICE_ERR_NOT_IMPL;
 972
 973	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
 974		/* Only one entry existed in the mapping and it was not already
 975		 * a part of a VSI list. So, create a VSI list with the old and
 976		 * new VSIs.
 977		 */
 978		u16 vsi_id_arr[2];
 979		u16 fltr_rule;
 980
 981		/* A rule already exists with the new VSI being added */
 982		if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
 983			return ICE_ERR_ALREADY_EXISTS;
 984
 985		vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
 986		vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
 987		status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
 988						  &vsi_list_id,
 989						  new_fltr->lkup_type);
 990		if (status)
 991			return status;
 992
 993		fltr_rule = cur_fltr->fltr_rule_id;
 
 
 
 994		/* Update the previous switch rule of "MAC forward to VSI" to
 995		 * "MAC fwd to VSI list"
 996		 */
 997		status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id,
 998						 *new_fltr);
 999		if (status)
1000			return status;
1001
1002		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
1003		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
1004		m_entry->vsi_list_info =
1005			ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
1006						vsi_list_id);
1007
1008		/* If this entry was large action then the large action needs
1009		 * to be updated to point to FWD to VSI list
1010		 */
1011		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
1012			status =
1013			    ice_add_marker_act(hw, m_entry,
1014					       m_entry->sw_marker_id,
1015					       m_entry->lg_act_idx);
1016	} else {
1017		u16 vsi_id = new_fltr->fwd_id.vsi_id;
1018		enum ice_adminq_opc opcode;
1019
 
 
 
1020		/* A rule already exists with the new VSI being added */
1021		if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
1022			return 0;
1023
1024		/* Update the previously created VSI list set with
1025		 * the new VSI id passed in
1026		 */
1027		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
1028		opcode = ice_aqc_opc_update_sw_rules;
1029
1030		status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
1031						  false, opcode,
1032						  new_fltr->lkup_type);
1033		/* update VSI list mapping info with new VSI id */
1034		if (!status)
1035			set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
1036	}
1037	if (!status)
1038		m_entry->vsi_count++;
1039	return status;
1040}
1041
1042/**
1043 * ice_find_mac_entry
1044 * @hw: pointer to the hardware structure
1045 * @mac_addr: MAC address to search for
 
1046 *
1047 * Helper function to search for a MAC entry using a given MAC address
1048 * Returns pointer to the entry if found.
1049 */
1050static struct ice_fltr_mgmt_list_entry *
1051ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr)
1052{
1053	struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL;
1054	struct ice_switch_info *sw = hw->switch_info;
 
1055
1056	mutex_lock(&sw->mac_list_lock);
1057	list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) {
1058		u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
1059
1060		if (ether_addr_equal(buf, mac_addr)) {
1061			mac_ret = m_list_itr;
1062			break;
1063		}
1064	}
1065	mutex_unlock(&sw->mac_list_lock);
1066	return mac_ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1067}
1068
1069/**
1070 * ice_add_shared_mac - Add one MAC shared filter rule
1071 * @hw: pointer to the hardware structure
 
1072 * @f_entry: structure containing MAC forwarding information
1073 *
1074 * Adds or updates the book keeping list for the MAC addresses
1075 */
1076static enum ice_status
1077ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
 
1078{
 
1079	struct ice_fltr_info *new_fltr, *cur_fltr;
1080	struct ice_fltr_mgmt_list_entry *m_entry;
 
 
1081
1082	new_fltr = &f_entry->fltr_info;
 
 
 
1083
1084	m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]);
1085	if (!m_entry)
 
 
 
 
 
 
 
 
 
 
1086		return ice_create_pkt_fwd_rule(hw, f_entry);
 
1087
1088	cur_fltr = &m_entry->fltr_info;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1089
1090	return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr);
 
 
 
 
 
 
 
 
1091}
1092
1093/**
1094 * ice_add_mac - Add a MAC address based filter rule
1095 * @hw: pointer to the hardware structure
1096 * @m_list: list of MAC addresses and forwarding information
1097 *
1098 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
1099 * multiple unicast addresses, the function assumes that all the
1100 * addresses are unique in a given add_mac call. It doesn't
1101 * check for duplicates in this case, removing duplicates from a given
1102 * list should be taken care of in the caller of this function.
1103 */
1104enum ice_status
1105ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
1106{
1107	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
1108	struct ice_fltr_list_entry *m_list_itr;
1109	u16 elem_sent, total_elem_left;
 
 
 
1110	enum ice_status status = 0;
1111	u16 num_unicast = 0;
1112	u16 s_rule_size;
1113
1114	if (!m_list || !hw)
1115		return ICE_ERR_PARAM;
1116
 
 
 
1117	list_for_each_entry(m_list_itr, m_list, list_entry) {
1118		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
 
 
1119
1120		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
 
 
 
 
 
 
 
1121			return ICE_ERR_PARAM;
1122		if (is_zero_ether_addr(add))
 
 
1123			return ICE_ERR_PARAM;
1124		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
1125			/* Don't overwrite the unicast address */
1126			if (ice_find_mac_entry(hw, add))
 
 
 
1127				return ICE_ERR_ALREADY_EXISTS;
 
 
1128			num_unicast++;
1129		} else if (is_multicast_ether_addr(add) ||
1130			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
1131			status = ice_add_shared_mac(hw, m_list_itr);
1132			if (status) {
1133				m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
1134				return status;
1135			}
1136			m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
1137		}
1138	}
1139
 
1140	/* Exit if no suitable entries were found for adding bulk switch rule */
1141	if (!num_unicast)
1142		return 0;
 
 
 
 
1143
1144	/* Allocate switch rule buffer for the bulk update for unicast */
1145	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1146	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
1147			      GFP_KERNEL);
1148	if (!s_rule)
1149		return ICE_ERR_NO_MEMORY;
 
 
1150
1151	r_iter = s_rule;
1152	list_for_each_entry(m_list_itr, m_list, list_entry) {
1153		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
1154		u8 *addr = &f_info->l_data.mac.mac_addr[0];
1155
1156		if (is_unicast_ether_addr(addr)) {
1157			ice_fill_sw_rule(hw, &m_list_itr->fltr_info,
1158					 r_iter, ice_aqc_opc_add_sw_rules);
1159			r_iter = (struct ice_aqc_sw_rules_elem *)
1160				((u8 *)r_iter + s_rule_size);
1161		}
1162	}
1163
1164	/* Call AQ bulk switch rule update for all unicast addresses */
1165	r_iter = s_rule;
1166	/* Call AQ switch rule in AQ_MAX chunk */
1167	for (total_elem_left = num_unicast; total_elem_left > 0;
1168	     total_elem_left -= elem_sent) {
1169		struct ice_aqc_sw_rules_elem *entry = r_iter;
1170
1171		elem_sent = min(total_elem_left,
1172				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
1173		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
1174					 elem_sent, ice_aqc_opc_add_sw_rules,
1175					 NULL);
1176		if (status)
1177			goto ice_add_mac_exit;
1178		r_iter = (struct ice_aqc_sw_rules_elem *)
1179			((u8 *)r_iter + (elem_sent * s_rule_size));
1180	}
1181
1182	/* Fill up rule id based on the value returned from FW */
1183	r_iter = s_rule;
1184	list_for_each_entry(m_list_itr, m_list, list_entry) {
1185		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
1186		u8 *addr = &f_info->l_data.mac.mac_addr[0];
1187		struct ice_switch_info *sw = hw->switch_info;
1188		struct ice_fltr_mgmt_list_entry *fm_entry;
1189
1190		if (is_unicast_ether_addr(addr)) {
1191			f_info->fltr_rule_id =
1192				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
1193			f_info->fltr_act = ICE_FWD_TO_VSI;
1194			/* Create an entry to track this MAC address */
1195			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
1196						sizeof(*fm_entry), GFP_KERNEL);
1197			if (!fm_entry) {
1198				status = ICE_ERR_NO_MEMORY;
1199				goto ice_add_mac_exit;
1200			}
1201			fm_entry->fltr_info = *f_info;
1202			fm_entry->vsi_count = 1;
1203			/* The book keeping entries will get removed when
1204			 * base driver calls remove filter AQ command
1205			 */
1206			mutex_lock(&sw->mac_list_lock);
1207			list_add(&fm_entry->list_entry, &sw->mac_list_head);
1208			mutex_unlock(&sw->mac_list_lock);
1209
 
1210			r_iter = (struct ice_aqc_sw_rules_elem *)
1211				((u8 *)r_iter + s_rule_size);
1212		}
1213	}
1214
1215ice_add_mac_exit:
1216	devm_kfree(ice_hw_to_dev(hw), s_rule);
 
 
1217	return status;
1218}
1219
1220/**
1221 * ice_find_vlan_entry
1222 * @hw: pointer to the hardware structure
1223 * @vlan_id: VLAN id to search for
1224 *
1225 * Helper function to search for a VLAN entry using a given VLAN id
1226 * Returns pointer to the entry if found.
1227 */
1228static struct ice_fltr_mgmt_list_entry *
1229ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
1230{
1231	struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL;
1232	struct ice_switch_info *sw = hw->switch_info;
1233
1234	mutex_lock(&sw->vlan_list_lock);
1235	list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry)
1236		if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) {
1237			vlan_ret = vlan_list_itr;
1238			break;
1239		}
1240
1241	mutex_unlock(&sw->vlan_list_lock);
1242	return vlan_ret;
1243}
1244
1245/**
1246 * ice_add_vlan_internal - Add one VLAN based filter rule
1247 * @hw: pointer to the hardware structure
1248 * @f_entry: filter entry containing one VLAN information
1249 */
1250static enum ice_status
1251ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
1252{
1253	struct ice_fltr_info *new_fltr, *cur_fltr;
1254	struct ice_fltr_mgmt_list_entry *v_list_itr;
1255	u16 vlan_id;
 
 
 
 
1256
 
 
 
 
 
1257	new_fltr = &f_entry->fltr_info;
1258	/* VLAN id should only be 12 bits */
 
1259	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
1260		return ICE_ERR_PARAM;
1261
1262	vlan_id = new_fltr->l_data.vlan.vlan_id;
1263	v_list_itr = ice_find_vlan_entry(hw, vlan_id);
 
 
 
 
 
 
 
1264	if (!v_list_itr) {
1265		u16 vsi_id = ICE_VSI_INVAL_ID;
1266		enum ice_status status;
1267		u16 vsi_list_id = 0;
1268
1269		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
1270			enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;
1271
1272			/* All VLAN pruning rules use a VSI list.
1273			 * Convert the action to forwarding to a VSI list.
1274			 */
1275			vsi_id = new_fltr->fwd_id.vsi_id;
1276			status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
1277							  &vsi_list_id,
1278							  lkup_type);
1279			if (status)
1280				return status;
 
 
 
 
 
 
 
1281			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
1282			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
1283		}
1284
1285		status = ice_create_pkt_fwd_rule(hw, f_entry);
1286		if (!status && vsi_id != ICE_VSI_INVAL_ID) {
1287			v_list_itr = ice_find_vlan_entry(hw, vlan_id);
1288			if (!v_list_itr)
1289				return ICE_ERR_DOES_NOT_EXIST;
1290			v_list_itr->vsi_list_info =
1291				ice_create_vsi_list_map(hw, &vsi_id, 1,
1292							vsi_list_id);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1293		}
1294
1295		return status;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1296	}
1297
1298	cur_fltr = &v_list_itr->fltr_info;
1299	return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr);
 
1300}
1301
1302/**
1303 * ice_add_vlan - Add VLAN based filter rule
1304 * @hw: pointer to the hardware structure
1305 * @v_list: list of VLAN entries and forwarding information
1306 */
1307enum ice_status
1308ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
1309{
1310	struct ice_fltr_list_entry *v_list_itr;
1311
1312	if (!v_list || !hw)
1313		return ICE_ERR_PARAM;
1314
1315	list_for_each_entry(v_list_itr, v_list, list_entry) {
1316		enum ice_status status;
1317
1318		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
1319			return ICE_ERR_PARAM;
1320
1321		status = ice_add_vlan_internal(hw, v_list_itr);
1322		if (status) {
1323			v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
1324			return status;
1325		}
1326		v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
1327	}
1328	return 0;
1329}
1330
1331/**
1332 * ice_remove_vsi_list_rule
1333 * @hw: pointer to the hardware structure
1334 * @vsi_list_id: VSI list id generated as part of allocate resource
1335 * @lkup_type: switch rule filter lookup type
 
 
 
1336 */
1337static enum ice_status
1338ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
1339			 enum ice_sw_lkup_type lkup_type)
1340{
1341	struct ice_aqc_sw_rules_elem *s_rule;
1342	enum ice_status status;
1343	u16 s_rule_size;
1344
1345	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
1346	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1347	if (!s_rule)
1348		return ICE_ERR_NO_MEMORY;
1349
1350	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
1351	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1352	/* FW expects number of VSIs in vsi_list resource to be 0 for clear
1353	 * command. Since memory is zero'ed out during initialization, it's not
1354	 * necessary to explicitly initialize the variable to 0.
1355	 */
1356
1357	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1,
1358				 ice_aqc_opc_remove_sw_rules, NULL);
1359	if (!status)
1360		/* Free the vsi_list resource that we allocated */
1361		status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
1362						    ice_aqc_opc_free_res);
1363
1364	devm_kfree(ice_hw_to_dev(hw), s_rule);
1365	return status;
 
 
 
 
1366}
1367
1368/**
1369 * ice_handle_rem_vsi_list_mgmt
1370 * @hw: pointer to the hardware structure
1371 * @vsi_id: ID of the VSI to remove
1372 * @fm_list_itr: filter management entry for which the VSI list management
1373 * needs to be done
1374 */
1375static enum ice_status
1376ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id,
1377			     struct ice_fltr_mgmt_list_entry *fm_list_itr)
1378{
1379	struct ice_switch_info *sw = hw->switch_info;
1380	enum ice_status status = 0;
1381	enum ice_sw_lkup_type lkup_type;
1382	bool is_last_elem = true;
1383	bool conv_list = false;
1384	bool del_list = false;
1385	u16 vsi_list_id;
1386
1387	lkup_type = fm_list_itr->fltr_info.lkup_type;
1388	vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id;
1389
1390	if (fm_list_itr->vsi_count > 1) {
1391		status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
1392						  true,
1393						  ice_aqc_opc_update_sw_rules,
1394						  lkup_type);
1395		if (status)
1396			return status;
1397		fm_list_itr->vsi_count--;
1398		is_last_elem = false;
1399		clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map);
1400	}
1401
1402	/* For non-VLAN rules that forward packets to a VSI list, convert them
1403	 * to forwarding packets to a VSI if there is only one VSI left in the
1404	 * list.  Unused lists are then removed.
1405	 * VLAN rules need to use VSI lists even with only one VSI.
1406	 */
1407	if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) {
1408		if (lkup_type == ICE_SW_LKUP_VLAN) {
1409			del_list = is_last_elem;
1410		} else if (fm_list_itr->vsi_count == 1) {
1411			conv_list = true;
1412			del_list = true;
1413		}
1414	}
1415
1416	if (del_list) {
1417		/* Remove the VSI list since it is no longer used */
1418		struct ice_vsi_list_map_info *vsi_list_info =
1419			fm_list_itr->vsi_list_info;
1420
1421		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
1422		if (status)
1423			return status;
1424
1425		if (conv_list) {
1426			u16 rem_vsi_id;
1427
1428			rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
1429						    ICE_MAX_VSI);
1430
1431			/* Error out when the expected last element is not in
1432			 * the VSI list map
1433			 */
1434			if (rem_vsi_id == ICE_MAX_VSI)
1435				return ICE_ERR_OUT_OF_RANGE;
1436
1437			/* Change the list entry action from VSI_LIST to VSI */
1438			fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1439			fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id;
1440		}
1441
1442		list_del(&vsi_list_info->list_entry);
1443		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
1444		fm_list_itr->vsi_list_info = NULL;
1445	}
1446
1447	if (conv_list) {
1448		/* Convert the rule's forward action to forwarding packets to
1449		 * a VSI
1450		 */
1451		struct ice_aqc_sw_rules_elem *s_rule;
1452
1453		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1454				      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE,
1455				      GFP_KERNEL);
1456		if (!s_rule)
1457			return ICE_ERR_NO_MEMORY;
1458
1459		ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
1460				 ice_aqc_opc_update_sw_rules);
1461
1462		s_rule->pdata.lkup_tx_rx.index =
1463			cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id);
1464
1465		status = ice_aq_sw_rules(hw, s_rule,
1466					 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1467					 ice_aqc_opc_update_sw_rules, NULL);
1468		devm_kfree(ice_hw_to_dev(hw), s_rule);
1469		if (status)
1470			return status;
1471	}
1472
1473	if (is_last_elem) {
1474		/* Remove the lookup rule */
1475		struct ice_aqc_sw_rules_elem *s_rule;
1476
1477		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1478				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
1479				      GFP_KERNEL);
1480		if (!s_rule)
1481			return ICE_ERR_NO_MEMORY;
1482
1483		ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
1484				 ice_aqc_opc_remove_sw_rules);
 
1485
1486		status = ice_aq_sw_rules(hw, s_rule,
1487					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
1488					 ice_aqc_opc_remove_sw_rules, NULL);
1489		if (status)
1490			return status;
1491
1492		/* Remove a book keeping entry from the MAC address list */
1493		mutex_lock(&sw->mac_list_lock);
1494		list_del(&fm_list_itr->list_entry);
1495		mutex_unlock(&sw->mac_list_lock);
1496		devm_kfree(ice_hw_to_dev(hw), fm_list_itr);
1497		devm_kfree(ice_hw_to_dev(hw), s_rule);
1498	}
1499	return status;
1500}
1501
1502/**
1503 * ice_remove_mac_entry
1504 * @hw: pointer to the hardware structure
1505 * @f_entry: structure containing MAC forwarding information
1506 */
1507static enum ice_status
1508ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
1509{
1510	struct ice_fltr_mgmt_list_entry *m_entry;
1511	u16 vsi_id;
1512	u8 *add;
1513
1514	add = &f_entry->fltr_info.l_data.mac.mac_addr[0];
1515
1516	m_entry = ice_find_mac_entry(hw, add);
1517	if (!m_entry)
1518		return ICE_ERR_PARAM;
1519
1520	vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
1521	return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry);
1522}
1523
1524/**
1525 * ice_remove_mac - remove a MAC address based filter rule
1526 * @hw: pointer to the hardware structure
1527 * @m_list: list of MAC addresses and forwarding information
1528 *
1529 * This function removes either a MAC filter rule or a specific VSI from a
1530 * VSI list for a multicast MAC address.
1531 *
1532 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
1533 * ice_add_mac. Caller should be aware that this call will only work if all
1534 * the entries passed into m_list were added previously. It will not attempt to
1535 * do a partial remove of entries that were found.
1536 */
1537enum ice_status
1538ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
1539{
1540	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
1541	u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
1542	struct ice_switch_info *sw = hw->switch_info;
1543	struct ice_fltr_mgmt_list_entry *m_entry;
1544	struct ice_fltr_list_entry *m_list_itr;
1545	u16 elem_sent, total_elem_left;
1546	enum ice_status status = 0;
1547	u16 num_unicast = 0;
1548
1549	if (!m_list)
1550		return ICE_ERR_PARAM;
1551
1552	list_for_each_entry(m_list_itr, m_list, list_entry) {
1553		u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
1554
1555		if (is_unicast_ether_addr(addr) && !hw->ucast_shared)
1556			num_unicast++;
1557		else if (is_multicast_ether_addr(addr) ||
1558			 (is_unicast_ether_addr(addr) && hw->ucast_shared))
1559			ice_remove_mac_entry(hw, m_list_itr);
1560	}
1561
1562	/* Exit if no unicast addresses found. Multicast switch rules
1563	 * were added individually
1564	 */
1565	if (!num_unicast)
1566		return 0;
1567
1568	/* Allocate switch rule buffer for the bulk update for unicast */
1569	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
1570			      GFP_KERNEL);
1571	if (!s_rule)
1572		return ICE_ERR_NO_MEMORY;
1573
1574	r_iter = s_rule;
1575	list_for_each_entry(m_list_itr, m_list, list_entry) {
1576		u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
1577
1578		if (is_unicast_ether_addr(addr)) {
1579			m_entry = ice_find_mac_entry(hw, addr);
1580			if (!m_entry) {
1581				status = ICE_ERR_DOES_NOT_EXIST;
1582				goto ice_remove_mac_exit;
1583			}
1584
1585			ice_fill_sw_rule(hw, &m_entry->fltr_info,
1586					 r_iter, ice_aqc_opc_remove_sw_rules);
1587			r_iter = (struct ice_aqc_sw_rules_elem *)
1588				((u8 *)r_iter + s_rule_size);
1589		}
1590	}
1591
1592	/* Call AQ bulk switch rule update for all unicast addresses */
1593	r_iter = s_rule;
1594	/* Call AQ switch rule in AQ_MAX chunk */
1595	for (total_elem_left = num_unicast; total_elem_left > 0;
1596	     total_elem_left -= elem_sent) {
1597		struct ice_aqc_sw_rules_elem *entry = r_iter;
1598
1599		elem_sent = min(total_elem_left,
1600				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
1601		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
1602					 elem_sent, ice_aqc_opc_remove_sw_rules,
1603					 NULL);
1604		if (status)
1605			break;
1606		r_iter = (struct ice_aqc_sw_rules_elem *)
1607			((u8 *)r_iter + s_rule_size);
1608	}
1609
1610	list_for_each_entry(m_list_itr, m_list, list_entry) {
1611		u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
1612
1613		if (is_unicast_ether_addr(addr)) {
1614			m_entry = ice_find_mac_entry(hw, addr);
1615			if (!m_entry)
1616				return ICE_ERR_OUT_OF_RANGE;
1617			mutex_lock(&sw->mac_list_lock);
1618			list_del(&m_entry->list_entry);
1619			mutex_unlock(&sw->mac_list_lock);
1620			devm_kfree(ice_hw_to_dev(hw), m_entry);
1621		}
1622	}
1623
1624ice_remove_mac_exit:
1625	devm_kfree(ice_hw_to_dev(hw), s_rule);
1626	return status;
1627}
1628
1629/**
1630 * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default
1631 * VSI for the switch (represented by swid)
1632 * @hw: pointer to the hardware structure
1633 * @vsi_id: number of VSI to set as default
1634 * @set: true to add the above mentioned switch rule, false to remove it
1635 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 
 
 
1636 */
1637enum ice_status
1638ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
1639{
1640	struct ice_aqc_sw_rules_elem *s_rule;
1641	struct ice_fltr_info f_info;
1642	enum ice_adminq_opc opcode;
1643	enum ice_status status;
1644	u16 s_rule_size;
 
 
 
 
 
1645
1646	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
1647			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
 
1648	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1649	if (!s_rule)
1650		return ICE_ERR_NO_MEMORY;
1651
1652	memset(&f_info, 0, sizeof(f_info));
1653
1654	f_info.lkup_type = ICE_SW_LKUP_DFLT;
1655	f_info.flag = direction;
1656	f_info.fltr_act = ICE_FWD_TO_VSI;
1657	f_info.fwd_id.vsi_id = vsi_id;
1658
1659	if (f_info.flag & ICE_FLTR_RX) {
1660		f_info.src = hw->port_info->lport;
 
1661		if (!set)
1662			f_info.fltr_rule_id =
1663				hw->port_info->dflt_rx_vsi_rule_id;
1664	} else if (f_info.flag & ICE_FLTR_TX) {
1665		f_info.src = vsi_id;
 
1666		if (!set)
1667			f_info.fltr_rule_id =
1668				hw->port_info->dflt_tx_vsi_rule_id;
1669	}
1670
1671	if (set)
1672		opcode = ice_aqc_opc_add_sw_rules;
1673	else
1674		opcode = ice_aqc_opc_remove_sw_rules;
1675
1676	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
1677
1678	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
1679	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
1680		goto out;
1681	if (set) {
1682		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1683
1684		if (f_info.flag & ICE_FLTR_TX) {
1685			hw->port_info->dflt_tx_vsi_num = vsi_id;
1686			hw->port_info->dflt_tx_vsi_rule_id = index;
1687		} else if (f_info.flag & ICE_FLTR_RX) {
1688			hw->port_info->dflt_rx_vsi_num = vsi_id;
1689			hw->port_info->dflt_rx_vsi_rule_id = index;
1690		}
1691	} else {
1692		if (f_info.flag & ICE_FLTR_TX) {
1693			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1694			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
1695		} else if (f_info.flag & ICE_FLTR_RX) {
1696			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1697			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
1698		}
1699	}
1700
1701out:
1702	devm_kfree(ice_hw_to_dev(hw), s_rule);
1703	return status;
1704}
1705
1706/**
1707 * ice_remove_vlan_internal - Remove one VLAN based filter rule
1708 * @hw: pointer to the hardware structure
1709 * @f_entry: filter entry containing one VLAN information
 
 
 
 
 
 
 
1710 */
1711static enum ice_status
1712ice_remove_vlan_internal(struct ice_hw *hw,
1713			 struct ice_fltr_list_entry *f_entry)
1714{
1715	struct ice_fltr_info *new_fltr;
1716	struct ice_fltr_mgmt_list_entry *v_list_elem;
1717	u16 vsi_id;
1718
1719	new_fltr = &f_entry->fltr_info;
 
 
 
 
 
 
 
 
 
 
1720
1721	v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id);
1722	if (!v_list_elem)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1723		return ICE_ERR_PARAM;
1724
1725	vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
1726	return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1727}
1728
1729/**
1730 * ice_remove_vlan - Remove VLAN based filter rule
1731 * @hw: pointer to the hardware structure
1732 * @v_list: list of VLAN entries and forwarding information
1733 */
1734enum ice_status
1735ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
1736{
1737	struct ice_fltr_list_entry *v_list_itr;
1738	enum ice_status status = 0;
1739
1740	if (!v_list || !hw)
1741		return ICE_ERR_PARAM;
1742
1743	list_for_each_entry(v_list_itr, v_list, list_entry) {
1744		status = ice_remove_vlan_internal(hw, v_list_itr);
1745		if (status) {
1746			v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
1747			return status;
1748		}
1749		v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
 
 
 
1750	}
1751	return status;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1752}
1753
1754/**
1755 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
1756 * @hw: pointer to the hardware structure
1757 * @vsi_id: ID of VSI to remove filters from
1758 * @lkup_list_head: pointer to the list that has certain lookup type filters
1759 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
 
 
 
 
 
 
1760 */
1761static enum ice_status
1762ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
1763			 struct list_head *lkup_list_head,
1764			 struct list_head *vsi_list_head)
1765{
1766	struct ice_fltr_mgmt_list_entry *fm_entry;
 
1767
1768	/* check to make sure VSI id is valid and within boundary */
1769	if (vsi_id >=
1770	    (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1))
1771		return ICE_ERR_PARAM;
1772
1773	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
1774		struct ice_fltr_info *fi;
1775
1776		fi = &fm_entry->fltr_info;
1777		if ((fi->fltr_act == ICE_FWD_TO_VSI &&
1778		     fi->fwd_id.vsi_id == vsi_id) ||
1779		    (fi->fltr_act == ICE_FWD_TO_VSI_LIST &&
1780		     (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) {
1781			struct ice_fltr_list_entry *tmp;
1782
1783			/* this memory is freed up in the caller function
1784			 * ice_remove_vsi_lkup_fltr() once filters for
1785			 * this VSI are removed
1786			 */
1787			tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp),
1788					   GFP_KERNEL);
1789			if (!tmp)
1790				return ICE_ERR_NO_MEMORY;
1791
1792			memcpy(&tmp->fltr_info, fi, sizeof(*fi));
1793
1794			/* Expected below fields to be set to ICE_FWD_TO_VSI and
1795			 * the particular VSI id since we are only removing this
1796			 * one VSI
1797			 */
1798			if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) {
1799				tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1800				tmp->fltr_info.fwd_id.vsi_id = vsi_id;
1801			}
1802
1803			list_add(&tmp->list_entry, vsi_list_head);
1804		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1805	}
1806	return 0;
1807}
1808
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_id: ID of VSI to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct ice_fltr_list_entry *tmp;
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		/* Snapshot the matching MAC filters under the list lock,
		 * then perform the HW removal outside the lock.
		 */
		mutex_lock(&sw->mac_list_lock);
		status = ice_add_to_vsi_fltr_list(hw, vsi_id,
						  &sw->mac_list_head,
						  &remove_list_head);
		mutex_unlock(&sw->mac_list_lock);
		if (!status) {
			ice_remove_mac(hw, &remove_list_head);
			goto free_fltr_list;
		}
		break;
	case ICE_SW_LKUP_VLAN:
		mutex_lock(&sw->vlan_list_lock);
		status = ice_add_to_vsi_fltr_list(hw, vsi_id,
						  &sw->vlan_list_head,
						  &remove_list_head);
		mutex_unlock(&sw->vlan_list_lock);
		if (!status) {
			ice_remove_vlan(hw, &remove_list_head);
			goto free_fltr_list;
		}
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW,
			  "Remove filters for this lookup type hasn't been implemented yet\n");
		break;
	}

	/* On list-build failure nothing was queued; just return */
	return;
free_fltr_list:
	/* Entries were allocated by ice_add_to_vsi_fltr_list(); release them
	 * now that HW removal has been attempted.
	 */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}
1867
1868/**
1869 * ice_remove_vsi_fltr - Remove all filters for a VSI
1870 * @hw: pointer to the hardware structure
1871 * @vsi_id: ID of VSI to remove filters from
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1872 */
1873void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
1874{
1875	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
1876	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
1877	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
1878	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
1879	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
1880	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
1881	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
1882	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1883}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice_switch.h"
   5
   6#define ICE_ETH_DA_OFFSET		0
   7#define ICE_ETH_ETHTYPE_OFFSET		12
   8#define ICE_ETH_VLAN_TCI_OFFSET		14
   9#define ICE_MAX_VLAN_ID			0xFFF
  10
  11/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
  12 * struct to configure any switch filter rules.
  13 * {DA (6 bytes), SA(6 bytes),
  14 * Ether type (2 bytes for header without VLAN tag) OR
  15 * VLAN tag (4 bytes for header with VLAN tag) }
  16 *
  17 * Word on Hardcoded values
  18 * byte 0 = 0x2: to identify it as locally administered DA MAC
  19 * byte 6 = 0x2: to identify it as locally administered SA MAC
  20 * byte 12 = 0x81 & byte 13 = 0x00:
  21 *	In case of VLAN filter first two bytes defines ether type (0x8100)
  22 *	and remaining two bytes are placeholder for programming a given VLAN ID
  23 *	In case of Ether type filter it is treated as header without VLAN tag
  24 *	and byte 12 and 13 is used to program a given Ether type instead
  25 */
  26#define DUMMY_ETH_HDR_LEN		16
  27static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
  28							0x2, 0, 0, 0, 0, 0,
  29							0x81, 0, 0, 0};
  30
  31#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
  32	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
  33	 (DUMMY_ETH_HDR_LEN * \
  34	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
  35#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
  36	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
 
 
  37#define ICE_SW_RULE_LG_ACT_SIZE(n) \
  38	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
  39	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
 
 
 
  40#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
  41	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
  42	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
 
 
 
  43
  44/**
  45 * ice_init_def_sw_recp - initialize the recipe book keeping tables
  46 * @hw: pointer to the HW struct
 
 
 
 
 
  47 *
  48 * Allocate memory for the entire recipe table and initialize the structures/
  49 * entries corresponding to basic recipes.
  50 */
  51enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
 
 
 
  52{
  53	struct ice_sw_recipe *recps;
  54	u8 i;
 
 
 
 
 
  55
  56	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
  57			     sizeof(*recps), GFP_KERNEL);
  58	if (!recps)
  59		return ICE_ERR_NO_MEMORY;
  60
  61	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  62		recps[i].root_rid = i;
  63		INIT_LIST_HEAD(&recps[i].filt_rules);
  64		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
  65		mutex_init(&recps[i].filt_rule_lock);
  66	}
  67
  68	hw->switch_info->recp_list = recps;
  69
  70	return 0;
  71}
  72
  73/**
  74 * ice_aq_get_sw_cfg - get switch configuration
  75 * @hw: pointer to the hardware structure
  76 * @buf: pointer to the result buffer
  77 * @buf_size: length of the buffer available for response
  78 * @req_desc: pointer to requested descriptor
  79 * @num_elems: pointer to number of elements
  80 * @cd: pointer to command details structure or NULL
  81 *
  82 * Get switch configuration (0x0200) to be placed in buf.
  83 * This admin command returns information such as initial VSI/port number
  84 * and switch ID it belongs to.
  85 *
  86 * NOTE: *req_desc is both an input/output parameter.
  87 * The caller of this function first calls this function with *request_desc set
  88 * to 0. If the response from f/w has *req_desc set to 0, all the switch
  89 * configuration information has been returned; if non-zero (meaning not all
  90 * the information was returned), the caller should call this function again
  91 * with *req_desc set to the previous value returned by f/w to get the
  92 * next block of switch configuration information.
  93 *
  94 * *num_elems is output only parameter. This reflects the number of elements
  95 * in response buffer. The caller of this function to use *num_elems while
  96 * parsing the response buffer.
  97 */
  98static enum ice_status
  99ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
 100		  u16 buf_size, u16 *req_desc, u16 *num_elems,
 101		  struct ice_sq_cd *cd)
 102{
 103	struct ice_aqc_get_sw_cfg *cmd;
 
 104	struct ice_aq_desc desc;
 105	enum ice_status status;
 106
 107	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
 108	cmd = &desc.params.get_sw_conf;
 109	cmd->element = cpu_to_le16(*req_desc);
 110
 111	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 112	if (!status) {
 113		*req_desc = le16_to_cpu(cmd->element);
 114		*num_elems = le16_to_cpu(cmd->num_elems);
 115	}
 116
 117	return status;
 118}
 119
 120/**
 121 * ice_aq_add_vsi
 122 * @hw: pointer to the HW struct
 123 * @vsi_ctx: pointer to a VSI context struct
 124 * @cd: pointer to command details structure or NULL
 125 *
 126 * Add a VSI context to the hardware (0x0210)
 127 */
 128static enum ice_status
 129ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 130	       struct ice_sq_cd *cd)
 131{
 132	struct ice_aqc_add_update_free_vsi_resp *res;
 133	struct ice_aqc_add_get_update_free_vsi *cmd;
 
 134	struct ice_aq_desc desc;
 135	enum ice_status status;
 136
 137	cmd = &desc.params.vsi_cmd;
 138	res = &desc.params.add_update_free_vsi_res;
 139
 140	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
 141
 142	if (!vsi_ctx->alloc_from_pool)
 143		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
 144					   ICE_AQ_VSI_IS_VALID);
 145	cmd->vf_id = vsi_ctx->vf_num;
 146
 147	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
 148
 149	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 150
 151	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
 152				 sizeof(vsi_ctx->info), cd);
 153
 154	if (!status) {
 155		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
 156		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
 157		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
 158	}
 159
 160	return status;
 161}
 162
 163/**
 164 * ice_aq_free_vsi
 165 * @hw: pointer to the HW struct
 166 * @vsi_ctx: pointer to a VSI context struct
 167 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 168 * @cd: pointer to command details structure or NULL
 169 *
 170 * Free VSI context info from hardware (0x0213)
 171 */
 172static enum ice_status
 173ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 174		bool keep_vsi_alloc, struct ice_sq_cd *cd)
 175{
 176	struct ice_aqc_add_update_free_vsi_resp *resp;
 177	struct ice_aqc_add_get_update_free_vsi *cmd;
 178	struct ice_aq_desc desc;
 179	enum ice_status status;
 180
 181	cmd = &desc.params.vsi_cmd;
 182	resp = &desc.params.add_update_free_vsi_res;
 183
 184	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
 185
 186	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
 187	if (keep_vsi_alloc)
 188		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
 189
 190	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 191	if (!status) {
 192		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
 193		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
 194	}
 195
 196	return status;
 197}
 198
 199/**
 200 * ice_aq_update_vsi
 201 * @hw: pointer to the HW struct
 202 * @vsi_ctx: pointer to a VSI context struct
 203 * @cd: pointer to command details structure or NULL
 204 *
 205 * Update VSI context in the hardware (0x0211)
 206 */
 207static enum ice_status
 208ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
 209		  struct ice_sq_cd *cd)
 210{
 211	struct ice_aqc_add_update_free_vsi_resp *resp;
 212	struct ice_aqc_add_get_update_free_vsi *cmd;
 213	struct ice_aq_desc desc;
 214	enum ice_status status;
 215
 216	cmd = &desc.params.vsi_cmd;
 217	resp = &desc.params.add_update_free_vsi_res;
 218
 219	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
 220
 221	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
 222
 223	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 224
 225	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
 226				 sizeof(vsi_ctx->info), cd);
 227
 228	if (!status) {
 229		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
 230		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
 231	}
 232
 233	return status;
 234}
 235
 236/**
 237 * ice_is_vsi_valid - check whether the VSI is valid or not
 238 * @hw: pointer to the HW struct
 239 * @vsi_handle: VSI handle
 240 *
 241 * check whether the VSI is valid or not
 242 */
 243bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
 244{
 245	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
 246}
 247
 248/**
 249 * ice_get_hw_vsi_num - return the HW VSI number
 250 * @hw: pointer to the HW struct
 251 * @vsi_handle: VSI handle
 252 *
 253 * return the HW VSI number
 254 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 255 */
 256u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
 257{
 258	return hw->vsi_ctx[vsi_handle]->vsi_num;
 259}
 260
 261/**
 262 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 263 * @hw: pointer to the HW struct
 264 * @vsi_handle: VSI handle
 265 *
 266 * return the VSI context entry for a given VSI handle
 267 */
 268struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
 269{
 270	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
 271}
 272
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * Save the VSI context entry for a given VSI handle. The caller retains
 * ownership until ice_clear_vsi_ctx() frees it; any previously stored
 * pointer is simply overwritten here.
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}
 286
 287/**
 288 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 289 * @hw: pointer to the HW struct
 290 * @vsi_handle: VSI handle
 291 */
 292static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
 293{
 294	struct ice_vsi_ctx *vsi;
 295	u8 i;
 296
 297	vsi = ice_get_vsi_ctx(hw, vsi_handle);
 298	if (!vsi)
 299		return;
 300	ice_for_each_traffic_class(i) {
 301		if (vsi->lan_q_ctx[i]) {
 302			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
 303			vsi->lan_q_ctx[i] = NULL;
 304		}
 305	}
 306}
 307
 308/**
 309 * ice_clear_vsi_ctx - clear the VSI context entry
 310 * @hw: pointer to the HW struct
 311 * @vsi_handle: VSI handle
 312 *
 313 * clear the VSI context entry
 314 */
 315static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
 316{
 317	struct ice_vsi_ctx *vsi;
 318
 319	vsi = ice_get_vsi_ctx(hw, vsi_handle);
 320	if (vsi) {
 321		ice_clear_vsi_q_ctx(hw, vsi_handle);
 322		devm_kfree(ice_hw_to_dev(hw), vsi);
 323		hw->vsi_ctx[vsi_handle] = NULL;
 324	}
 325}
 326
 327/**
 328 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 329 * @hw: pointer to the HW struct
 330 */
 331void ice_clear_all_vsi_ctx(struct ice_hw *hw)
 332{
 333	u16 i;
 334
 335	for (i = 0; i < ICE_MAX_VSI; i++)
 336		ice_clear_vsi_ctx(hw, i);
 337}
 338
 339/**
 340 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 341 * @hw: pointer to the HW struct
 342 * @vsi_handle: unique VSI handle provided by drivers
 343 * @vsi_ctx: pointer to a VSI context struct
 
 344 * @cd: pointer to command details structure or NULL
 345 *
 346 * Add a VSI context to the hardware also add it into the VSI handle list.
 347 * If this function gets called after reset for existing VSIs then update
 348 * with the new HW VSI number in the corresponding VSI handle list entry.
 349 */
 350enum ice_status
 351ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
 352	    struct ice_sq_cd *cd)
 353{
 354	struct ice_vsi_ctx *tmp_vsi_ctx;
 
 
 355	enum ice_status status;
 356
 357	if (vsi_handle >= ICE_MAX_VSI)
 358		return ICE_ERR_PARAM;
 359	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
 360	if (status)
 361		return status;
 362	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
 363	if (!tmp_vsi_ctx) {
 364		/* Create a new VSI context */
 365		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
 366					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
 367		if (!tmp_vsi_ctx) {
 368			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
 369			return ICE_ERR_NO_MEMORY;
 370		}
 371		*tmp_vsi_ctx = *vsi_ctx;
 372		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
 373	} else {
 374		/* update with new HW VSI num */
 375		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
 376	}
 377
 378	return 0;
 379}
 
 380
 381/**
 382 * ice_free_vsi- free VSI context from hardware and VSI handle list
 383 * @hw: pointer to the HW struct
 384 * @vsi_handle: unique VSI handle
 385 * @vsi_ctx: pointer to a VSI context struct
 386 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 387 * @cd: pointer to command details structure or NULL
 388 *
 389 * Free VSI context info from hardware as well as from VSI handle list
 390 */
 391enum ice_status
 392ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
 393	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
 394{
 395	enum ice_status status;
 396
 397	if (!ice_is_vsi_valid(hw, vsi_handle))
 398		return ICE_ERR_PARAM;
 399	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
 400	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
 401	if (!status)
 402		ice_clear_vsi_ctx(hw, vsi_handle);
 403	return status;
 404}
 405
/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware. Resolves @vsi_handle to the HW VSI
 * number (stored into @vsi_ctx) before issuing the AQ update command.
 */
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
 424
 425/**
 426 * ice_aq_alloc_free_vsi_list
 427 * @hw: pointer to the HW struct
 428 * @vsi_list_id: VSI list ID returned or used for lookup
 429 * @lkup_type: switch rule filter lookup type
 430 * @opc: switch rules population command type - pass in the command opcode
 431 *
 432 * allocates or free a VSI list resource
 433 */
 434static enum ice_status
 435ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
 436			   enum ice_sw_lkup_type lkup_type,
 437			   enum ice_adminq_opc opc)
 438{
 439	struct ice_aqc_alloc_free_res_elem *sw_buf;
 440	struct ice_aqc_res_elem *vsi_ele;
 441	enum ice_status status;
 442	u16 buf_len;
 443
 444	buf_len = struct_size(sw_buf, elem, 1);
 445	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
 446	if (!sw_buf)
 447		return ICE_ERR_NO_MEMORY;
 448	sw_buf->num_elems = cpu_to_le16(1);
 449
 450	if (lkup_type == ICE_SW_LKUP_MAC ||
 451	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
 452	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
 453	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
 454	    lkup_type == ICE_SW_LKUP_PROMISC ||
 455	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
 456		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
 457	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
 458		sw_buf->res_type =
 459			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
 460	} else {
 461		status = ICE_ERR_PARAM;
 462		goto ice_aq_alloc_free_vsi_list_exit;
 463	}
 464
 465	if (opc == ice_aqc_opc_free_res)
 466		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
 467
 468	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
 469	if (status)
 470		goto ice_aq_alloc_free_vsi_list_exit;
 471
 472	if (opc == ice_aqc_opc_alloc_res) {
 473		vsi_ele = &sw_buf->elem[0];
 474		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
 475	}
 476
 477ice_aq_alloc_free_vsi_list_exit:
 478	devm_kfree(ice_hw_to_dev(hw), sw_buf);
 479	return status;
 480}
 481
 482/**
 483 * ice_aq_sw_rules - add/update/remove switch rules
 484 * @hw: pointer to the HW struct
 485 * @rule_list: pointer to switch rule population list
 486 * @rule_list_sz: total size of the rule list in bytes
 487 * @num_rules: number of switch rules in the rule_list
 488 * @opc: switch rules population command type - pass in the command opcode
 489 * @cd: pointer to command details structure or NULL
 490 *
 491 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 492 */
 493static enum ice_status
 494ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
 495		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
 496{
 497	struct ice_aq_desc desc;
 498	enum ice_status status;
 499
 500	if (opc != ice_aqc_opc_add_sw_rules &&
 501	    opc != ice_aqc_opc_update_sw_rules &&
 502	    opc != ice_aqc_opc_remove_sw_rules)
 503		return ICE_ERR_PARAM;
 504
 505	ice_fill_dflt_direct_cmd_desc(&desc, opc);
 506
 507	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 508	desc.params.sw_rules.num_rules_fltr_entry_index =
 509		cpu_to_le16(num_rules);
 510	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
 511	if (opc != ice_aqc_opc_add_sw_rules &&
 512	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
 513		status = ICE_ERR_DOES_NOT_EXIST;
 514
 515	return status;
 516}
 517
/**
 * ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 *
 * Only physical-port elements are handled; any other type is logged and
 * ignored.
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		/* no default Tx/Rx VSI until one is explicitly configured */
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
		break;
	}
}
 545
 546/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 547 * @hw: pointer to the hardware structure
 548 */
 549enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 550{
 551	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
 552	enum ice_status status;
 553	u16 req_desc = 0;
 554	u16 num_elems;
 555	u16 i;
 556
 557	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
 558			    GFP_KERNEL);
 559
 560	if (!rbuf)
 561		return ICE_ERR_NO_MEMORY;
 562
 563	/* Multiple calls to ice_aq_get_sw_cfg may be required
 564	 * to get all the switch configuration information. The need
 565	 * for additional calls is indicated by ice_aq_get_sw_cfg
 566	 * writing a non-zero value in req_desc
 567	 */
 568	do {
 569		struct ice_aqc_get_sw_cfg_resp_elem *ele;
 570
 571		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
 572					   &req_desc, &num_elems, NULL);
 573
 574		if (status)
 575			break;
 576
 577		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
 
 578			u16 pf_vf_num, swid, vsi_port_num;
 579			bool is_vf = false;
 580			u8 res_type;
 581
 
 582			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
 583				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
 584
 585			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
 586				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
 587
 588			swid = le16_to_cpu(ele->swid);
 589
 590			if (le16_to_cpu(ele->pf_vf_num) &
 591			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
 592				is_vf = true;
 593
 594			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
 595					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
 596
 597			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
 598				/* FW VSI is not needed. Just continue. */
 599				continue;
 600			}
 601
 602			ice_init_port_info(hw->port_info, vsi_port_num,
 603					   res_type, swid, pf_vf_num, is_vf);
 604		}
 605	} while (req_desc && !status);
 606
 607	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
 608	return status;
 609}
 610
/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured. Both flags start false and are only set for
 * Tx-flagged filters with a forwarding action.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 * promiscuous, ethertype-MAC, promiscuous-VLAN
		 * and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			/* VEPA: always forward to the LAN port */
			fi->lan_en = true;
		}
	}
}
 668
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the dummy Ethernet header and the action word of @s_rule from
 * @f_info. For a remove opcode only the rule index is needed.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* sentinel one past the valid VLAN ID range: "no VLAN to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		/* removal only needs the rule's index; no header/action */
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* encode the forwarding action */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is a power of two; encode its log2 */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* pick the DA / ethertype / VLAN ID to patch into the dummy header */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* only program a VLAN TCI when a valid VLAN ID was selected above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
 801
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* Markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* The lookup rule sits immediately after the large action within the
	 * single allocation above
	 */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	/* Submit both rules to firmware in one AQ call */
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record marker bookkeeping only on success */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
 905
 906/**
 907 * ice_create_vsi_list_map
 908 * @hw: pointer to the hardware structure
 909 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 910 * @num_vsi: number of VSI handles in the array
 911 * @vsi_list_id: VSI list ID generated as part of allocate resource
 912 *
 913 * Helper function to create a new entry of VSI list ID to VSI mapping
 914 * using the given VSI list ID
 915 */
 916static struct ice_vsi_list_map_info *
 917ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 918			u16 vsi_list_id)
 919{
 920	struct ice_switch_info *sw = hw->switch_info;
 921	struct ice_vsi_list_map_info *v_map;
 922	int i;
 923
 924	v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
 925	if (!v_map)
 926		return NULL;
 927
 928	v_map->vsi_list_id = vsi_list_id;
 929	v_map->ref_cnt = 1;
 930	for (i = 0; i < num_vsi; i++)
 931		set_bit(vsi_handle_arr[i], v_map->vsi_map);
 932
 933	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
 934	return v_map;
 935}
 936
 937/**
 938 * ice_update_vsi_list_rule
 939 * @hw: pointer to the hardware structure
 940 * @vsi_handle_arr: array of VSI handles to form a VSI list
 941 * @num_vsi: number of VSI handles in the array
 942 * @vsi_list_id: VSI list ID generated as part of allocate resource
 943 * @remove: Boolean value to indicate if this is a remove action
 944 * @opc: switch rules population command type - pass in the command opcode
 945 * @lkup_type: lookup type of the filter
 946 *
 947 * Call AQ command to add a new switch rule or update existing switch rule
 948 * using the given VSI list ID
 949 */
 950static enum ice_status
 951ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 952			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
 953			 enum ice_sw_lkup_type lkup_type)
 954{
 955	struct ice_aqc_sw_rules_elem *s_rule;
 956	enum ice_status status;
 957	u16 s_rule_size;
 958	u16 rule_type;
 959	int i;
 960
 961	if (!num_vsi)
 962		return ICE_ERR_PARAM;
 963
 964	if (lkup_type == ICE_SW_LKUP_MAC ||
 965	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
 966	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
 967	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
 968	    lkup_type == ICE_SW_LKUP_PROMISC ||
 969	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
 970		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
 971			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
 972	else if (lkup_type == ICE_SW_LKUP_VLAN)
 973		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
 974			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
 975	else
 976		return ICE_ERR_PARAM;
 977
 978	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
 979	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
 980	if (!s_rule)
 981		return ICE_ERR_NO_MEMORY;
 982	for (i = 0; i < num_vsi; i++) {
 983		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
 984			status = ICE_ERR_PARAM;
 985			goto exit;
 986		}
 987		/* AQ call requires hw_vsi_id(s) */
 988		s_rule->pdata.vsi_list.vsi[i] =
 989			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
 990	}
 991
 992	s_rule->type = cpu_to_le16(rule_type);
 
 
 
 993	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
 994	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
 995
 996	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
 997
 998exit:
 999	devm_kfree(ice_hw_to_dev(hw), s_rule);
1000	return status;
1001}
1002
1003/**
1004 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1005 * @hw: pointer to the HW struct
1006 * @vsi_handle_arr: array of VSI handles to form a VSI list
1007 * @num_vsi: number of VSI handles in the array
1008 * @vsi_list_id: stores the ID of the VSI list to be created
1009 * @lkup_type: switch rule filter's lookup type
1010 */
1011static enum ice_status
1012ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1013			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1014{
1015	enum ice_status status;
 
 
 
 
 
1016
1017	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1018					    ice_aqc_opc_alloc_res);
1019	if (status)
1020		return status;
1021
1022	/* Update the newly created VSI list to include the specified VSIs */
1023	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1024					*vsi_list_id, false,
1025					ice_aqc_opc_add_sw_rules, lkup_type);
1026}
1027
/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* Rule never reached HW, so the tracking entry is not
		 * needed either; only s_rule is freed at the exit label.
		 */
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* Propagate the firmware-assigned rule ID to both the caller's
	 * entry and the tracking entry
	 */
	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
1092
1093/**
1094 * ice_update_pkt_fwd_rule
1095 * @hw: pointer to the hardware structure
1096 * @f_info: filter information for switch rule
 
 
1097 *
1098 * Call AQ command to update a previously created switch rule with a
1099 * VSI list ID
1100 */
1101static enum ice_status
1102ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
 
1103{
1104	struct ice_aqc_sw_rules_elem *s_rule;
 
1105	enum ice_status status;
1106
1107	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1108			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1109	if (!s_rule)
1110		return ICE_ERR_NO_MEMORY;
1111
1112	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
 
 
1113
1114	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
 
 
 
1115
1116	/* Update switch rule with new rule set to forward VSI list */
1117	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1118				 ice_aqc_opc_update_sw_rules, NULL);
1119
1120	devm_kfree(ice_hw_to_dev(hw), s_rule);
1121	return status;
1122}
1123
1124/**
1125 * ice_update_sw_rule_bridge_mode
1126 * @hw: pointer to the HW struct
1127 *
1128 * Updates unicast switch filter rules based on VEB/VEPA mode
1129 */
1130enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
1131{
1132	struct ice_switch_info *sw = hw->switch_info;
1133	struct ice_fltr_mgmt_list_entry *fm_entry;
1134	enum ice_status status = 0;
1135	struct list_head *rule_head;
1136	struct mutex *rule_lock; /* Lock to protect filter rule list */
1137
1138	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
1139	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
1140
1141	mutex_lock(rule_lock);
1142	list_for_each_entry(fm_entry, rule_head, list_entry) {
1143		struct ice_fltr_info *fi = &fm_entry->fltr_info;
1144		u8 *addr = fi->l_data.mac.mac_addr;
1145
1146		/* Update unicast Tx rules to reflect the selected
1147		 * VEB/VEPA mode
1148		 */
1149		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
1150		    (fi->fltr_act == ICE_FWD_TO_VSI ||
1151		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1152		     fi->fltr_act == ICE_FWD_TO_Q ||
1153		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
1154			status = ice_update_pkt_fwd_rule(hw, fi);
1155			if (status)
1156				break;
1157		}
1158	}
1159
1160	mutex_unlock(rule_lock);
1161
1162	return status;
1163}
1164
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	/* Queue/queue-group forwarding actions cannot be combined into a
	 * VSI list with another rule
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Reflect the conversion in the book keeping entry too */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	/* Count the extra subscriber only if every step above succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
1278
1279/**
1280 * ice_find_rule_entry - Search a rule entry
1281 * @hw: pointer to the hardware structure
1282 * @recp_id: lookup type for which the specified rule needs to be searched
1283 * @f_info: rule information
1284 *
1285 * Helper function to search for a given rule entry
1286 * Returns pointer to entry storing the rule if found
1287 */
1288static struct ice_fltr_mgmt_list_entry *
1289ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
1290{
1291	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
1292	struct ice_switch_info *sw = hw->switch_info;
1293	struct list_head *list_head;
1294
1295	list_head = &sw->recp_list[recp_id].filt_rules;
1296	list_for_each_entry(list_itr, list_head, list_entry) {
1297		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
1298			    sizeof(f_info->l_data)) &&
1299		    f_info->flag == list_itr->fltr_info.flag) {
1300			ret = list_itr;
1301			break;
1302		}
1303	}
1304	return ret;
1305}
1306
1307/**
1308 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
1309 * @hw: pointer to the hardware structure
1310 * @recp_id: lookup type for which VSI lists needs to be searched
1311 * @vsi_handle: VSI handle to be found in VSI list
1312 * @vsi_list_id: VSI list ID found containing vsi_handle
1313 *
1314 * Helper function to search a VSI list with single entry containing given VSI
1315 * handle element. This can be extended further to search VSI list with more
1316 * than 1 vsi_count. Returns pointer to VSI list entry if found.
1317 */
1318static struct ice_vsi_list_map_info *
1319ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
1320			u16 *vsi_list_id)
1321{
1322	struct ice_vsi_list_map_info *map_info = NULL;
1323	struct ice_switch_info *sw = hw->switch_info;
1324	struct ice_fltr_mgmt_list_entry *list_itr;
1325	struct list_head *list_head;
1326
1327	list_head = &sw->recp_list[recp_id].filt_rules;
1328	list_for_each_entry(list_itr, list_head, list_entry) {
1329		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
1330			map_info = list_itr->vsi_list_info;
1331			if (test_bit(vsi_handle, map_info->vsi_map)) {
1332				*vsi_list_id = map_info->vsi_list_id;
1333				return map_info;
1334			}
1335		}
1336	}
1337	return NULL;
1338}
1339
1340/**
1341 * ice_add_rule_internal - add rule for a given lookup type
1342 * @hw: pointer to the hardware structure
1343 * @recp_id: lookup type (recipe ID) for which rule has to be added
1344 * @f_entry: structure containing MAC forwarding information
1345 *
1346 * Adds or updates the rule lists for a given recipe
1347 */
1348static enum ice_status
1349ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
1350		      struct ice_fltr_list_entry *f_entry)
1351{
1352	struct ice_switch_info *sw = hw->switch_info;
1353	struct ice_fltr_info *new_fltr, *cur_fltr;
1354	struct ice_fltr_mgmt_list_entry *m_entry;
1355	struct mutex *rule_lock; /* Lock to protect filter rule list */
1356	enum ice_status status = 0;
1357
1358	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
1359		return ICE_ERR_PARAM;
1360	f_entry->fltr_info.fwd_id.hw_vsi_id =
1361		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
1362
1363	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
1364
1365	mutex_lock(rule_lock);
1366	new_fltr = &f_entry->fltr_info;
1367	if (new_fltr->flag & ICE_FLTR_RX)
1368		new_fltr->src = hw->port_info->lport;
1369	else if (new_fltr->flag & ICE_FLTR_TX)
1370		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
1371
1372	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
1373	if (!m_entry) {
1374		mutex_unlock(rule_lock);
1375		return ice_create_pkt_fwd_rule(hw, f_entry);
1376	}
1377
1378	cur_fltr = &m_entry->fltr_info;
1379	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
1380	mutex_unlock(rule_lock);
1381
1382	return status;
1383}
1384
1385/**
1386 * ice_remove_vsi_list_rule
1387 * @hw: pointer to the hardware structure
1388 * @vsi_list_id: VSI list ID generated as part of allocate resource
1389 * @lkup_type: switch rule filter lookup type
1390 *
1391 * The VSI list should be emptied before this function is called to remove the
1392 * VSI list.
1393 */
1394static enum ice_status
1395ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
1396			 enum ice_sw_lkup_type lkup_type)
1397{
1398	struct ice_aqc_sw_rules_elem *s_rule;
1399	enum ice_status status;
1400	u16 s_rule_size;
1401
1402	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
1403	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1404	if (!s_rule)
1405		return ICE_ERR_NO_MEMORY;
1406
1407	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
1408	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1409
1410	/* Free the vsi_list resource that we allocated. It is assumed that the
1411	 * list is empty at this point.
1412	 */
1413	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
1414					    ice_aqc_opc_free_res);
1415
1416	devm_kfree(ice_hw_to_dev(hw), s_rule);
1417	return status;
1418}
1419
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Remove @vsi_handle from the VSI list referenced by @fm_list. When only one
 * VSI remains afterwards (non-VLAN lookups), the switch rule is converted
 * back to a direct "forward to VSI" rule and the now-unneeded VSI list
 * resource is freed.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Remove this VSI from the firmware list first; bookkeeping is only
	 * updated after the AQ call succeeds
	 */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* Locate the single VSI left in the list */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Convert the rule back to a direct VSI forward */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
1509
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Remove the given VSI's subscription to a rule; the switch rule itself is
 * only deleted from hardware and the tracking list once no VSI (or shared
 * VSI list reference) still uses it.
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Rule forwards to a single target, so it can go away now */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		/* Tracking entry is dropped only after HW removal succeeds */
		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}
1600
/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * Multicast (and shared-unicast) addresses are added one rule at a time,
 * while exclusive unicast addresses are collected and submitted in bulk
 * AQ calls; per-entry results of the one-at-a-time path are recorded in
 * each entry's status field.
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* Pass 1: validate each entry; add multicast/shared-unicast rules
	 * immediately and count exclusive unicast entries for the bulk add
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Pass 2: fill one switch rule per exclusive unicast address */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* Cap each chunk so the buffer fits in one AQ message */
		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
1753
1754/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1755 * ice_add_vlan_internal - Add one VLAN based filter rule
1756 * @hw: pointer to the hardware structure
1757 * @f_entry: filter entry containing one VLAN information
1758 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	/* Resolve the software VSI handle to the HW VSI number that the
	 * switch rule must reference.
	 */
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	/* VLAN rules are Tx rules sourced from the VSI itself */
	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		/* First rule for this VLAN ID on this port */
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* ice_create_pkt_fwd_rule added a management entry to
			 * the filter list; look it up to attach list info.
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				/* NOTE(review): return value not checked — a
				 * NULL vsi_list_info here would leave the
				 * entry without list tracking; callers such as
				 * ice_vsi_uses_fltr dereference it. Verify.
				 */
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it is
		 * used by exactly one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		/* The shared list holds exactly one VSI; find its handle */
		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		/* NOTE(review): fltr_rule_id was already copied by the struct
		 * assignment above; the next statement is redundant.
		 */
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}
1907
1908/**
1909 * ice_add_vlan - Add VLAN based filter rule
1910 * @hw: pointer to the hardware structure
1911 * @v_list: list of VLAN entries and forwarding information
1912 */
1913enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
 
1914{
1915	struct ice_fltr_list_entry *v_list_itr;
1916
1917	if (!v_list || !hw)
1918		return ICE_ERR_PARAM;
1919
1920	list_for_each_entry(v_list_itr, v_list, list_entry) {
 
 
1921		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
1922			return ICE_ERR_PARAM;
1923		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1924		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
1925		if (v_list_itr->status)
1926			return v_list_itr->status;
 
 
 
1927	}
1928	return 0;
1929}
1930
1931/**
1932 * ice_add_eth_mac - Add ethertype and MAC based filter rule
1933 * @hw: pointer to the hardware structure
1934 * @em_list: list of ether type MAC filter, MAC is optional
1935 *
1936 * This function requires the caller to populate the entries in
1937 * the filter list with the necessary fields (including flags to
1938 * indicate Tx or Rx rules).
1939 */
1940enum ice_status
1941ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
 
1942{
1943	struct ice_fltr_list_entry *em_list_itr;
 
 
1944
1945	if (!em_list || !hw)
1946		return ICE_ERR_PARAM;
 
 
1947
1948	list_for_each_entry(em_list_itr, em_list, list_entry) {
1949		enum ice_sw_lkup_type l_type =
1950			em_list_itr->fltr_info.lkup_type;
 
 
 
1951
1952		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
1953		    l_type != ICE_SW_LKUP_ETHERTYPE)
1954			return ICE_ERR_PARAM;
 
 
 
1955
1956		em_list_itr->status = ice_add_rule_internal(hw, l_type,
1957							    em_list_itr);
1958		if (em_list_itr->status)
1959			return em_list_itr->status;
1960	}
1961	return 0;
1962}
1963
1964/**
1965 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
1966 * @hw: pointer to the hardware structure
1967 * @em_list: list of ethertype or ethertype MAC entries
 
 
1968 */
1969enum ice_status
1970ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
 
1971{
1972	struct ice_fltr_list_entry *em_list_itr, *tmp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1973
1974	if (!em_list || !hw)
1975		return ICE_ERR_PARAM;
 
 
 
1976
1977	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
1978		enum ice_sw_lkup_type l_type =
1979			em_list_itr->fltr_info.lkup_type;
1980
1981		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
1982		    l_type != ICE_SW_LKUP_ETHERTYPE)
1983			return ICE_ERR_PARAM;
 
 
1984
1985		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
1986							       em_list_itr);
1987		if (em_list_itr->status)
1988			return em_list_itr->status;
 
 
1989	}
1990	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1991}
1992
1993/**
1994 * ice_rem_sw_rule_info
1995 * @hw: pointer to the hardware structure
1996 * @rule_head: pointer to the switch list structure that we want to delete
 
 
 
 
 
 
 
 
1997 */
1998static void
1999ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2000{
2001	if (!list_empty(rule_head)) {
2002		struct ice_fltr_mgmt_list_entry *entry;
2003		struct ice_fltr_mgmt_list_entry *tmp;
2004
2005		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
2006			list_del(&entry->list_entry);
2007			devm_kfree(ice_hw_to_dev(hw), entry);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2008		}
2009	}
 
 
 
 
2010}
2011
2012/**
2013 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 
2014 * @hw: pointer to the hardware structure
2015 * @vsi_handle: VSI handle to set as default
2016 * @set: true to add the above mentioned switch rule, false to remove it
2017 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
2018 *
2019 * add filter rule to set/unset given VSI as default VSI for the switch
2020 * (represented by swid)
2021 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* Adding a rule requires the dummy ethernet header appended to the
	 * buffer; removal is done by rule index alone, so it is smaller.
	 */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		/* Rx default rule matches on the physical port as source;
		 * on removal, reuse the rule ID cached when it was set.
		 */
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		/* Tx default rule matches on the VSI itself as source */
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		/* Cache the FW-assigned rule index so the rule can later be
		 * removed, and record which VSI is now the default.
		 */
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		/* Rule removed: invalidate the cached default-VSI state */
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
2098
2099/**
2100 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
2101 * @hw: pointer to the hardware structure
2102 * @recp_id: lookup type for which the specified rule needs to be searched
2103 * @f_info: rule information
2104 *
2105 * Helper function to search for a unicast rule entry - this is to be used
2106 * to remove unicast MAC filter that is not shared with other VSIs on the
2107 * PF switch.
2108 *
2109 * Returns pointer to entry storing the rule if found
2110 */
2111static struct ice_fltr_mgmt_list_entry *
2112ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
2113			  struct ice_fltr_info *f_info)
2114{
2115	struct ice_switch_info *sw = hw->switch_info;
2116	struct ice_fltr_mgmt_list_entry *list_itr;
2117	struct list_head *list_head;
2118
2119	list_head = &sw->recp_list[recp_id].filt_rules;
2120	list_for_each_entry(list_itr, list_head, list_entry) {
2121		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2122			    sizeof(f_info->l_data)) &&
2123		    f_info->fwd_id.hw_vsi_id ==
2124		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
2125		    f_info->flag == list_itr->fltr_info.flag)
2126			return list_itr;
2127	}
2128	return NULL;
2129}
2130
2131/**
2132 * ice_remove_mac - remove a MAC address based filter rule
2133 * @hw: pointer to the hardware structure
2134 * @m_list: list of MAC addresses and forwarding information
2135 *
2136 * This function removes either a MAC filter rule or a specific VSI from a
2137 * VSI list for a multicast MAC address.
2138 *
2139 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
2140 * ice_add_mac. Caller should be aware that this call will only work if all
2141 * the entries passed into m_list were added previously. It will not attempt to
2142 * do a partial remove of entries that were found.
2143 */
2144enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
2145{
2146	struct ice_fltr_list_entry *list_itr, *tmp;
2147	struct mutex *rule_lock; /* Lock to protect filter rule list */
2148
2149	if (!m_list)
2150		return ICE_ERR_PARAM;
2151
2152	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2153	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
2154		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
2155		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
2156		u16 vsi_handle;
2157
2158		if (l_type != ICE_SW_LKUP_MAC)
2159			return ICE_ERR_PARAM;
2160
2161		vsi_handle = list_itr->fltr_info.vsi_handle;
2162		if (!ice_is_vsi_valid(hw, vsi_handle))
2163			return ICE_ERR_PARAM;
2164
2165		list_itr->fltr_info.fwd_id.hw_vsi_id =
2166					ice_get_hw_vsi_num(hw, vsi_handle);
2167		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
2168			/* Don't remove the unicast address that belongs to
2169			 * another VSI on the switch, since it is not being
2170			 * shared...
2171			 */
2172			mutex_lock(rule_lock);
2173			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
2174						       &list_itr->fltr_info)) {
2175				mutex_unlock(rule_lock);
2176				return ICE_ERR_DOES_NOT_EXIST;
2177			}
2178			mutex_unlock(rule_lock);
2179		}
2180		list_itr->status = ice_remove_rule_internal(hw,
2181							    ICE_SW_LKUP_MAC,
2182							    list_itr);
2183		if (list_itr->status)
2184			return list_itr->status;
2185	}
2186	return 0;
2187}
2188
2189/**
2190 * ice_remove_vlan - Remove VLAN based filter rule
2191 * @hw: pointer to the hardware structure
2192 * @v_list: list of VLAN entries and forwarding information
2193 */
2194enum ice_status
2195ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
2196{
2197	struct ice_fltr_list_entry *v_list_itr, *tmp;
 
2198
2199	if (!v_list || !hw)
2200		return ICE_ERR_PARAM;
2201
2202	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2203		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
2204
2205		if (l_type != ICE_SW_LKUP_VLAN)
2206			return ICE_ERR_PARAM;
2207		v_list_itr->status = ice_remove_rule_internal(hw,
2208							      ICE_SW_LKUP_VLAN,
2209							      v_list_itr);
2210		if (v_list_itr->status)
2211			return v_list_itr->status;
2212	}
2213	return 0;
2214}
2215
2216/**
2217 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
2218 * @fm_entry: filter entry to inspect
2219 * @vsi_handle: VSI handle to compare with filter info
2220 */
2221static bool
2222ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
2223{
2224	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
2225		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
2226		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
2227		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
2228}
2229
2230/**
2231 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
2232 * @hw: pointer to the hardware structure
2233 * @vsi_handle: VSI handle to remove filters from
2234 * @vsi_list_head: pointer to the list to add entry to
2235 * @fi: pointer to fltr_info of filter entry to copy & add
2236 *
2237 * Helper function, used when creating a list of filters to remove from
2238 * a specific VSI. The entry added to vsi_list_head is a COPY of the
2239 * original filter entry, with the exception of fltr_info.fltr_act and
2240 * fltr_info.fwd_id fields. These are set such that later logic can
2241 * extract which VSI to remove the fltr from, and pass on that information.
2242 */
2243static enum ice_status
2244ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2245			       struct list_head *vsi_list_head,
2246			       struct ice_fltr_info *fi)
2247{
2248	struct ice_fltr_list_entry *tmp;
2249
2250	/* this memory is freed up in the caller function
2251	 * once filters for this VSI are removed
2252	 */
2253	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
2254	if (!tmp)
2255		return ICE_ERR_NO_MEMORY;
2256
2257	tmp->fltr_info = *fi;
2258
2259	/* Overwrite these fields to indicate which VSI to remove filter from,
2260	 * so find and remove logic can extract the information from the
2261	 * list entries. Note that original entries will still have proper
2262	 * values.
2263	 */
2264	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2265	tmp->fltr_info.vsi_handle = vsi_handle;
2266	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2267
2268	list_add(&tmp->list_entry, vsi_list_head);
2269
2270	return 0;
2271}
2272
2273/**
2274 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
2275 * @hw: pointer to the hardware structure
2276 * @vsi_handle: VSI handle to remove filters from
2277 * @lkup_list_head: pointer to the list that has certain lookup type filters
2278 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
2279 *
2280 * Locates all filters in lkup_list_head that are used by the given VSI,
2281 * and adds COPIES of those entries to vsi_list_head (intended to be used
2282 * to remove the listed filters).
2283 * Note that this means all entries in vsi_list_head must be explicitly
2284 * deallocated by the caller when done with list.
2285 */
2286static enum ice_status
2287ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2288			 struct list_head *lkup_list_head,
2289			 struct list_head *vsi_list_head)
2290{
2291	struct ice_fltr_mgmt_list_entry *fm_entry;
2292	enum ice_status status = 0;
2293
2294	/* check to make sure VSI ID is valid and within boundary */
2295	if (!ice_is_vsi_valid(hw, vsi_handle))
 
2296		return ICE_ERR_PARAM;
2297
2298	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
2299		struct ice_fltr_info *fi;
2300
2301		fi = &fm_entry->fltr_info;
2302		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
2303			continue;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2304
2305		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
2306							vsi_list_head, fi);
2307		if (status)
2308			return status;
2309	}
2310	return status;
2311}
2312
2313/**
2314 * ice_determine_promisc_mask
2315 * @fi: filter info to parse
2316 *
2317 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to a given filter info.
2319 */
2320static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
2321{
2322	u16 vid = fi->l_data.mac_vlan.vlan_id;
2323	u8 *macaddr = fi->l_data.mac.mac_addr;
2324	bool is_tx_fltr = false;
2325	u8 promisc_mask = 0;
2326
2327	if (fi->flag == ICE_FLTR_TX)
2328		is_tx_fltr = true;
2329
2330	if (is_broadcast_ether_addr(macaddr))
2331		promisc_mask |= is_tx_fltr ?
2332			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
2333	else if (is_multicast_ether_addr(macaddr))
2334		promisc_mask |= is_tx_fltr ?
2335			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
2336	else if (is_unicast_ether_addr(macaddr))
2337		promisc_mask |= is_tx_fltr ?
2338			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
2339	if (vid)
2340		promisc_mask |= is_tx_fltr ?
2341			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
2342
2343	return promisc_mask;
2344}
2345
2346/**
2347 * ice_remove_promisc - Remove promisc based filter rules
2348 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
2350 * @v_list: list of promisc entries
2351 */
2352static enum ice_status
2353ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
2354		   struct list_head *v_list)
2355{
2356	struct ice_fltr_list_entry *v_list_itr, *tmp;
2357
2358	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2359		v_list_itr->status =
2360			ice_remove_rule_internal(hw, recp_id, v_list_itr);
2361		if (v_list_itr->status)
2362			return v_list_itr->status;
2363	}
2364	return 0;
2365}
2366
2367/**
2368 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
2369 * @hw: pointer to the hardware structure
2370 * @vsi_handle: VSI handle to clear mode
2371 * @promisc_mask: mask of promiscuous config bits to clear
2372 * @vid: VLAN ID to clear VLAN promiscuous
2373 */
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promiscuous filters live on their own recipe list */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Collect copies of the matching rules while holding the lock,
	 * then remove them from HW after dropping it.
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* For VLAN promiscuous rules, only the requested VLAN ID
		 * is affected.
		 */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* Release the temporary copies regardless of the outcome */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}
2439
2440/**
2441 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
2442 * @hw: pointer to the hardware structure
2443 * @vsi_handle: VSI handle to configure
2444 * @promisc_mask: mask of promiscuous config bits
2445 * @vid: VLAN ID to set VLAN promiscuous
2446 */
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = 0;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	/* VLAN promiscuous uses its own recipe and carries the VLAN ID */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* Pick off one direction/packet-type bit per iteration */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			/* Tx filters are sourced from the VSI itself */
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			/* Rx filters are sourced from the physical port */
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
2551
2552/**
2553 * ice_set_vlan_vsi_promisc
2554 * @hw: pointer to the hardware structure
2555 * @vsi_handle: VSI handle to configure
2556 * @promisc_mask: mask of promiscuous config bits
2557 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
2558 *
2559 * Configure VSI with all associated VLANs to given promiscuous mode(s)
2560 */
2561enum ice_status
2562ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
2563			 bool rm_vlan_promisc)
2564{
2565	struct ice_switch_info *sw = hw->switch_info;
2566	struct ice_fltr_list_entry *list_itr, *tmp;
2567	struct list_head vsi_list_head;
2568	struct list_head *vlan_head;
2569	struct mutex *vlan_lock; /* Lock to protect filter rule list */
2570	enum ice_status status;
2571	u16 vlan_id;
2572
2573	INIT_LIST_HEAD(&vsi_list_head);
2574	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2575	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
2576	mutex_lock(vlan_lock);
2577	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
2578					  &vsi_list_head);
2579	mutex_unlock(vlan_lock);
2580	if (status)
2581		goto free_fltr_list;
2582
2583	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
2584		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
2585		if (rm_vlan_promisc)
2586			status = ice_clear_vsi_promisc(hw, vsi_handle,
2587						       promisc_mask, vlan_id);
2588		else
2589			status = ice_set_vsi_promisc(hw, vsi_handle,
2590						     promisc_mask, vlan_id);
2591		if (status)
2592			break;
2593	}
2594
2595free_fltr_list:
2596	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
2597		list_del(&list_itr->list_entry);
2598		devm_kfree(ice_hw_to_dev(hw), list_itr);
2599	}
2600	return status;
2601}
2602
2603/**
2604 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
2605 * @hw: pointer to the hardware structure
2606 * @vsi_handle: VSI handle to remove filters from
2607 * @lkup: switch rule filter lookup type
2608 */
2609static void
2610ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
2611			 enum ice_sw_lkup_type lkup)
2612{
2613	struct ice_switch_info *sw = hw->switch_info;
2614	struct ice_fltr_list_entry *fm_entry;
2615	struct list_head remove_list_head;
2616	struct list_head *rule_head;
2617	struct ice_fltr_list_entry *tmp;
2618	struct mutex *rule_lock;	/* Lock to protect filter rule list */
2619	enum ice_status status;
2620
2621	INIT_LIST_HEAD(&remove_list_head);
2622	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
2623	rule_head = &sw->recp_list[lkup].filt_rules;
2624	mutex_lock(rule_lock);
2625	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
2626					  &remove_list_head);
2627	mutex_unlock(rule_lock);
2628	if (status)
2629		return;
2630
2631	switch (lkup) {
2632	case ICE_SW_LKUP_MAC:
2633		ice_remove_mac(hw, &remove_list_head);
 
 
 
 
 
 
 
 
2634		break;
2635	case ICE_SW_LKUP_VLAN:
2636		ice_remove_vlan(hw, &remove_list_head);
2637		break;
2638	case ICE_SW_LKUP_PROMISC:
2639	case ICE_SW_LKUP_PROMISC_VLAN:
2640		ice_remove_promisc(hw, lkup, &remove_list_head);
 
 
 
 
2641		break;
2642	case ICE_SW_LKUP_MAC_VLAN:
2643	case ICE_SW_LKUP_ETHERTYPE:
2644	case ICE_SW_LKUP_ETHERTYPE_MAC:
 
 
2645	case ICE_SW_LKUP_DFLT:
2646	case ICE_SW_LKUP_LAST:
2647	default:
2648		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
2649		break;
2650	}
2651
 
 
2652	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
2653		list_del(&fm_entry->list_entry);
2654		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2655	}
2656}
2657
2658/**
2659 * ice_remove_vsi_fltr - Remove all filters for a VSI
2660 * @hw: pointer to the hardware structure
2661 * @vsi_handle: VSI handle to remove filters from
2662 */
2663void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
2664{
2665	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
2666	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
2667	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
2668	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
2669	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
2670	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
2671	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
2672	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
2673}
2674
2675/**
2676 * ice_alloc_res_cntr - allocating resource counter
2677 * @hw: pointer to the hardware structure
2678 * @type: type of resource
2679 * @alloc_shared: if set it is shared else dedicated
2680 * @num_items: number of entries requested for FD resource type
2681 * @counter_id: counter index returned by AQ call
2682 */
2683enum ice_status
2684ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
2685		   u16 *counter_id)
2686{
2687	struct ice_aqc_alloc_free_res_elem *buf;
2688	enum ice_status status;
2689	u16 buf_len;
2690
2691	/* Allocate resource */
2692	buf_len = struct_size(buf, elem, 1);
2693	buf = kzalloc(buf_len, GFP_KERNEL);
2694	if (!buf)
2695		return ICE_ERR_NO_MEMORY;
2696
2697	buf->num_elems = cpu_to_le16(num_items);
2698	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
2699				      ICE_AQC_RES_TYPE_M) | alloc_shared);
2700
2701	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2702				       ice_aqc_opc_alloc_res, NULL);
2703	if (status)
2704		goto exit;
2705
2706	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
2707
2708exit:
2709	kfree(buf);
2710	return status;
2711}
2712
2713/**
2714 * ice_free_res_cntr - free resource counter
2715 * @hw: pointer to the hardware structure
2716 * @type: type of resource
2717 * @alloc_shared: if set it is shared else dedicated
2718 * @num_items: number of entries to be freed for FD resource type
2719 * @counter_id: counter ID resource which needs to be freed
2720 */
2721enum ice_status
2722ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
2723		  u16 counter_id)
2724{
2725	struct ice_aqc_alloc_free_res_elem *buf;
2726	enum ice_status status;
2727	u16 buf_len;
2728
2729	/* Free resource */
2730	buf_len = struct_size(buf, elem, 1);
2731	buf = kzalloc(buf_len, GFP_KERNEL);
2732	if (!buf)
2733		return ICE_ERR_NO_MEMORY;
2734
2735	buf->num_elems = cpu_to_le16(num_items);
2736	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
2737				      ICE_AQC_RES_TYPE_M) | alloc_shared);
2738	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
2739
2740	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2741				       ice_aqc_opc_free_res, NULL);
2742	if (status)
2743		ice_debug(hw, ICE_DBG_SW,
2744			  "counter resource could not be freed\n");
2745
2746	kfree(buf);
2747	return status;
2748}
2749
2750/**
2751 * ice_replay_vsi_fltr - Replay filters for requested VSI
2752 * @hw: pointer to the hardware structure
2753 * @vsi_handle: driver VSI handle
2754 * @recp_id: Recipe ID for which rules need to be replayed
2755 * @list_head: list for which filters need to be replayed
2756 *
2757 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
2758 * It is required to pass valid VSI handle.
2759 */
2760static enum ice_status
2761ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
2762		    struct list_head *list_head)
2763{
2764	struct ice_fltr_mgmt_list_entry *itr;
2765	enum ice_status status = 0;
2766	u16 hw_vsi_id;
2767
2768	if (list_empty(list_head))
2769		return status;
2770	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2771
2772	list_for_each_entry(itr, list_head, list_entry) {
2773		struct ice_fltr_list_entry f_entry;
2774
2775		f_entry.fltr_info = itr->fltr_info;
2776		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
2777		    itr->fltr_info.vsi_handle == vsi_handle) {
2778			/* update the src in case it is VSI num */
2779			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
2780				f_entry.fltr_info.src = hw_vsi_id;
2781			status = ice_add_rule_internal(hw, recp_id, &f_entry);
2782			if (status)
2783				goto end;
2784			continue;
2785		}
2786		if (!itr->vsi_list_info ||
2787		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
2788			continue;
2789		/* Clearing it so that the logic can add it back */
2790		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
2791		f_entry.fltr_info.vsi_handle = vsi_handle;
2792		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
2793		/* update the src in case it is VSI num */
2794		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
2795			f_entry.fltr_info.src = hw_vsi_id;
2796		if (recp_id == ICE_SW_LKUP_VLAN)
2797			status = ice_add_vlan_internal(hw, &f_entry);
2798		else
2799			status = ice_add_rule_internal(hw, recp_id, &f_entry);
2800		if (status)
2801			goto end;
2802	}
2803end:
2804	return status;
2805}
2806
2807/**
2808 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
2809 * @hw: pointer to the hardware structure
2810 * @vsi_handle: driver VSI handle
2811 *
2812 * Replays filters for requested VSI via vsi_handle.
2813 */
2814enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
2815{
2816	struct ice_switch_info *sw = hw->switch_info;
2817	enum ice_status status = 0;
2818	u8 i;
2819
2820	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
2821		struct list_head *head;
2822
2823		head = &sw->recp_list[i].filt_replay_rules;
2824		status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
2825		if (status)
2826			return status;
2827	}
2828	return status;
2829}
2830
2831/**
2832 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
2833 * @hw: pointer to the HW struct
2834 *
2835 * Deletes the filter replay rules.
2836 */
2837void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
2838{
2839	struct ice_switch_info *sw = hw->switch_info;
2840	u8 i;
2841
2842	if (!sw)
2843		return;
2844
2845	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
2846		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
2847			struct list_head *l_head;
2848
2849			l_head = &sw->recp_list[i].filt_replay_rules;
2850			ice_rem_sw_rule_info(hw, l_head);
2851		}
2852	}
2853}