v6.8: drivers/infiniband/hw/irdma/uda.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2016 - 2021 Intel Corporation */
#include <linux/etherdevice.h>

#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "uda.h"
#include "uda_d.h"

/**
 * irdma_sc_access_ah() - Create, modify or delete AH
 * @cqp: struct for cqp hw
 * @info: ah information
 * @op: Operation
 * @scratch: u64 saved to be used during cqp completion
 */
int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
		       u32 op, u64 scratch)
{
	__le64 *wqe;
	u64 qw1, qw2;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

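	/* qw0: MAC address shifted into bits 63:16 */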
	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
	qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
	      FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);

	qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);

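	/* IPv6 when ipv4_valid is clear: program full 128-bit src/dst addresses */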
	if (!info->ipv4_valid) {
		set_64bit_val(wqe, 40,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));

		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
	} else {
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));

		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
	}

	set_64bit_val(wqe, 8, qw1);
	set_64bit_val(wqe, 16, qw2);

	dma_wmb(); /* need write memory barrier before writing the WQE header */

	set_64bit_val(
		wqe, 24,
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));

	print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}
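
/*
 * A minimal caller sketch (wrapper and opcode names assumed, not
 * verified against this tree): the expected pattern is a thin inline
 * wrapper per operation that passes the matching CQP opcode through,
 * e.g. for create:
 *
 *	static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
 *					     struct irdma_ah_info *info,
 *					     u64 scratch)
 *	{
 *		return irdma_sc_access_ah(cqp, info,
 *					  IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
 *					  scratch);
 *	}
 */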

/**
 * irdma_create_mg_ctx() - create a mcg context
 * @info: multicast group context info
 */
static void irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
	struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
	u8 idx = 0; /* index in the array */
	u8 ctx_idx = 0; /* index in the MG context */

	memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));

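	/* pack only the valid entries contiguously into the DMA-able context */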
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		entry_info = &info->mg_ctx_info[idx];
		if (entry_info->valid_entry) {
			set_64bit_val((__le64 *)info->dma_mem_mc.va,
				      ctx_idx * sizeof(u64),
				      FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
			ctx_idx++;
		}
	}
}

/**
 * irdma_access_mcast_grp() - Access mcast group based on op
 * @cqp: Control QP
 * @info: multicast group context info
 * @op: operation to perform
 * @scratch: u64 saved to be used during cqp completion
 */
int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
			   struct irdma_mcast_grp_info *info, u32 op,
			   u64 scratch)
{
	__le64 *wqe;

	if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
		return -EINVAL;
	}

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
		return -ENOMEM;
	}

	irdma_create_mg_ctx(info);

	set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
	set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));

	if (!info->ipv4_valid) {
		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
	} else {
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
	}

	dma_wmb(); /* need write memory barrier before writing the WQE header */

	set_64bit_val(wqe, 24,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));

	print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
			     8, info->dma_mem_mc.va,
			     IRDMA_MAX_MGS_PER_CTX * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_compare_mgs - Compares two multicast group structures
 * @entry1: Multicast group info
 * @entry2: Multicast group info in context
 */
static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
			      struct irdma_mcast_grp_ctx_entry_info *entry2)
{
	if (entry1->dest_port == entry2->dest_port &&
	    entry1->qp_id == entry2->qp_id)
		return true;

	return false;
}

/**
 * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
 * @ctx: Multicast group context
 * @mg: Multicast group info
 */
int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
			   struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;
	bool free_entry_found = false;
	u32 free_entry_idx = 0;

	/* find either an identical or a free entry for a multicast group */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (ctx->mg_ctx_info[idx].valid_entry) {
			if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
				ctx->mg_ctx_info[idx].use_cnt++;
				return 0;
			}
			continue;
		}
		if (!free_entry_found) {
			free_entry_found = true;
			free_entry_idx = idx;
		}
	}

	if (free_entry_found) {
		ctx->mg_ctx_info[free_entry_idx] = *mg;
		ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
		ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
		ctx->no_of_mgs++;
		return 0;
	}

	return -ENOMEM;
}
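
/*
 * A minimal attach sketch (opcode name assumed, not verified against
 * this tree): update the software context first, then post the CQP
 * operation so the hardware sees the freshly packed entry table.
 *
 *	if (!irdma_sc_add_mcast_grp(&mc_ctx, &entry))
 *		irdma_access_mcast_grp(cqp, &mc_ctx,
 *				       IRDMA_CQP_OP_CREATE_MCAST_GRP,
 *				       scratch);
 */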

/**
 * irdma_sc_del_mcast_grp - Delete mcast group
 * @ctx: Multicast group context
 * @mg: Multicast group info
 *
 * Finds and removes a specific multicast group from the context; all
 * parameters must match for the group to be removed.
 */
int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
			   struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;

	/* find an entry in multicast group context */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (!ctx->mg_ctx_info[idx].valid_entry)
			continue;

		if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
			ctx->mg_ctx_info[idx].use_cnt--;

			if (!ctx->mg_ctx_info[idx].use_cnt) {
				ctx->mg_ctx_info[idx].valid_entry = false;
				ctx->no_of_mgs--;
				/* Remove gap if element was not the last */
				if (idx != ctx->no_of_mgs &&
				    ctx->no_of_mgs > 0) {
					memcpy(&ctx->mg_ctx_info[idx],
					       &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
					       sizeof(ctx->mg_ctx_info[idx]));
					ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
				}
			}

			return 0;
		}
	}

	return -EINVAL;
}
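
/*
 * The matching detach sketch (opcode name assumed, not verified against
 * this tree): drop the entry from the software context, then destroy
 * the hardware group once no members remain.
 *
 *	if (!irdma_sc_del_mcast_grp(&mc_ctx, &entry) && !mc_ctx.no_of_mgs)
 *		irdma_access_mcast_grp(cqp, &mc_ctx,
 *				       IRDMA_CQP_OP_DESTROY_MCAST_GRP,
 *				       scratch);
 */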
v5.14.15: drivers/infiniband/hw/irdma/uda.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2016 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "uda.h"
#include "uda_d.h"

/**
 * irdma_sc_access_ah() - Create, modify or delete AH
 * @cqp: struct for cqp hw
 * @info: ah information
 * @op: Operation
 * @scratch: u64 saved to be used during cqp completion
 */
enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
					  struct irdma_ah_info *info,
					  u32 op, u64 scratch)
{
	__le64 *wqe;
	u64 qw1, qw2;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return IRDMA_ERR_RING_FULL;

	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
	qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
	      FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);

	qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);

	if (!info->ipv4_valid) {
		set_64bit_val(wqe, 40,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));

		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
	} else {
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));

		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
	}

	set_64bit_val(wqe, 8, qw1);
	set_64bit_val(wqe, 16, qw2);

	dma_wmb(); /* need write memory barrier before writing the WQE header */

	set_64bit_val(
		wqe, 24,
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));

	print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_create_mg_ctx() - create a mcg context
 * @info: multicast group context info
 */
static enum irdma_status_code
irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
	struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
	u8 idx = 0; /* index in the array */
	u8 ctx_idx = 0; /* index in the MG context */

	memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));

	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		entry_info = &info->mg_ctx_info[idx];
		if (entry_info->valid_entry) {
			set_64bit_val((__le64 *)info->dma_mem_mc.va,
				      ctx_idx * sizeof(u64),
				      FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
			ctx_idx++;
		}
	}

	return 0;
}

/**
 * irdma_access_mcast_grp() - Access mcast group based on op
 * @cqp: Control QP
 * @info: multicast group context info
 * @op: operation to perform
 * @scratch: u64 saved to be used during cqp completion
 */
enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
					      struct irdma_mcast_grp_info *info,
					      u32 op, u64 scratch)
{
	__le64 *wqe;
	enum irdma_status_code ret_code = 0;

	if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
		return IRDMA_ERR_PARAM;
	}

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
		return IRDMA_ERR_RING_FULL;
	}

	ret_code = irdma_create_mg_ctx(info);
	if (ret_code)
		return ret_code;

	set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
	set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));

	if (!info->ipv4_valid) {
		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
	} else {
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
	}

	dma_wmb(); /* need write memory barrier before writing the WQE header */

	set_64bit_val(wqe, 24,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));

	print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
			     8, info->dma_mem_mc.va,
			     IRDMA_MAX_MGS_PER_CTX * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_compare_mgs - Compares two multicast group structures
 * @entry1: Multicast group info
 * @entry2: Multicast group info in context
 */
static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
			      struct irdma_mcast_grp_ctx_entry_info *entry2)
{
	if (entry1->dest_port == entry2->dest_port &&
	    entry1->qp_id == entry2->qp_id)
		return true;

	return false;
}

/**
 * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
 * @ctx: Multicast group context
 * @mg: Multicast group info
 */
enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
					      struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;
	bool free_entry_found = false;
	u32 free_entry_idx = 0;

	/* find either an identical or a free entry for a multicast group */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (ctx->mg_ctx_info[idx].valid_entry) {
			if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
				ctx->mg_ctx_info[idx].use_cnt++;
				return 0;
			}
			continue;
		}
		if (!free_entry_found) {
			free_entry_found = true;
			free_entry_idx = idx;
		}
	}

	if (free_entry_found) {
		ctx->mg_ctx_info[free_entry_idx] = *mg;
		ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
		ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
		ctx->no_of_mgs++;
		return 0;
	}

	return IRDMA_ERR_NO_MEMORY;
}

/**
 * irdma_sc_del_mcast_grp - Delete mcast group
 * @ctx: Multicast group context
 * @mg: Multicast group info
 *
 * Finds and removes a specific multicast group from the context; all
 * parameters must match for the group to be removed.
 */
enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
					      struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;

	/* find an entry in multicast group context */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (!ctx->mg_ctx_info[idx].valid_entry)
			continue;

		if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
			ctx->mg_ctx_info[idx].use_cnt--;

			if (!ctx->mg_ctx_info[idx].use_cnt) {
				ctx->mg_ctx_info[idx].valid_entry = false;
				ctx->no_of_mgs--;
				/* Remove gap if element was not the last */
				if (idx != ctx->no_of_mgs &&
				    ctx->no_of_mgs > 0) {
					memcpy(&ctx->mg_ctx_info[idx],
					       &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
					       sizeof(ctx->mg_ctx_info[idx]));
					ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
				}
			}

			return 0;
		}
	}

	return IRDMA_ERR_PARAM;
}