// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
 * @pf: pointer to PF struct
 *
 * This function must be called with a device_lock held on
 * pf->adev->dev to avoid race conditions.
 */
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	adev = pf->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

	return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			    adrv.driver);
}

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (WARN_ON_ONCE(!in_task()))
		return;

	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
		goto finish;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}
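
/*
 * Consumer-side sketch: the callback invoked above lives in the auxiliary
 * driver's struct iidc_auxiliary_drv (->event_handler), which this file
 * looks up via container_of() on the bound driver.  The handler name below
 * is hypothetical; only the callback signature used above is assumed.
 *
 *	static void my_rdma_event_handler(struct ice_pf *pf,
 *					  struct iidc_event *event)
 *	{
 *		// react to the PF event here
 *	}
 *
 * The auxiliary driver points .event_handler of its iidc_auxiliary_drv at
 * such a function before registering (see the registration sketch near
 * ice_unplug_aux_dev() below).
 */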

/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be allocated
 */
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	if (!ice_is_rdma_ena(pf))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	vsi->qset_handle[qset->tc] = qset->qs_handle;
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);

/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be freed
 */
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

	vsi->qset_handle[qset->tc] = 0;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
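
/*
 * Usage sketch for the two exported qset helpers above, assuming only the
 * iidc_rdma_qset_params fields this file touches (qs_handle, tc, vport_id,
 * teid); vsi_num and err are hypothetical locals in the auxiliary driver:
 *
 *	struct iidc_rdma_qset_params qset = {};
 *	int err;
 *
 *	qset.qs_handle = 0;		// driver-chosen qset handle
 *	qset.tc = 0;			// traffic class for this qset
 *	qset.vport_id = vsi_num;	// VSI number, used on delete
 *
 *	err = ice_add_rdma_qset(pf, &qset);
 *	if (err)
 *		return err;
 *	// on success qset.teid holds the scheduler node TEID
 *
 *	err = ice_del_rdma_qset(pf, &qset);
 */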

/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @pf: struct for PF
 * @reset_type: type of reset
 */
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
	enum ice_reset_req reset;

	if (WARN_ON(!pf))
		return -EINVAL;

	switch (reset_type) {
	case IIDC_PFR:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_CORER:
		reset = ICE_RESET_CORER;
		break;
	case IIDC_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	default:
		dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
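
/*
 * For example, an auxiliary driver that hits a fatal condition might request
 * a core reset from the PF (sketch):
 *
 *	err = ice_rdma_request_reset(pf, IIDC_CORER);
 */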

/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @pf: pointer to struct for PF
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 */
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	int status;

	if (WARN_ON(!pf))
		return -EINVAL;

	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
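
/*
 * Sketch: a consumer would typically enable the RDMA filter on its VSI when
 * it comes up and clear it again on teardown; vsi_num is hypothetical:
 *
 *	err = ice_rdma_update_vsi_filter(pf, vsi_num, true);
 *	...
 *	ice_rdma_update_vsi_filter(pf, vsi_num, false);
 */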

/**
 * ice_get_qos_params - parse QoS params for RDMA consumption
 * @pf: pointer to PF struct
 * @qos: set of QoS values
 */
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
	struct ice_dcbx_cfg *dcbx_cfg;
	unsigned int i;
	u32 up2tc;

	dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

	qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];

	qos->pfc_mode = dcbx_cfg->pfc_mode;
	if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
		for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
			qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
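
/*
 * Sketch of a consumer reading the snapshot filled in above; only the fields
 * written by ice_get_qos_params() are assumed:
 *
 *	struct iidc_qos_params qos = {};
 *	unsigned int i;
 *
 *	ice_get_qos_params(pf, &qos);
 *	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
 *		pr_debug("UP %u -> TC %u\n", i, qos.up2tc[i]);
 *	if (qos.pfc_mode == IIDC_DSCP_PFC_MODE)
 *		;	// DSCP-based PFC: consult qos.dscp_map[]
 */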

/**
 * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
 * @pf: board private structure to initialize
 */
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
	if (ice_is_rdma_ena(pf)) {
		int index;

		index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
				    ICE_RES_RDMA_VEC_ID);
		if (index < 0)
			return index;
		pf->num_avail_sw_msix -= pf->num_rdma_msix;
		pf->rdma_base_vector = (u16)index;
	}
	return 0;
}

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_rdma_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

	mutex_lock(&pf->adev_mutex);
	pf->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}

/**
 * ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	mutex_lock(&pf->adev_mutex);
	adev = pf->adev;
	pf->adev = NULL;
	mutex_unlock(&pf->adev_mutex);

	if (adev) {
		auxiliary_device_delete(adev);
		auxiliary_device_uninit(adev);
	}
}
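
/*
 * The device plugged/unplugged above is matched on the auxiliary bus by
 * "<module>.<name>", i.e. "ice.roce" or "ice.iwarp".  A minimal sketch of
 * the driver-side registration, with hypothetical probe/remove callbacks:
 *
 *	static const struct auxiliary_device_id my_id_table[] = {
 *		{ .name = "ice.roce" },
 *		{}
 *	};
 *
 *	static struct iidc_auxiliary_drv my_adrv = {
 *		.adrv = {
 *			.id_table = my_id_table,
 *			.probe = my_probe,
 *			.remove = my_remove,
 *		},
 *		.event_handler = my_rdma_event_handler,
 *	};
 *
 *	auxiliary_driver_register(&my_adrv.adrv);
 */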

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	/* Reserve vector resources */
	ret = ice_reserve_rdma_qvector(pf);
	if (ret < 0) {
		dev_err(dev, "failed to reserve vectors for RDMA\n");
		return ret;
	}
	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	return ice_plug_aux_dev(pf);
}
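
/*
 * Init/teardown sketch: ice_init_rdma() reserves the RDMA MSI-X vectors,
 * selects RoCEv2 mode and plugs the auxiliary device; the reverse path is
 * ice_unplug_aux_dev() followed by releasing the vectors reserved in
 * ice_reserve_rdma_qvector() (the teardown helper is not part of this file).
 */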