Loading...
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021, Intel Corporation. */
3
4/* Inter-Driver Communication */
5#include "ice.h"
6#include "ice_lib.h"
7#include "ice_dcb_lib.h"
8
9static DEFINE_XARRAY_ALLOC1(ice_aux_id);
10
11/**
12 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
13 * @pf: pointer to PF struct
14 *
15 * This function has to be called with a device_lock on the
16 * pf->adev.dev to avoid race conditions.
17 */
18static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
19{
20 struct auxiliary_device *adev;
21
22 adev = pf->adev;
23 if (!adev || !adev->dev.driver)
24 return NULL;
25
26 return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
27 adrv.driver);
28}
29
/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 *
 * Forwards @event to the RDMA auxiliary driver's event_handler callback,
 * if an auxiliary device is currently plugged and a driver is bound to it.
 * Must be called from task context: both locks below sleep.
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	/* mutex_lock/device_lock sleep; atomic context is a caller bug */
	if (WARN_ON_ONCE(!in_task()))
		return;

	/* adev_mutex keeps pf->adev stable against plug/unplug */
	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
		goto finish;

	/* device_lock keeps the bound driver from unbinding mid-callback */
	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}
54
55/**
56 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
57 * @pf: PF struct
58 * @qset: Resource to be allocated
59 */
60int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
61{
62 u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
63 struct ice_vsi *vsi;
64 struct device *dev;
65 u32 qset_teid;
66 u16 qs_handle;
67 int status;
68 int i;
69
70 if (WARN_ON(!pf || !qset))
71 return -EINVAL;
72
73 dev = ice_pf_to_dev(pf);
74
75 if (!ice_is_rdma_ena(pf))
76 return -EINVAL;
77
78 vsi = ice_get_main_vsi(pf);
79 if (!vsi) {
80 dev_err(dev, "RDMA QSet invalid VSI\n");
81 return -EINVAL;
82 }
83
84 ice_for_each_traffic_class(i)
85 max_rdmaqs[i] = 0;
86
87 max_rdmaqs[qset->tc]++;
88 qs_handle = qset->qs_handle;
89
90 status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
91 max_rdmaqs);
92 if (status) {
93 dev_err(dev, "Failed VSI RDMA Qset config\n");
94 return status;
95 }
96
97 status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
98 &qs_handle, 1, &qset_teid);
99 if (status) {
100 dev_err(dev, "Failed VSI RDMA Qset enable\n");
101 return status;
102 }
103 vsi->qset_handle[qset->tc] = qset->qs_handle;
104 qset->teid = qset_teid;
105
106 return 0;
107}
108EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
109
110/**
111 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
112 * @pf: PF struct
113 * @qset: Resource to be freed
114 */
115int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
116{
117 struct ice_vsi *vsi;
118 u32 teid;
119 u16 q_id;
120
121 if (WARN_ON(!pf || !qset))
122 return -EINVAL;
123
124 vsi = ice_find_vsi(pf, qset->vport_id);
125 if (!vsi) {
126 dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
127 return -EINVAL;
128 }
129
130 q_id = qset->qs_handle;
131 teid = qset->teid;
132
133 vsi->qset_handle[qset->tc] = 0;
134
135 return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
136}
137EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
138
139/**
140 * ice_rdma_request_reset - accept request from RDMA to perform a reset
141 * @pf: struct for PF
142 * @reset_type: type of reset
143 */
144int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
145{
146 enum ice_reset_req reset;
147
148 if (WARN_ON(!pf))
149 return -EINVAL;
150
151 switch (reset_type) {
152 case IIDC_PFR:
153 reset = ICE_RESET_PFR;
154 break;
155 case IIDC_CORER:
156 reset = ICE_RESET_CORER;
157 break;
158 case IIDC_GLOBR:
159 reset = ICE_RESET_GLOBR;
160 break;
161 default:
162 dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
163 return -EINVAL;
164 }
165
166 return ice_schedule_reset(pf, reset);
167}
168EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
169
170/**
171 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
172 * @pf: pointer to struct for PF
173 * @vsi_id: VSI HW idx to update filter on
174 * @enable: bool whether to enable or disable filters
175 */
176int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
177{
178 struct ice_vsi *vsi;
179 int status;
180
181 if (WARN_ON(!pf))
182 return -EINVAL;
183
184 vsi = ice_find_vsi(pf, vsi_id);
185 if (!vsi)
186 return -EINVAL;
187
188 status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
189 if (status) {
190 dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
191 enable ? "en" : "dis");
192 } else {
193 if (enable)
194 vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
195 else
196 vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
197 }
198
199 return status;
200}
201EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
202
203/**
204 * ice_get_qos_params - parse QoS params for RDMA consumption
205 * @pf: pointer to PF struct
206 * @qos: set of QoS values
207 */
208void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
209{
210 struct ice_dcbx_cfg *dcbx_cfg;
211 unsigned int i;
212 u32 up2tc;
213
214 dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
215 up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
216
217 qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
218 for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
219 qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
220
221 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
222 qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
223
224 qos->pfc_mode = dcbx_cfg->pfc_mode;
225 if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
226 for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
227 qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
228}
229EXPORT_SYMBOL_GPL(ice_get_qos_params);
230
231/**
232 * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
233 * @pf: board private structure to initialize
234 */
235static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
236{
237 if (ice_is_rdma_ena(pf)) {
238 int i;
239
240 pf->msix_entries = kcalloc(pf->num_rdma_msix,
241 sizeof(*pf->msix_entries),
242 GFP_KERNEL);
243 if (!pf->msix_entries)
244 return -ENOMEM;
245
246 /* RDMA is the only user of pf->msix_entries array */
247 pf->rdma_base_vector = 0;
248
249 for (i = 0; i < pf->num_rdma_msix; i++) {
250 struct msix_entry *entry = &pf->msix_entries[i];
251 struct msi_map map;
252
253 map = ice_alloc_irq(pf, false);
254 if (map.index < 0)
255 break;
256
257 entry->entry = map.index;
258 entry->vector = map.virq;
259 }
260 }
261 return 0;
262}
263
264/**
265 * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
266 * @pf: board private structure to initialize
267 */
268static void ice_free_rdma_qvector(struct ice_pf *pf)
269{
270 int i;
271
272 if (!pf->msix_entries)
273 return;
274
275 for (i = 0; i < pf->num_rdma_msix; i++) {
276 struct msi_map map;
277
278 map.index = pf->msix_entries[i].entry;
279 map.virq = pf->msix_entries[i].vector;
280 ice_free_irq(pf, map);
281 }
282
283 kfree(pf->msix_entries);
284 pf->msix_entries = NULL;
285}
286
287/**
288 * ice_adev_release - function to be mapped to AUX dev's release op
289 * @dev: pointer to device to free
290 */
291static void ice_adev_release(struct device *dev)
292{
293 struct iidc_auxiliary_dev *iadev;
294
295 iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
296 kfree(iadev);
297}
298
/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 *
 * Creates and registers the RDMA auxiliary device ("roce" or "iwarp"
 * depending on pf->rdma_mode).  pf->adev is published only after the
 * device is fully registered, under adev_mutex, so event senders never
 * see a half-initialized device.
 *
 * Return: 0 on success or when RDMA is unsupported, negative errno on
 * failure.
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_rdma_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		/* init failed: release op not armed yet, free manually */
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		/* after init, uninit drops the ref and ice_adev_release
		 * frees iadev — do not kfree here
		 */
		auxiliary_device_uninit(adev);
		return ret;
	}

	/* publish under adev_mutex so ice_send_event_to_aux sees either
	 * NULL or a fully registered device
	 */
	mutex_lock(&pf->adev_mutex);
	pf->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}
345
346/* ice_unplug_aux_dev - unregister and free AUX device
347 * @pf: pointer to pf struct
348 */
349void ice_unplug_aux_dev(struct ice_pf *pf)
350{
351 struct auxiliary_device *adev;
352
353 mutex_lock(&pf->adev_mutex);
354 adev = pf->adev;
355 pf->adev = NULL;
356 mutex_unlock(&pf->adev_mutex);
357
358 if (adev) {
359 auxiliary_device_delete(adev);
360 auxiliary_device_uninit(adev);
361 }
362}
363
364/**
365 * ice_init_rdma - initializes PF for RDMA use
366 * @pf: ptr to ice_pf
367 */
368int ice_init_rdma(struct ice_pf *pf)
369{
370 struct device *dev = &pf->pdev->dev;
371 int ret;
372
373 if (!ice_is_rdma_ena(pf)) {
374 dev_warn(dev, "RDMA is not supported on this device\n");
375 return 0;
376 }
377
378 ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
379 GFP_KERNEL);
380 if (ret) {
381 dev_err(dev, "Failed to allocate device ID for AUX driver\n");
382 return -ENOMEM;
383 }
384
385 /* Reserve vector resources */
386 ret = ice_alloc_rdma_qvectors(pf);
387 if (ret < 0) {
388 dev_err(dev, "failed to reserve vectors for RDMA\n");
389 goto err_reserve_rdma_qvector;
390 }
391 pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
392 ret = ice_plug_aux_dev(pf);
393 if (ret)
394 goto err_plug_aux_dev;
395 return 0;
396
397err_plug_aux_dev:
398 ice_free_rdma_qvector(pf);
399err_reserve_rdma_qvector:
400 pf->adev = NULL;
401 xa_erase(&ice_aux_id, pf->aux_idx);
402 return ret;
403}
404
/**
 * ice_deinit_rdma - deinitialize RDMA on PF
 * @pf: ptr to ice_pf
 *
 * Reverses ice_init_rdma() in the opposite order: unplugs the auxiliary
 * device, releases the reserved RDMA vectors, and returns the auxiliary
 * device ID to the allocator.  No-op when RDMA is unsupported.
 */
void ice_deinit_rdma(struct ice_pf *pf)
{
	if (!ice_is_rdma_ena(pf))
		return;

	ice_unplug_aux_dev(pf);
	ice_free_rdma_qvector(pf);
	xa_erase(&ice_aux_id, pf->aux_idx);
}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021, Intel Corporation. */
3
4/* Inter-Driver Communication */
5#include "ice.h"
6#include "ice_lib.h"
7#include "ice_dcb_lib.h"
8
9/**
10 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
11 * @pf: pointer to PF struct
12 *
13 * This function has to be called with a device_lock on the
14 * pf->adev.dev to avoid race conditions.
15 */
16static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
17{
18 struct auxiliary_device *adev;
19
20 adev = pf->adev;
21 if (!adev || !adev->dev.driver)
22 return NULL;
23
24 return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
25 adrv.driver);
26}
27
/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 *
 * Forwards @event to the RDMA auxiliary driver's event_handler callback,
 * if an auxiliary device is plugged and a driver is bound to it.
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	/* NOTE(review): pf->adev is read here without serialization; if
	 * plug/unplug can run concurrently, this check-then-lock sequence
	 * is racy — confirm against the plug/unplug callers or guard the
	 * pointer with a lock.
	 */
	if (!pf->adev)
		return;

	/* device_lock keeps the bound driver from unbinding mid-callback */
	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
}
46
47/**
48 * ice_find_vsi - Find the VSI from VSI ID
49 * @pf: The PF pointer to search in
50 * @vsi_num: The VSI ID to search for
51 */
52static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
53{
54 int i;
55
56 ice_for_each_vsi(pf, i)
57 if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
58 return pf->vsi[i];
59 return NULL;
60}
61
62/**
63 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
64 * @pf: PF struct
65 * @qset: Resource to be allocated
66 */
67int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
68{
69 u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
70 struct ice_vsi *vsi;
71 struct device *dev;
72 u32 qset_teid;
73 u16 qs_handle;
74 int status;
75 int i;
76
77 if (WARN_ON(!pf || !qset))
78 return -EINVAL;
79
80 dev = ice_pf_to_dev(pf);
81
82 if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
83 return -EINVAL;
84
85 vsi = ice_get_main_vsi(pf);
86 if (!vsi) {
87 dev_err(dev, "RDMA QSet invalid VSI\n");
88 return -EINVAL;
89 }
90
91 ice_for_each_traffic_class(i)
92 max_rdmaqs[i] = 0;
93
94 max_rdmaqs[qset->tc]++;
95 qs_handle = qset->qs_handle;
96
97 status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
98 max_rdmaqs);
99 if (status) {
100 dev_err(dev, "Failed VSI RDMA Qset config\n");
101 return status;
102 }
103
104 status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
105 &qs_handle, 1, &qset_teid);
106 if (status) {
107 dev_err(dev, "Failed VSI RDMA Qset enable\n");
108 return status;
109 }
110 vsi->qset_handle[qset->tc] = qset->qs_handle;
111 qset->teid = qset_teid;
112
113 return 0;
114}
115EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
116
117/**
118 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
119 * @pf: PF struct
120 * @qset: Resource to be freed
121 */
122int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
123{
124 struct ice_vsi *vsi;
125 u32 teid;
126 u16 q_id;
127
128 if (WARN_ON(!pf || !qset))
129 return -EINVAL;
130
131 vsi = ice_find_vsi(pf, qset->vport_id);
132 if (!vsi) {
133 dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
134 return -EINVAL;
135 }
136
137 q_id = qset->qs_handle;
138 teid = qset->teid;
139
140 vsi->qset_handle[qset->tc] = 0;
141
142 return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
143}
144EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
145
146/**
147 * ice_rdma_request_reset - accept request from RDMA to perform a reset
148 * @pf: struct for PF
149 * @reset_type: type of reset
150 */
151int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
152{
153 enum ice_reset_req reset;
154
155 if (WARN_ON(!pf))
156 return -EINVAL;
157
158 switch (reset_type) {
159 case IIDC_PFR:
160 reset = ICE_RESET_PFR;
161 break;
162 case IIDC_CORER:
163 reset = ICE_RESET_CORER;
164 break;
165 case IIDC_GLOBR:
166 reset = ICE_RESET_GLOBR;
167 break;
168 default:
169 dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
170 return -EINVAL;
171 }
172
173 return ice_schedule_reset(pf, reset);
174}
175EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
176
177/**
178 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
179 * @pf: pointer to struct for PF
180 * @vsi_id: VSI HW idx to update filter on
181 * @enable: bool whether to enable or disable filters
182 */
183int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
184{
185 struct ice_vsi *vsi;
186 int status;
187
188 if (WARN_ON(!pf))
189 return -EINVAL;
190
191 vsi = ice_find_vsi(pf, vsi_id);
192 if (!vsi)
193 return -EINVAL;
194
195 status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
196 if (status) {
197 dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
198 enable ? "en" : "dis");
199 } else {
200 if (enable)
201 vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
202 else
203 vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
204 }
205
206 return status;
207}
208EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
209
210/**
211 * ice_get_qos_params - parse QoS params for RDMA consumption
212 * @pf: pointer to PF struct
213 * @qos: set of QoS values
214 */
215void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
216{
217 struct ice_dcbx_cfg *dcbx_cfg;
218 unsigned int i;
219 u32 up2tc;
220
221 dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
222 up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
223
224 qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
225 for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
226 qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
227
228 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
229 qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
230}
231EXPORT_SYMBOL_GPL(ice_get_qos_params);
232
233/**
234 * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
235 * @pf: board private structure to initialize
236 */
237static int ice_reserve_rdma_qvector(struct ice_pf *pf)
238{
239 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
240 int index;
241
242 index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
243 ICE_RES_RDMA_VEC_ID);
244 if (index < 0)
245 return index;
246 pf->num_avail_sw_msix -= pf->num_rdma_msix;
247 pf->rdma_base_vector = (u16)index;
248 }
249 return 0;
250}
251
252/**
253 * ice_adev_release - function to be mapped to AUX dev's release op
254 * @dev: pointer to device to free
255 */
256static void ice_adev_release(struct device *dev)
257{
258 struct iidc_auxiliary_dev *iadev;
259
260 iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
261 kfree(iadev);
262}
263
/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 *
 * Creates and registers the RDMA (RoCE) auxiliary device.
 *
 * Return: 0 on success or when aux devices are unsupported, negative
 * errno on failure.
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_aux_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	/* NOTE(review): pf->adev becomes visible to other paths (e.g.
	 * ice_send_event_to_aux) before the device is init'ed/registered,
	 * and is reset on the failure paths below — confirm no reader can
	 * observe the half-constructed device concurrently.
	 */
	pf->adev = adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = IIDC_RDMA_ROCE_NAME;

	ret = auxiliary_device_init(adev);
	if (ret) {
		/* init failed: release op not armed yet, free manually */
		pf->adev = NULL;
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		/* after init, uninit drops the ref and ice_adev_release
		 * frees iadev — do not kfree here
		 */
		pf->adev = NULL;
		auxiliary_device_uninit(adev);
		return ret;
	}

	return 0;
}
309
310/* ice_unplug_aux_dev - unregister and free AUX device
311 * @pf: pointer to pf struct
312 */
313void ice_unplug_aux_dev(struct ice_pf *pf)
314{
315 if (!pf->adev)
316 return;
317
318 auxiliary_device_delete(pf->adev);
319 auxiliary_device_uninit(pf->adev);
320 pf->adev = NULL;
321}
322
/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 *
 * Reserves MSI-X vectors for RDMA and plugs the RDMA auxiliary device.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	/* Reserve vector resources */
	ret = ice_reserve_rdma_qvector(pf);
	if (ret < 0) {
		dev_err(dev, "failed to reserve vectors for RDMA\n");
		return ret;
	}

	/* NOTE(review): if ice_plug_aux_dev() fails, the vectors reserved
	 * above are not released here — confirm the caller unwinds them,
	 * otherwise they leak from the IRQ tracker.
	 */
	return ice_plug_aux_dev(pf);
}