// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/if_vlan.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);

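/* Dispatch a RoCE async event received over the FW event ring. A
 * DESTROY_QP_DONE completion releases the icid that was offloaded to the
 * FW; SRQ empty/limit events are forwarded to the upper layer with the
 * SRQ id taken from the low dword of the async handle; any other event is
 * passed up with the raw async handle.
 */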
static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
                                __le16 echo, union event_ring_data *data,
                                u8 fw_return_code)
{
        struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
        union rdma_eqe_data *rdata = &data->rdma_data;

        if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
                u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);

                /* icid release in this async event can occur only if the icid
                 * was offloaded to the FW. In case it wasn't offloaded this is
                 * handled in qed_roce_sp_destroy_qp.
                 */
                qed_roce_free_real_icid(p_hwfn, icid);
        } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
                   fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
                u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);

                events.affiliated_event(events.context, fw_event_code,
                                        &srq_id);
        } else {
                events.affiliated_event(events.context, fw_event_code,
                                        (void *)&rdata->async_handle);
        }

        return 0;
}

void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
        struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
        int wait_count = 0;

        /* When destroying a RoCE QP the control is returned to the user after
         * the synchronous part. The asynchronous part may take a little
         * longer. We delay for a short while if an async destroy QP is still
         * expected. Beyond the added delay we clear the bitmap anyway.
         */
        while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
                /* If the HW device is during recovery, all resources are
                 * immediately reset without receiving a per-cid indication
                 * from HW. In this case we don't expect the cid bitmap to be
                 * cleared.
                 */
                if (p_hwfn->cdev->recov_in_prog)
                        return;

                msleep(100);
                if (wait_count++ > 20) {
                        DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
                        break;
                }
        }
}

static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
                               __le32 *dst_gid)
{
        u32 i;

        if (qp->roce_mode == ROCE_V2_IPV4) {
                /* The IPv4 addresses shall be aligned to the highest word.
                 * The lower words must be zero.
                 */
                memset(src_gid, 0, sizeof(union qed_gid));
                memset(dst_gid, 0, sizeof(union qed_gid));
                src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
                dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
        } else {
                /* GIDs and IPv6 addresses coincide in location and size */
                for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
                        src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
                        dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
                }
        }
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
        switch (roce_mode) {
        case ROCE_V1:
                return PLAIN_ROCE;
        case ROCE_V2_IPV4:
                return RROCE_IPV4;
        case ROCE_V2_IPV6:
                return RROCE_IPV6;
        default:
                return MAX_ROCE_FLAVOR;
        }
}

static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

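/* Allocate the CID pair backing a new QP. Every RoCE QP consumes two
 * adjacent icids - an even one for the responder and the following odd
 * one for the requester (see qed_roce_free_real_icid()). Only the
 * responder (even) CID is returned; the requester CID is implicitly
 * cid + 1.
 */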
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
        u32 responder_icid;
        u32 requester_icid;
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
                                    &responder_icid);
        if (rc) {
                spin_unlock_bh(&p_rdma_info->lock);
                return rc;
        }

        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
                                    &requester_icid);

        spin_unlock_bh(&p_rdma_info->lock);
        if (rc)
                goto err;

        /* The two icids should be adjacent */
        if ((requester_icid - responder_icid) != 1) {
                DP_NOTICE(p_hwfn, "Failed to allocate two adjacent icids\n");
                rc = -EINVAL;
                goto err;
        }

        responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
                                                      p_rdma_info->proto);
        requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
                                                      p_rdma_info->proto);

        /* If these icids require a new ILT line, allocate a DMA-able context
         * for an ILT page.
         */
        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
        if (rc)
                goto err;

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
        if (rc)
                goto err;

        *cid = (u16)responder_icid;
        return rc;

err:
        spin_lock_bh(&p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
        qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

        spin_unlock_bh(&p_rdma_info->lock);
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "Allocate CID - failed, rc = %d\n", rc);
        return rc;
}

static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

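/* Resolve the traffic class for a QP from the 802.1p priority bits of its
 * VLAN tag via DCBX. Untagged QPs default to TC 0.
 */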
static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        u8 pri, tc = 0;

        if (qp->vlan_id) {
                pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "qp icid %u tc: %u (vlan priority %s)\n",
                   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");

        return tc;
}

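/* Offload the responder side of a QP to the FW: allocate one DMA-able
 * page for the IRQ ring (presumably the incoming RDMA read queue, judging
 * by the matching max_ird field - the FW interface does not spell out the
 * acronym) and post a CREATE_QP ramrod carrying the RQ PBL, GIDs and flow
 * parameters.
 */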
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp)
{
        struct roce_create_qp_resp_ramrod_data *p_ramrod;
        u16 regular_latency_queue, low_latency_queue;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        enum protocol_type proto;
        u32 flags = 0;
        int rc;
        u8 tc;

        if (!qp->has_resp)
                return 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        /* Allocate DMA-able memory for IRQ */
        qp->irq_num_pages = 1;
        qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     RDMA_RING_PAGE_SIZE,
                                     &qp->irq_phys_addr, GFP_KERNEL);
        if (!qp->irq) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
                          rc);
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err;

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
                  qed_roce_mode_to_flavor(qp->roce_mode));

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                  qp->e2e_flow_control_en);

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
                  qp->min_rnr_nak_timer);

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
                  qed_rdma_is_xrc_qp(qp));

        p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
        p_ramrod->flags = cpu_to_le32(flags);
        p_ramrod->max_ird = qp->max_rd_atomic_resp;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->irq_num_pages = qp->irq_num_pages;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
        p_ramrod->pd = cpu_to_le16(qp->pd);
        p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
        DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
        p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
        p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
        p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
        p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                                       qp->rq_cq_id);
        p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);

        tc = qed_roce_get_qp_tc(p_hwfn, qp);
        regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
        low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "qp icid %u pqs: regular_latency %u low_latency %u\n",
                   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
                   low_latency_queue - CM_TX_PQ_BASE);
        p_ramrod->regular_latency_phy_queue =
            cpu_to_le16(regular_latency_queue);
        p_ramrod->low_latency_phy_queue =
            cpu_to_le16(low_latency_queue);

        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
        qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

        p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
        p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
        p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

        p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
                                     qp->stats_queue;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        qp->resp_offloaded = true;
        qp->cq_prod = 0;

        proto = p_hwfn->p_rdma_info->proto;
        qed_roce_set_real_cid(p_hwfn, qp->icid -
                              qed_cxt_get_proto_cid_start(p_hwfn, proto));

        return rc;

err:
        DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->irq, qp->irq_phys_addr);

        return rc;
}

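/* Offload the requester side of a QP to the FW on CID icid + 1. Mirrors
 * the responder flow: allocate one DMA-able page for the ORQ ring
 * (presumably the outstanding RDMA read queue, matching the max_ord
 * field) and post a CREATE_QP ramrod with the SQ PBL and retry/timeout
 * parameters.
 */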
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp)
{
        struct roce_create_qp_req_ramrod_data *p_ramrod;
        u16 regular_latency_queue, low_latency_queue;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        enum protocol_type proto;
        u16 flags = 0;
        int rc;
        u8 tc;

        if (!qp->has_req)
                return 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        /* Allocate DMA-able memory for ORQ */
        qp->orq_num_pages = 1;
        qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     RDMA_RING_PAGE_SIZE,
                                     &qp->orq_phys_addr, GFP_KERNEL);
        if (!qp->orq) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
                          rc);
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid + 1;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_RAMROD_CREATE_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err;

        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
                  qed_roce_mode_to_flavor(qp->roce_mode));

        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
                  qp->signal_all);

        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
                  qp->retry_cnt);

        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                  qp->rnr_retry_cnt);

        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
                  qed_rdma_is_xrc_qp(qp));

        p_ramrod = &p_ent->ramrod.roce_create_qp_req;
        p_ramrod->flags = cpu_to_le16(flags);

        SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
                  qp->edpm_mode);

        p_ramrod->max_ord = qp->max_rd_atomic_req;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->orq_num_pages = qp->orq_num_pages;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
        p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
        p_ramrod->pd = cpu_to_le16(qp->pd);
        p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
        DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
        p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
        p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
        p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
        p_ramrod->cq_cid =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);

        tc = qed_roce_get_qp_tc(p_hwfn, qp);
        regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
        low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "qp icid %u pqs: regular_latency %u low_latency %u\n",
                   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
                   low_latency_queue - CM_TX_PQ_BASE);
        p_ramrod->regular_latency_phy_queue =
            cpu_to_le16(regular_latency_queue);
        p_ramrod->low_latency_phy_queue =
            cpu_to_le16(low_latency_queue);

        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
        qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

        p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
        p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
        p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
                                     qp->stats_queue;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        qp->req_offloaded = true;
        proto = p_hwfn->p_rdma_info->proto;
        qed_roce_set_real_cid(p_hwfn,
                              qp->icid + 1 -
                              qed_cxt_get_proto_cid_start(p_hwfn, proto));

        return rc;

err:
        DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->orq, qp->orq_phys_addr);
        return rc;
}

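/* Post a MODIFY_QP ramrod for the responder side. modify_flags selects
 * which QP attributes the FW should actually apply; moving to error is a
 * no-op if the responder was never offloaded.
 */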
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp,
                                        bool move_to_err, u32 modify_flags)
{
        struct roce_modify_qp_resp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u16 flags = 0;
        int rc;

        if (!qp->has_resp)
                return 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        if (move_to_err && !qp->resp_offloaded)
                return 0;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_EVENT_MODIFY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc) {
                DP_NOTICE(p_hwfn, "rc = %d\n", rc);
                return rc;
        }

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
                  !!move_to_err);

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                  qp->e2e_flow_control_en);

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

        p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
        p_ramrod->flags = cpu_to_le16(flags);

        p_ramrod->fields = 0;
        SET_FIELD(p_ramrod->fields,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
                  qp->min_rnr_nak_timer);

        p_ramrod->max_ird = qp->max_rd_atomic_resp;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
        return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp,
                                        bool move_to_sqd,
                                        bool move_to_err, u32 modify_flags)
{
        struct roce_modify_qp_req_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u16 flags = 0;
        int rc;

        if (!qp->has_req)
                return 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        if (move_to_err && !(qp->req_offloaded))
                return 0;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid + 1;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_EVENT_MODIFY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc) {
                DP_NOTICE(p_hwfn, "rc = %d\n", rc);
                return rc;
        }

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
                  !!move_to_err);

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
                  !!move_to_sqd);

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
                  qp->sqd_async);

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

        p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
        p_ramrod->flags = cpu_to_le16(flags);

        p_ramrod->fields = 0;
        SET_FIELD(p_ramrod->fields,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
        SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                  qp->rnr_retry_cnt);

        p_ramrod->max_ord = qp->max_rd_atomic_req;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
        return rc;
}

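/* Tear down the responder side. The DESTROY_QP ramrod returns the final
 * CQ producer through a DMA-able output buffer so the caller can flush
 * the CQ. If the responder was never offloaded, only the locally
 * allocated CID pair is released.
 */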
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
                                            struct qed_rdma_qp *qp,
                                            u32 *cq_prod)
{
        struct roce_destroy_qp_resp_output_params *p_ramrod_res;
        struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t ramrod_res_phys;
        int rc;

        if (!qp->has_resp) {
                *cq_prod = 0;
                return 0;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
        *cq_prod = qp->cq_prod;

        if (!qp->resp_offloaded) {
                /* If a responder was never offloaded, we need to free the
                 * cids allocated in create_qp as a FW async event will never
                 * arrive.
                 */
                u32 cid;

                cid = qp->icid -
                      qed_cxt_get_proto_cid_start(p_hwfn,
                                                  p_hwfn->p_rdma_info->proto);
                qed_roce_free_cid_pair(p_hwfn, (u16)cid);

                return 0;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_RAMROD_DESTROY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

        p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                          sizeof(*p_ramrod_res),
                                          &ramrod_res_phys, GFP_KERNEL);

        if (!p_ramrod_res) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
                          rc);
                qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }

        DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
        qp->cq_prod = *cq_prod;

        /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->irq, qp->irq_phys_addr);

        qp->resp_offloaded = false;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct roce_destroy_qp_resp_output_params),
                          p_ramrod_res, ramrod_res_phys);

        return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
                                            struct qed_rdma_qp *qp)
{
        struct roce_destroy_qp_req_output_params *p_ramrod_res;
        struct roce_destroy_qp_req_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t ramrod_res_phys;
        int rc = -ENOMEM;

        if (!qp->has_req)
                return 0;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        if (!qp->req_offloaded)
                return 0;

        p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                          sizeof(*p_ramrod_res),
                                          &ramrod_res_phys, GFP_KERNEL);
        if (!p_ramrod_res) {
                DP_NOTICE(p_hwfn,
                          "qed destroy requester failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid + 1;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
        DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->orq, qp->orq_phys_addr);

        qp->req_offloaded = false;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
                          p_ramrod_res, ramrod_res_phys);

        return rc;
}

int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
                      struct qed_rdma_qp *qp,
                      struct qed_rdma_query_qp_out_params *out_params)
{
        struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
        struct roce_query_qp_req_output_params *p_req_ramrod_res;
        struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
        struct roce_query_qp_req_ramrod_data *p_req_ramrod;
        struct qed_sp_init_data init_data;
        dma_addr_t resp_ramrod_res_phys;
        dma_addr_t req_ramrod_res_phys;
        struct qed_spq_entry *p_ent;
        bool rq_err_state;
        bool sq_err_state;
        bool sq_draining;
        int rc = -ENOMEM;

        if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
                /* We can't send a ramrod to the FW since this QP wasn't
                 * offloaded to the FW yet.
                 */
                out_params->draining = false;
                out_params->rq_psn = qp->rq_psn;
                out_params->sq_psn = qp->sq_psn;
                out_params->state = qp->cur_state;

                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
                return 0;
        }

        if (!(qp->resp_offloaded)) {
                DP_NOTICE(p_hwfn,
                          "The responder's qp should be offloaded before requester's\n");
                return -EINVAL;
        }

        /* Send a query responder ramrod to FW to get RQ-PSN and state */
        p_resp_ramrod_res =
            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                               sizeof(*p_resp_ramrod_res),
                               &resp_ramrod_res_phys, GFP_KERNEL);
        if (!p_resp_ramrod_res) {
                DP_NOTICE(p_hwfn,
                          "qed query qp failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err_resp;

        p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
        DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr,
                       resp_ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err_resp;

        out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
        rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
                                 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
                          p_resp_ramrod_res, resp_ramrod_res_phys);

        if (!(qp->req_offloaded)) {
                /* Don't send query qp for the requester */
                out_params->sq_psn = qp->sq_psn;
                out_params->draining = false;

                if (rq_err_state)
                        qp->cur_state = QED_ROCE_QP_STATE_ERR;

                out_params->state = qp->cur_state;

                return 0;
        }

        /* Send a query requester ramrod to FW to get SQ-PSN and state */
        p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                              sizeof(*p_req_ramrod_res),
                                              &req_ramrod_res_phys,
                                              GFP_KERNEL);
        if (!p_req_ramrod_res) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed query qp failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        init_data.cid = qp->icid + 1;
        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err_req;

        p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
        DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err_req;

        out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
        sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
                                 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
        sq_draining =
            GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
                      ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
                          p_req_ramrod_res, req_ramrod_res_phys);

        out_params->draining = false;

        if (rq_err_state || sq_err_state)
                qp->cur_state = QED_ROCE_QP_STATE_ERR;
        else if (sq_draining)
                out_params->draining = true;
        out_params->state = qp->cur_state;

        return 0;

err_req:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
                          p_req_ramrod_res, req_ramrod_res_phys);
        return rc;
err_resp:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
                          p_resp_ramrod_res, resp_ramrod_res_phys);
        return rc;
}

int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        u32 cq_prod;
        int rc;

        /* Destroys the specified QP */
        if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
            (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
            (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
                DP_NOTICE(p_hwfn,
                          "QP must be in error, reset or init state before destroying it\n");
                return -EINVAL;
        }

        if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
                rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
                                                      &cq_prod);
                if (rc)
                        return rc;

                /* Send destroy requester ramrod */
                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
                if (rc)
                        return rc;
        }

        return 0;
}

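/* Drive the FW side of a QP state transition: the responder is created on
 * Init/Reset->RTR and the requester on RTR->RTS; later transitions map to
 * modify ramrods, any state->ERR moves both sides to error, and any
 * state->RESET destroys both sides.
 */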
int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
                       struct qed_rdma_qp *qp,
                       enum qed_roce_qp_state prev_state,
                       struct qed_rdma_modify_qp_in_params *params)
{
        int rc = 0;

        /* Perform additional operations according to the current state and
         * the next state.
         */
        if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
             (prev_state == QED_ROCE_QP_STATE_RESET)) &&
            (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
                /* Init->RTR or Reset->RTR */
                rc = qed_roce_sp_create_responder(p_hwfn, qp);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
                /* RTR->RTS */
                rc = qed_roce_sp_create_requester(p_hwfn, qp);
                if (rc)
                        return rc;

                /* Send modify responder ramrod */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
                                                  params->modify_flags);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
                /* RTS->RTS */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
                                                  params->modify_flags);
                if (rc)
                        return rc;

                rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
                                                  params->modify_flags);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
                /* RTS->SQD */
                rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
                                                  params->modify_flags);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
                /* SQD->SQD */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
                                                  params->modify_flags);
                if (rc)
                        return rc;

                rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
                                                  params->modify_flags);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
                /* SQD->RTS */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
                                                  params->modify_flags);
                if (rc)
                        return rc;

                rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
                                                  params->modify_flags);

                return rc;
        } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
                /* ->ERR */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
                                                  params->modify_flags);
                if (rc)
                        return rc;

                rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
                                                  params->modify_flags);
                return rc;
        } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
                /* Any state -> RESET */
                u32 cq_prod;

                /* Send destroy responder ramrod */
                rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
                                                      qp,
                                                      &cq_prod);

                if (rc)
                        return rc;

                qp->cq_prod = cq_prod;

                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
        } else {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "No state-change action required\n");
        }

        return rc;
}

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
        u32 start_cid, cid, xcid;

        /* An even icid belongs to a responder while an odd icid belongs to a
         * requester. The 'cid' received as an input can be either. We
         * calculate the "partner" icid and call it xcid. Only if both are
         * free then the "cid" map can be cleared.
         */
        start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
        cid = icid - start_cid;
        xcid = cid ^ 1;

        spin_lock_bh(&p_rdma_info->lock);

        qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
        if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
                qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
                qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
        }

        spin_unlock_bh(&p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u8 val;

        /* If any QPs are already active, we want to disable DPM, since their
         * context information contains information from before the latest
         * DCBx update. Otherwise enable it.
         */
        val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
        p_hwfn->dcbx_no_edpm = (u8)val;

        qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
        return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
                                         qed_roce_async_event);
}

int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 ll2_ethertype_en;

        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

        p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt,
                                  PRS_REG_LIGHT_L2_ETHERTYPE_EN);
        qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
               (ll2_ethertype_en | 0x01));

        if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
                DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
        return 0;
}
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2/* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
6
7#include <linux/types.h>
8#include <asm/byteorder.h>
9#include <linux/bitops.h>
10#include <linux/delay.h>
11#include <linux/dma-mapping.h>
12#include <linux/errno.h>
13#include <linux/io.h>
14#include <linux/kernel.h>
15#include <linux/list.h>
16#include <linux/module.h>
17#include <linux/mutex.h>
18#include <linux/pci.h>
19#include <linux/slab.h>
20#include <linux/spinlock.h>
21#include <linux/string.h>
22#include <linux/if_vlan.h>
23#include "qed.h"
24#include "qed_cxt.h"
25#include "qed_dcbx.h"
26#include "qed_hsi.h"
27#include "qed_hw.h"
28#include "qed_init_ops.h"
29#include "qed_int.h"
30#include "qed_ll2.h"
31#include "qed_mcp.h"
32#include "qed_reg_addr.h"
33#include <linux/qed/qed_rdma_if.h>
34#include "qed_rdma.h"
35#include "qed_roce.h"
36#include "qed_sp.h"
37
38static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
39
40static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
41 __le16 echo, union event_ring_data *data,
42 u8 fw_return_code)
43{
44 struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
45 union rdma_eqe_data *rdata = &data->rdma_data;
46
47 if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
48 u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);
49
50 /* icid release in this async event can occur only if the icid
51 * was offloaded to the FW. In case it wasn't offloaded this is
52 * handled in qed_roce_sp_destroy_qp.
53 */
54 qed_roce_free_real_icid(p_hwfn, icid);
55 } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
56 fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
57 u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);
58
59 events.affiliated_event(events.context, fw_event_code,
60 &srq_id);
61 } else {
62 events.affiliated_event(events.context, fw_event_code,
63 (void *)&rdata->async_handle);
64 }
65
66 return 0;
67}
68
69void qed_roce_stop(struct qed_hwfn *p_hwfn)
70{
71 struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
72 int wait_count = 0;
73
74 /* when destroying a_RoCE QP the control is returned to the user after
75 * the synchronous part. The asynchronous part may take a little longer.
76 * We delay for a short while if an async destroy QP is still expected.
77 * Beyond the added delay we clear the bitmap anyway.
78 */
79 while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
80 msleep(100);
81 if (wait_count++ > 20) {
82 DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
83 break;
84 }
85 }
86}
87
88static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
89 __le32 *dst_gid)
90{
91 u32 i;
92
93 if (qp->roce_mode == ROCE_V2_IPV4) {
94 /* The IPv4 addresses shall be aligned to the highest word.
95 * The lower words must be zero.
96 */
97 memset(src_gid, 0, sizeof(union qed_gid));
98 memset(dst_gid, 0, sizeof(union qed_gid));
99 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
100 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
101 } else {
102 /* GIDs and IPv6 addresses coincide in location and size */
103 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
104 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
105 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
106 }
107 }
108}
109
110static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
111{
112 switch (roce_mode) {
113 case ROCE_V1:
114 return PLAIN_ROCE;
115 case ROCE_V2_IPV4:
116 return RROCE_IPV4;
117 case ROCE_V2_IPV6:
118 return RROCE_IPV6;
119 default:
120 return MAX_ROCE_FLAVOR;
121 }
122}
123
124static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
125{
126 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
127 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
128 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
129 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
130}
131
132int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
133{
134 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
135 u32 responder_icid;
136 u32 requester_icid;
137 int rc;
138
139 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
140 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
141 &responder_icid);
142 if (rc) {
143 spin_unlock_bh(&p_rdma_info->lock);
144 return rc;
145 }
146
147 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
148 &requester_icid);
149
150 spin_unlock_bh(&p_rdma_info->lock);
151 if (rc)
152 goto err;
153
154 /* the two icid's should be adjacent */
155 if ((requester_icid - responder_icid) != 1) {
156 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");
157 rc = -EINVAL;
158 goto err;
159 }
160
161 responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
162 p_rdma_info->proto);
163 requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
164 p_rdma_info->proto);
165
166 /* If these icids require a new ILT line allocate DMA-able context for
167 * an ILT page
168 */
169 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
170 if (rc)
171 goto err;
172
173 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
174 if (rc)
175 goto err;
176
177 *cid = (u16)responder_icid;
178 return rc;
179
180err:
181 spin_lock_bh(&p_rdma_info->lock);
182 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
183 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
184
185 spin_unlock_bh(&p_rdma_info->lock);
186 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
187 "Allocate CID - failed, rc = %d\n", rc);
188 return rc;
189}
190
191static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
192{
193 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
194 qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
195 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
196}
197
198static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
199{
200 u8 pri, tc = 0;
201
202 if (qp->vlan_id) {
203 pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
204 tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
205 }
206
207 DP_VERBOSE(p_hwfn, QED_MSG_SP,
208 "qp icid %u tc: %u (vlan priority %s)\n",
209 qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
210
211 return tc;
212}
213
214static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
215 struct qed_rdma_qp *qp)
216{
217 struct roce_create_qp_resp_ramrod_data *p_ramrod;
218 u16 regular_latency_queue, low_latency_queue;
219 struct qed_sp_init_data init_data;
220 struct qed_spq_entry *p_ent;
221 enum protocol_type proto;
222 u32 flags = 0;
223 int rc;
224 u8 tc;
225
226 if (!qp->has_resp)
227 return 0;
228
229 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
230
231 /* Allocate DMA-able memory for IRQ */
232 qp->irq_num_pages = 1;
233 qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
234 RDMA_RING_PAGE_SIZE,
235 &qp->irq_phys_addr, GFP_KERNEL);
236 if (!qp->irq) {
237 rc = -ENOMEM;
238 DP_NOTICE(p_hwfn,
239 "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
240 rc);
241 return rc;
242 }
243
244 /* Get SPQ entry */
245 memset(&init_data, 0, sizeof(init_data));
246 init_data.cid = qp->icid;
247 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
248 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
249
250 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
251 PROTOCOLID_ROCE, &init_data);
252 if (rc)
253 goto err;
254
255 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
256 qed_roce_mode_to_flavor(qp->roce_mode));
257
258 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
259 qp->incoming_rdma_read_en);
260
261 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
262 qp->incoming_rdma_write_en);
263
264 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
265 qp->incoming_atomic_en);
266
267 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
268 qp->e2e_flow_control_en);
269
270 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
271
272 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
273 qp->fmr_and_reserved_lkey);
274
275 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
276 qp->min_rnr_nak_timer);
277
278 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
279 qed_rdma_is_xrc_qp(qp));
280
281 p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
282 p_ramrod->flags = cpu_to_le32(flags);
283 p_ramrod->max_ird = qp->max_rd_atomic_resp;
284 p_ramrod->traffic_class = qp->traffic_class_tos;
285 p_ramrod->hop_limit = qp->hop_limit_ttl;
286 p_ramrod->irq_num_pages = qp->irq_num_pages;
287 p_ramrod->p_key = cpu_to_le16(qp->pkey);
288 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
289 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
290 p_ramrod->mtu = cpu_to_le16(qp->mtu);
291 p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
292 p_ramrod->pd = cpu_to_le16(qp->pd);
293 p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
294 DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
295 DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
296 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
297 p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
298 p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
299 p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
300 p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
301 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
302 qp->rq_cq_id);
303 p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);
304
305 tc = qed_roce_get_qp_tc(p_hwfn, qp);
306 regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
307 low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
308 DP_VERBOSE(p_hwfn, QED_MSG_SP,
309 "qp icid %u pqs: regular_latency %u low_latency %u\n",
310 qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
311 low_latency_queue - CM_TX_PQ_BASE);
312 p_ramrod->regular_latency_phy_queue =
313 cpu_to_le16(regular_latency_queue);
314 p_ramrod->low_latency_phy_queue =
315 cpu_to_le16(low_latency_queue);
316
317 p_ramrod->dpi = cpu_to_le16(qp->dpi);
318
319 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
320 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
321
322 p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
323 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
324 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
325 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
326
327 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
328 qp->stats_queue;
329
330 rc = qed_spq_post(p_hwfn, p_ent, NULL);
331 if (rc)
332 goto err;
333
334 qp->resp_offloaded = true;
335 qp->cq_prod = 0;
336
337 proto = p_hwfn->p_rdma_info->proto;
338 qed_roce_set_real_cid(p_hwfn, qp->icid -
339 qed_cxt_get_proto_cid_start(p_hwfn, proto));
340
341 return rc;
342
343err:
344 DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
345 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
346 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
347 qp->irq, qp->irq_phys_addr);
348
349 return rc;
350}
351
352static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
353 struct qed_rdma_qp *qp)
354{
355 struct roce_create_qp_req_ramrod_data *p_ramrod;
356 u16 regular_latency_queue, low_latency_queue;
357 struct qed_sp_init_data init_data;
358 struct qed_spq_entry *p_ent;
359 enum protocol_type proto;
360 u16 flags = 0;
361 int rc;
362 u8 tc;
363
364 if (!qp->has_req)
365 return 0;
366
367 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
368
369 /* Allocate DMA-able memory for ORQ */
370 qp->orq_num_pages = 1;
371 qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
372 RDMA_RING_PAGE_SIZE,
373 &qp->orq_phys_addr, GFP_KERNEL);
374 if (!qp->orq) {
375 rc = -ENOMEM;
376 DP_NOTICE(p_hwfn,
377 "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
378 rc);
379 return rc;
380 }
381
382 /* Get SPQ entry */
383 memset(&init_data, 0, sizeof(init_data));
384 init_data.cid = qp->icid + 1;
385 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
386 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
387
388 rc = qed_sp_init_request(p_hwfn, &p_ent,
389 ROCE_RAMROD_CREATE_QP,
390 PROTOCOLID_ROCE, &init_data);
391 if (rc)
392 goto err;
393
394 SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
395 qed_roce_mode_to_flavor(qp->roce_mode));
396
397 SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
398 qp->fmr_and_reserved_lkey);
399
400 SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
401 qp->signal_all);
402
403 SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
404 qp->retry_cnt);
405
406 SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
407 qp->rnr_retry_cnt);
408
409 SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
410 qed_rdma_is_xrc_qp(qp));
411
412 p_ramrod = &p_ent->ramrod.roce_create_qp_req;
413 p_ramrod->flags = cpu_to_le16(flags);
414
415 SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
416 qp->edpm_mode);
417
418 p_ramrod->max_ord = qp->max_rd_atomic_req;
419 p_ramrod->traffic_class = qp->traffic_class_tos;
420 p_ramrod->hop_limit = qp->hop_limit_ttl;
421 p_ramrod->orq_num_pages = qp->orq_num_pages;
422 p_ramrod->p_key = cpu_to_le16(qp->pkey);
423 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
424 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
425 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
426 p_ramrod->mtu = cpu_to_le16(qp->mtu);
427 p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
428 p_ramrod->pd = cpu_to_le16(qp->pd);
429 p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
430 DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
431 DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
432 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
433 p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
434 p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
435 p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
436 p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
437 p_ramrod->cq_cid =
438 cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
439
440 tc = qed_roce_get_qp_tc(p_hwfn, qp);
441 regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
442 low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
443 DP_VERBOSE(p_hwfn, QED_MSG_SP,
444 "qp icid %u pqs: regular_latency %u low_latency %u\n",
445 qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
446 low_latency_queue - CM_TX_PQ_BASE);
447 p_ramrod->regular_latency_phy_queue =
448 cpu_to_le16(regular_latency_queue);
449 p_ramrod->low_latency_phy_queue =
450 cpu_to_le16(low_latency_queue);
451
452 p_ramrod->dpi = cpu_to_le16(qp->dpi);
453
454 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
455 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
456
457 p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
458 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
459 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
460 qp->stats_queue;
461
462 rc = qed_spq_post(p_hwfn, p_ent, NULL);
463 if (rc)
464 goto err;
465
466 qp->req_offloaded = true;
467 proto = p_hwfn->p_rdma_info->proto;
468 qed_roce_set_real_cid(p_hwfn,
469 qp->icid + 1 -
470 qed_cxt_get_proto_cid_start(p_hwfn, proto));
471
472 return rc;
473
474err:
475 DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
476 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
477 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
478 qp->orq, qp->orq_phys_addr);
479 return rc;
480}
481
482static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
483 struct qed_rdma_qp *qp,
484 bool move_to_err, u32 modify_flags)
485{
486 struct roce_modify_qp_resp_ramrod_data *p_ramrod;
487 struct qed_sp_init_data init_data;
488 struct qed_spq_entry *p_ent;
489 u16 flags = 0;
490 int rc;
491
492 if (!qp->has_resp)
493 return 0;
494
495 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
496
497 if (move_to_err && !qp->resp_offloaded)
498 return 0;
499
500 /* Get SPQ entry */
501 memset(&init_data, 0, sizeof(init_data));
502 init_data.cid = qp->icid;
503 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
504 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
505
506 rc = qed_sp_init_request(p_hwfn, &p_ent,
507 ROCE_EVENT_MODIFY_QP,
508 PROTOCOLID_ROCE, &init_data);
509 if (rc) {
510 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
511 return rc;
512 }
513
514 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
515 !!move_to_err);
516
517 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
518 qp->incoming_rdma_read_en);
519
520 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
521 qp->incoming_rdma_write_en);
522
523 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
524 qp->incoming_atomic_en);
525
526 SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
527 qp->e2e_flow_control_en);
528
529 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
530 GET_FIELD(modify_flags,
531 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
532
533 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
534 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
535
536 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
537 GET_FIELD(modify_flags,
538 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
539
540 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
541 GET_FIELD(modify_flags,
542 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
543
544 SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
545 GET_FIELD(modify_flags,
546 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
547
548 p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
549 p_ramrod->flags = cpu_to_le16(flags);
550
551 p_ramrod->fields = 0;
552 SET_FIELD(p_ramrod->fields,
553 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
554 qp->min_rnr_nak_timer);
555
556 p_ramrod->max_ird = qp->max_rd_atomic_resp;
557 p_ramrod->traffic_class = qp->traffic_class_tos;
558 p_ramrod->hop_limit = qp->hop_limit_ttl;
559 p_ramrod->p_key = cpu_to_le16(qp->pkey);
560 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
561 p_ramrod->mtu = cpu_to_le16(qp->mtu);
562 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
563 rc = qed_spq_post(p_hwfn, p_ent, NULL);
564
565 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
566 return rc;
567}
568
569static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
570 struct qed_rdma_qp *qp,
571 bool move_to_sqd,
572 bool move_to_err, u32 modify_flags)
573{
574 struct roce_modify_qp_req_ramrod_data *p_ramrod;
575 struct qed_sp_init_data init_data;
576 struct qed_spq_entry *p_ent;
577 u16 flags = 0;
578 int rc;
579
580 if (!qp->has_req)
581 return 0;
582
583 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
584
585 if (move_to_err && !(qp->req_offloaded))
586 return 0;
587
588 /* Get SPQ entry */
589 memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  !!move_to_err);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
		  !!move_to_sqd);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
	p_ramrod->flags = cpu_to_le16(flags);

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
	SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	if (!qp->has_resp) {
		*cq_prod = 0;
		return 0;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
	*cq_prod = qp->cq_prod;

	if (!qp->resp_offloaded) {
		/* If the responder was never offloaded, we must free the cids
		 * allocated in create_qp here, as no FW async event will
		 * arrive for them.
		 */
		u32 cid;

		cid = qp->icid -
		      qed_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
		qed_roce_free_cid_pair(p_hwfn, (u16)cid);

		return 0;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

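	/* FW writes the final CQ producer into this DMA buffer; because
	 * QED_SPQ_MODE_EBLOCK blocks until the completion arrives, the
	 * result can be read back right after qed_spq_post() returns.
	 */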
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
	qp->cq_prod = *cq_prod;

	/* Free the IRQ only if the ramrod succeeded; otherwise FW may still
	 * be using it.
	 */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
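	/* Destroy targets the requester's odd icid */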
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	/* Free the ORQ only if the ramrod succeeded; otherwise FW may still
	 * be using it.
	 */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
		      struct qed_rdma_qp *qp,
		      struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if (!qp->resp_offloaded && !qp->req_offloaded) {
		/* We can't send a ramrod to the FW since this QP wasn't
		 * offloaded to the FW yet; report the SW-tracked state.
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!qp->resp_offloaded) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res =
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!qp->req_offloaded) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
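	/* init_data is reused from the responder query above; only the cid
	 * changes, to the requester's odd icid.
	 */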
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state || sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 cq_prod;
	int rc;

	/* Destroying a QP is only valid from the error, reset or init state */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

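	/* In the RESET state the FW contexts were already torn down by the
	 * modify-to-RESET transition in qed_roce_modify_qp(), so the destroy
	 * ramrods are skipped.
	 */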
	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &cq_prod);
		if (rc)
			return rc;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
		if (rc)
			return rc;
	}

	return 0;
}

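/* Issue the ramrods a verbs modify-QP requires, based on the
 * (prev_state, cur_state) transition.
 */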
int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
		       struct qed_rdma_qp *qp,
		       enum qed_roce_qp_state prev_state,
		       struct qed_rdma_modify_qp_in_params *params)
{
	int rc = 0;

	/* Perform the additional operations required by the transition from
	 * the previous state to the new state.
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR->RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		u32 cq_prod;

		/* Send destroy responder ramrod */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
						      qp,
						      &cq_prod);

		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "No ramrod needed for this transition\n");
	}

	return rc;
}

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid, xcid;

	/* An even icid belongs to a responder while an odd icid belongs to a
	 * requester. The 'icid' received as input can be either. We calculate
	 * the "partner" icid and call it xcid. Only if both are free can the
	 * "cid" map be cleared.
	 */
	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;
	xcid = cid ^ 1;
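	/* cid ^ 1 flips between the even and odd ids of a pair */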

	spin_lock_bh(&p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
	}

	spin_unlock_bh(&p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* If any QPs are already active, disable DPM, since their context
	 * information predates the latest DCBx update. Otherwise enable it.
	 */
	p_hwfn->dcbx_no_edpm = qed_rdma_allocated_qps(p_hwfn) ? 1 : 0;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					 qed_roce_async_event);
}

int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

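	/* QP icids are handed out in even/odd responder/requester pairs, so
	 * the RoCE cid range must start on an even boundary.
	 */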
	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}