1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright(c) 2015 - 2018 Intel Corporation.
4 */
5
6#include <linux/io.h>
7#include <rdma/rdma_vt.h>
8#include <rdma/rdmavt_qp.h>
9
10#include "hfi.h"
11#include "qp.h"
12#include "rc.h"
13#include "verbs_txreq.h"
14#include "trace.h"
15
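/**
 * find_prev_entry - find the previous ack queue entry for a given PSN
 * @qp: the QP
 * @psn: the packet sequence number to look for
 * @prev: optional return of the index of the entry found
 * @prev_ack: optional return of the index following the entry found
 *            (toward the queue head)
 * @scheduled: optional return of whether the found entry's response has
 *             already been scheduled
 *
 * Walk the ack queue backwards from r_head_ack_queue and return the first
 * entry whose starting PSN is not after @psn, or NULL if no such entry is
 * found before the walk wraps back to the head.
 */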
16struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
17 u8 *prev_ack, bool *scheduled)
18 __must_hold(&qp->s_lock)
19{
20 struct rvt_ack_entry *e = NULL;
21 u8 i, p;
22 bool s = true;
23
24 for (i = qp->r_head_ack_queue; ; i = p) {
25 if (i == qp->s_tail_ack_queue)
26 s = false;
27 if (i)
28 p = i - 1;
29 else
30 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
31 if (p == qp->r_head_ack_queue) {
32 e = NULL;
33 break;
34 }
35 e = &qp->s_ack_queue[p];
36 if (!e->opcode) {
37 e = NULL;
38 break;
39 }
40 if (cmp_psn(psn, e->psn) >= 0) {
41 if (p == qp->s_tail_ack_queue &&
42 cmp_psn(psn, e->lpsn) <= 0)
43 s = false;
44 break;
45 }
46 }
47 if (prev)
48 *prev = p;
49 if (prev_ack)
50 *prev_ack = i;
51 if (scheduled)
52 *scheduled = s;
53 return e;
54}
55
56/**
57 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
58 * @dev: the device for this QP
59 * @qp: a pointer to the QP
60 * @ohdr: a pointer to the IB header being constructed
61 * @ps: the xmit packet state
62 *
63 * Return 1 if constructed; otherwise, return 0.
64 * Note that we are in the responder's side of the QP context.
65 * Note the QP s_lock must be held.
66 */
67static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
68 struct ib_other_headers *ohdr,
69 struct hfi1_pkt_state *ps)
70{
71 struct rvt_ack_entry *e;
72 u32 hwords, hdrlen;
73 u32 len = 0;
74 u32 bth0 = 0, bth2 = 0;
75 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
76 int middle = 0;
77 u32 pmtu = qp->pmtu;
78 struct hfi1_qp_priv *qpriv = qp->priv;
79 bool last_pkt;
80 u32 delta;
81 u8 next = qp->s_tail_ack_queue;
82 struct tid_rdma_request *req;
83
84 trace_hfi1_rsp_make_rc_ack(qp, 0);
85 lockdep_assert_held(&qp->s_lock);
86 /* Don't send an ACK if we aren't supposed to. */
87 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
88 goto bail;
89
90 if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
91 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
92 hwords = 5;
93 else
94 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
95 hwords = 7;
96
97 switch (qp->s_ack_state) {
98 case OP(RDMA_READ_RESPONSE_LAST):
99 case OP(RDMA_READ_RESPONSE_ONLY):
100 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
101 release_rdma_sge_mr(e);
102 fallthrough;
103 case OP(ATOMIC_ACKNOWLEDGE):
104 /*
105 * We can increment the tail pointer now that the last
106 * response has been sent instead of only being
107 * constructed.
108 */
109 if (++next > rvt_size_atomic(&dev->rdi))
110 next = 0;
111 /*
112 * Only advance the s_acked_ack_queue pointer if there
113 * have been no TID RDMA requests.
114 */
115 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
116 if (e->opcode != TID_OP(WRITE_REQ) &&
117 qp->s_acked_ack_queue == qp->s_tail_ack_queue)
118 qp->s_acked_ack_queue = next;
119 qp->s_tail_ack_queue = next;
120 trace_hfi1_rsp_make_rc_ack(qp, e->psn);
121 fallthrough;
122 case OP(SEND_ONLY):
123 case OP(ACKNOWLEDGE):
124 /* Check for no next entry in the queue. */
125 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
126 if (qp->s_flags & RVT_S_ACK_PENDING)
127 goto normal;
128 goto bail;
129 }
130
131 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
132 /* Check for tid write fence */
133 if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
134 hfi1_tid_rdma_ack_interlock(qp, e)) {
135 iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
136 goto bail;
137 }
138 if (e->opcode == OP(RDMA_READ_REQUEST)) {
139 /*
140 * If an RDMA read response is being resent and
141 * we haven't seen the duplicate request yet,
142 * then stop sending the remaining responses the
143 * responder has seen until the requester re-sends it.
144 */
145 len = e->rdma_sge.sge_length;
146 if (len && !e->rdma_sge.mr) {
147 if (qp->s_acked_ack_queue ==
148 qp->s_tail_ack_queue)
149 qp->s_acked_ack_queue =
150 qp->r_head_ack_queue;
151 qp->s_tail_ack_queue = qp->r_head_ack_queue;
152 goto bail;
153 }
154 /* Copy SGE state in case we need to resend */
155 ps->s_txreq->mr = e->rdma_sge.mr;
156 if (ps->s_txreq->mr)
157 rvt_get_mr(ps->s_txreq->mr);
158 qp->s_ack_rdma_sge.sge = e->rdma_sge;
159 qp->s_ack_rdma_sge.num_sge = 1;
160 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
161 if (len > pmtu) {
162 len = pmtu;
163 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
164 } else {
165 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
166 e->sent = 1;
167 }
168 ohdr->u.aeth = rvt_compute_aeth(qp);
169 hwords++;
170 qp->s_ack_rdma_psn = e->psn;
171 bth2 = mask_psn(qp->s_ack_rdma_psn++);
172 } else if (e->opcode == TID_OP(WRITE_REQ)) {
173 /*
174 * If a TID RDMA WRITE RESP is being resent, we have to
175 * wait for the actual request. All requests that are to
176 * be resent will have their state set to
177 * TID_REQUEST_RESEND. When the new request arrives, the
178 * state will be changed to TID_REQUEST_RESEND_ACTIVE.
179 */
180 req = ack_to_tid_req(e);
181 if (req->state == TID_REQUEST_RESEND ||
182 req->state == TID_REQUEST_INIT_RESEND)
183 goto bail;
184 qp->s_ack_state = TID_OP(WRITE_RESP);
185 qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
186 goto write_resp;
187 } else if (e->opcode == TID_OP(READ_REQ)) {
188 /*
189 * If a TID RDMA read response is being resent and
190 * we haven't seen the duplicate request yet,
191 * then stop sending the remaining responses the
192 * responder has seen until the requester re-sends it.
193 */
194 len = e->rdma_sge.sge_length;
195 if (len && !e->rdma_sge.mr) {
196 if (qp->s_acked_ack_queue ==
197 qp->s_tail_ack_queue)
198 qp->s_acked_ack_queue =
199 qp->r_head_ack_queue;
200 qp->s_tail_ack_queue = qp->r_head_ack_queue;
201 goto bail;
202 }
203 /* Copy SGE state in case we need to resend */
204 ps->s_txreq->mr = e->rdma_sge.mr;
205 if (ps->s_txreq->mr)
206 rvt_get_mr(ps->s_txreq->mr);
207 qp->s_ack_rdma_sge.sge = e->rdma_sge;
208 qp->s_ack_rdma_sge.num_sge = 1;
209 qp->s_ack_state = TID_OP(READ_RESP);
210 goto read_resp;
211 } else {
212 /* COMPARE_SWAP or FETCH_ADD */
213 ps->s_txreq->ss = NULL;
214 len = 0;
215 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
216 ohdr->u.at.aeth = rvt_compute_aeth(qp);
217 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
218 hwords += sizeof(ohdr->u.at) / sizeof(u32);
219 bth2 = mask_psn(e->psn);
220 e->sent = 1;
221 }
222 trace_hfi1_tid_write_rsp_make_rc_ack(qp);
223 bth0 = qp->s_ack_state << 24;
224 break;
225
226 case OP(RDMA_READ_RESPONSE_FIRST):
227 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
228 fallthrough;
229 case OP(RDMA_READ_RESPONSE_MIDDLE):
230 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
231 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
232 if (ps->s_txreq->mr)
233 rvt_get_mr(ps->s_txreq->mr);
234 len = qp->s_ack_rdma_sge.sge.sge_length;
235 if (len > pmtu) {
236 len = pmtu;
237 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
238 } else {
239 ohdr->u.aeth = rvt_compute_aeth(qp);
240 hwords++;
241 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
242 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
243 e->sent = 1;
244 }
245 bth0 = qp->s_ack_state << 24;
246 bth2 = mask_psn(qp->s_ack_rdma_psn++);
247 break;
248
249 case TID_OP(WRITE_RESP):
250write_resp:
251 /*
252 * 1. Check if RVT_S_ACK_PENDING is set. If yes,
253 * goto normal.
254 * 2. Attempt to allocate TID resources.
255 * 3. Remove RVT_S_RESP_PENDING flags from s_flags
256 * 4. If resources not available:
257 * 4.1 Set RVT_S_WAIT_TID_SPACE
258 * 4.2 Queue QP on RCD TID queue
259 * 4.3 Put QP on iowait list.
260 * 4.4 Build IB RNR NAK with appropriate timeout value
261 * 4.5 Return an indication that progress was made.
262 * 5. If resources are available:
263 * 5.1 Program HW flow CSRs
264 * 5.2 Build TID RDMA WRITE RESP packet
265 * 5.3 If more resources needed, do 2.1 - 2.3.
266 * 5.4 Wake up next QP on RCD TID queue.
267 * 5.5 Return an indication that progress was made.
268 */
269
270 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
271 req = ack_to_tid_req(e);
272
273 /*
274 * Send scheduled RNR NAKs. RNR NAKs need to be sent at
275 * segment boundaries, not at request boundaries. Don't change
276 * s_ack_state because we are still in the middle of a request.
277 */
278 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
279 qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
280 req->cur_seg == req->alloc_seg) {
281 qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
282 goto normal_no_state;
283 }
284
285 bth2 = mask_psn(qp->s_ack_rdma_psn);
286 hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
287 bth2, &len,
288 &ps->s_txreq->ss);
289 if (!hdrlen)
290 return 0;
291
292 hwords += hdrlen;
293 bth0 = qp->s_ack_state << 24;
294 qp->s_ack_rdma_psn++;
295 trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
296 e->lpsn, req);
297 if (req->cur_seg != req->total_segs)
298 break;
299
300 e->sent = 1;
301 /* Do not free e->rdma_sge until all data are received */
302 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
303 break;
304
305 case TID_OP(READ_RESP):
306read_resp:
307 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
308 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
309 delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
310 &bth1, &bth2, &len,
311 &last_pkt);
312 if (delta == 0)
313 goto error_qp;
314 hwords += delta;
315 if (last_pkt) {
316 e->sent = 1;
317 /*
318 * Increment qp->s_tail_ack_queue through s_ack_state
319 * transition.
320 */
321 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
322 }
323 break;
324 case TID_OP(READ_REQ):
325 goto bail;
326
327 default:
328normal:
329 /*
330 * Send a regular ACK.
331 * Set the s_ack_state so we wait until after sending
332 * the ACK before setting s_ack_state to ACKNOWLEDGE
333 * (see above).
334 */
335 qp->s_ack_state = OP(SEND_ONLY);
336normal_no_state:
337 if (qp->s_nak_state)
338 ohdr->u.aeth =
339 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
340 (qp->s_nak_state <<
341 IB_AETH_CREDIT_SHIFT));
342 else
343 ohdr->u.aeth = rvt_compute_aeth(qp);
344 hwords++;
345 len = 0;
346 bth0 = OP(ACKNOWLEDGE) << 24;
347 bth2 = mask_psn(qp->s_ack_psn);
348 qp->s_flags &= ~RVT_S_ACK_PENDING;
349 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
350 ps->s_txreq->ss = NULL;
351 }
352 qp->s_rdma_ack_cnt++;
353 ps->s_txreq->sde = qpriv->s_sde;
354 ps->s_txreq->s_cur_size = len;
355 ps->s_txreq->hdr_dwords = hwords;
356 hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
357 return 1;
358error_qp:
359 spin_unlock_irqrestore(&qp->s_lock, ps->flags);
360 spin_lock_irqsave(&qp->r_lock, ps->flags);
361 spin_lock(&qp->s_lock);
362 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
363 spin_unlock(&qp->s_lock);
364 spin_unlock_irqrestore(&qp->r_lock, ps->flags);
365 spin_lock_irqsave(&qp->s_lock, ps->flags);
366bail:
367 qp->s_ack_state = OP(ACKNOWLEDGE);
368 /*
369 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
370 * RVT_S_RESP_PENDING
371 */
372 smp_wmb();
373 qp->s_flags &= ~(RVT_S_RESP_PENDING
374 | RVT_S_ACK_PENDING
375 | HFI1_S_AHG_VALID);
376 return 0;
377}
378
379/**
380 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
381 * @qp: a pointer to the QP
382 * @ps: the current packet state
383 *
384 * Assumes s_lock is held.
385 *
386 * Return 1 if constructed; otherwise, return 0.
387 */
388int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
389{
390 struct hfi1_qp_priv *priv = qp->priv;
391 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
392 struct ib_other_headers *ohdr;
393 struct rvt_sge_state *ss = NULL;
394 struct rvt_swqe *wqe;
395 struct hfi1_swqe_priv *wpriv;
396 struct tid_rdma_request *req = NULL;
397 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
398 u32 hwords = 5;
399 u32 len = 0;
400 u32 bth0 = 0, bth2 = 0;
401 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
402 u32 pmtu = qp->pmtu;
403 char newreq;
404 int middle = 0;
405 int delta;
406 struct tid_rdma_flow *flow = NULL;
407 struct tid_rdma_params *remote;
408
409 trace_hfi1_sender_make_rc_req(qp);
410 lockdep_assert_held(&qp->s_lock);
411 ps->s_txreq = get_txreq(ps->dev, qp);
412 if (!ps->s_txreq)
413 goto bail_no_tx;
414
415 if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
416 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
417 hwords = 5;
418 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
419 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
420 else
421 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
422 } else {
423 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
424 hwords = 7;
425 if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
426 (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
427 ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
428 else
429 ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
430 }
431
432 /* Sending responses takes priority over sending requests. */
433 if ((qp->s_flags & RVT_S_RESP_PENDING) &&
434 make_rc_ack(dev, qp, ohdr, ps))
435 return 1;
436
437 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
438 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
439 goto bail;
440 /* We are in the error state, flush the work request. */
441 if (qp->s_last == READ_ONCE(qp->s_head))
442 goto bail;
443 /* If DMAs are in progress, we can't flush immediately. */
444 if (iowait_sdma_pending(&priv->s_iowait)) {
445 qp->s_flags |= RVT_S_WAIT_DMA;
446 goto bail;
447 }
448 clear_ahg(qp);
449 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
450 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
451 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
452 /* will get called again */
453 goto done_free_tx;
454 }
455
456 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
457 goto bail;
458
459 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
460 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
461 qp->s_flags |= RVT_S_WAIT_PSN;
462 goto bail;
463 }
464 qp->s_sending_psn = qp->s_psn;
465 qp->s_sending_hpsn = qp->s_psn - 1;
466 }
467
468 /* Send a request. */
469 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
470check_s_state:
471 switch (qp->s_state) {
472 default:
473 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
474 goto bail;
475 /*
476 * Resend an old request or start a new one.
477 *
478 * We keep track of the current SWQE so that
479 * we don't reset the "furthest progress" state
480 * if we need to back up.
481 */
482 newreq = 0;
483 if (qp->s_cur == qp->s_tail) {
484 /* Check if send work queue is empty. */
485 if (qp->s_tail == READ_ONCE(qp->s_head)) {
486 clear_ahg(qp);
487 goto bail;
488 }
489 /*
490 * If a fence is requested, wait for previous
491 * RDMA read and atomic operations to finish.
492 * However, there is no need to guard against
493 * TID RDMA READ after TID RDMA READ.
494 */
495 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
496 qp->s_num_rd_atomic &&
497 (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
498 priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
499 qp->s_flags |= RVT_S_WAIT_FENCE;
500 goto bail;
501 }
502 /*
503 * Local operations are processed immediately
504 * after all prior requests have completed
505 */
506 if (wqe->wr.opcode == IB_WR_REG_MR ||
507 wqe->wr.opcode == IB_WR_LOCAL_INV) {
508 int local_ops = 0;
509 int err = 0;
510
511 if (qp->s_last != qp->s_cur)
512 goto bail;
513 if (++qp->s_cur == qp->s_size)
514 qp->s_cur = 0;
515 if (++qp->s_tail == qp->s_size)
516 qp->s_tail = 0;
517 if (!(wqe->wr.send_flags &
518 RVT_SEND_COMPLETION_ONLY)) {
519 err = rvt_invalidate_rkey(
520 qp,
521 wqe->wr.ex.invalidate_rkey);
522 local_ops = 1;
523 }
524 rvt_send_complete(qp, wqe,
525 err ? IB_WC_LOC_PROT_ERR
526 : IB_WC_SUCCESS);
527 if (local_ops)
528 atomic_dec(&qp->local_ops_pending);
529 goto done_free_tx;
530 }
531
532 newreq = 1;
533 qp->s_psn = wqe->psn;
534 }
535 /*
536 * Note that we have to be careful not to modify the
537 * original work request since we may need to resend
538 * it.
539 */
540 len = wqe->length;
541 ss = &qp->s_sge;
542 bth2 = mask_psn(qp->s_psn);
543
544 /*
545 * Interlock between various IB requests and TID RDMA
546 * if necessary.
547 */
548 if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
549 hfi1_tid_rdma_wqe_interlock(qp, wqe))
550 goto bail;
551
552 switch (wqe->wr.opcode) {
553 case IB_WR_SEND:
554 case IB_WR_SEND_WITH_IMM:
555 case IB_WR_SEND_WITH_INV:
556 /* If no credit, return. */
557 if (!rvt_rc_credit_avail(qp, wqe))
558 goto bail;
559 if (len > pmtu) {
560 qp->s_state = OP(SEND_FIRST);
561 len = pmtu;
562 break;
563 }
564 if (wqe->wr.opcode == IB_WR_SEND) {
565 qp->s_state = OP(SEND_ONLY);
566 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
567 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
568 /* Immediate data comes after the BTH */
569 ohdr->u.imm_data = wqe->wr.ex.imm_data;
570 hwords += 1;
571 } else {
572 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
573 /* Invalidate rkey comes after the BTH */
574 ohdr->u.ieth = cpu_to_be32(
575 wqe->wr.ex.invalidate_rkey);
576 hwords += 1;
577 }
578 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
579 bth0 |= IB_BTH_SOLICITED;
580 bth2 |= IB_BTH_REQ_ACK;
581 if (++qp->s_cur == qp->s_size)
582 qp->s_cur = 0;
583 break;
584
585 case IB_WR_RDMA_WRITE:
586 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
587 qp->s_lsn++;
588 goto no_flow_control;
589 case IB_WR_RDMA_WRITE_WITH_IMM:
590 /* If no credit, return. */
591 if (!rvt_rc_credit_avail(qp, wqe))
592 goto bail;
593no_flow_control:
594 put_ib_reth_vaddr(
595 wqe->rdma_wr.remote_addr,
596 &ohdr->u.rc.reth);
597 ohdr->u.rc.reth.rkey =
598 cpu_to_be32(wqe->rdma_wr.rkey);
599 ohdr->u.rc.reth.length = cpu_to_be32(len);
600 hwords += sizeof(struct ib_reth) / sizeof(u32);
601 if (len > pmtu) {
602 qp->s_state = OP(RDMA_WRITE_FIRST);
603 len = pmtu;
604 break;
605 }
606 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
607 qp->s_state = OP(RDMA_WRITE_ONLY);
608 } else {
609 qp->s_state =
610 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
611 /* Immediate data comes after RETH */
612 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
613 hwords += 1;
614 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
615 bth0 |= IB_BTH_SOLICITED;
616 }
617 bth2 |= IB_BTH_REQ_ACK;
618 if (++qp->s_cur == qp->s_size)
619 qp->s_cur = 0;
620 break;
621
622 case IB_WR_TID_RDMA_WRITE:
623 if (newreq) {
624 /*
625 * Limit the number of TID RDMA WRITE requests.
626 */
627 if (atomic_read(&priv->n_tid_requests) >=
628 HFI1_TID_RDMA_WRITE_CNT)
629 goto bail;
630
631 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
632 qp->s_lsn++;
633 }
634
635 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
636 &bth1, &bth2,
637 &len);
638 ss = NULL;
639 if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
640 priv->s_tid_cur = qp->s_cur;
641 if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
642 priv->s_tid_tail = qp->s_cur;
643 priv->s_state = TID_OP(WRITE_RESP);
644 }
645 } else if (priv->s_tid_cur == priv->s_tid_head) {
646 struct rvt_swqe *__w;
647 struct tid_rdma_request *__r;
648
649 __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
650 __r = wqe_to_tid_req(__w);
651
652 /*
653 * The s_tid_cur pointer is advanced to s_cur if
654 * any of the following conditions about the WQE
655 * to which s_tid_cur currently points are
656 * satisfied:
657 * 1. The request is not a TID RDMA WRITE
658 * request,
659 * 2. The request is in the INACTIVE or
660 * COMPLETE states (TID RDMA READ requests
661 * stay at INACTIVE and TID RDMA WRITE
662 * transition to COMPLETE when done),
663 * 3. The request is in the ACTIVE or SYNC
664 * state and the number of completed
665 * segments is equal to the total segment
666 * count.
667 * (If ACTIVE, the request is waiting for
668 * ACKs. If SYNC, the request has not
669 * received any responses because it's
670 * waiting on a sync point.)
671 */
672 if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
673 __r->state == TID_REQUEST_INACTIVE ||
674 __r->state == TID_REQUEST_COMPLETE ||
675 ((__r->state == TID_REQUEST_ACTIVE ||
676 __r->state == TID_REQUEST_SYNC) &&
677 __r->comp_seg == __r->total_segs)) {
678 if (priv->s_tid_tail ==
679 priv->s_tid_cur &&
680 priv->s_state ==
681 TID_OP(WRITE_DATA_LAST)) {
682 priv->s_tid_tail = qp->s_cur;
683 priv->s_state =
684 TID_OP(WRITE_RESP);
685 }
686 priv->s_tid_cur = qp->s_cur;
687 }
688 /*
689 * A corner case: when the last TID RDMA WRITE
690 * request was completed, s_tid_head,
691 * s_tid_cur, and s_tid_tail all point to the
692 * same location. Other requests are posted and
693 * s_cur wraps around to the same location,
694 * where a new TID RDMA WRITE is posted. In
695 * this case, none of the indices need to be
696 * updated. However, priv->s_state should still be updated.
697 */
698 if (priv->s_tid_tail == qp->s_cur &&
699 priv->s_state == TID_OP(WRITE_DATA_LAST))
700 priv->s_state = TID_OP(WRITE_RESP);
701 }
702 req = wqe_to_tid_req(wqe);
703 if (newreq) {
704 priv->s_tid_head = qp->s_cur;
705 priv->pending_tid_w_resp += req->total_segs;
706 atomic_inc(&priv->n_tid_requests);
707 atomic_dec(&priv->n_requests);
708 } else {
709 req->state = TID_REQUEST_RESEND;
710 req->comp_seg = delta_psn(bth2, wqe->psn);
711 /*
712 * Pull back any segments since we are going
713 * to re-receive them.
714 */
715 req->setup_head = req->clear_tail;
716 priv->pending_tid_w_resp +=
717 delta_psn(wqe->lpsn, bth2) + 1;
718 }
719
720 trace_hfi1_tid_write_sender_make_req(qp, newreq);
721 trace_hfi1_tid_req_make_req_write(qp, newreq,
722 wqe->wr.opcode,
723 wqe->psn, wqe->lpsn,
724 req);
725 if (++qp->s_cur == qp->s_size)
726 qp->s_cur = 0;
727 break;
728
729 case IB_WR_RDMA_READ:
730 /*
731 * Don't allow more operations to be started
732 * than the QP limits allow.
733 */
734 if (qp->s_num_rd_atomic >=
735 qp->s_max_rd_atomic) {
736 qp->s_flags |= RVT_S_WAIT_RDMAR;
737 goto bail;
738 }
739 qp->s_num_rd_atomic++;
740 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
741 qp->s_lsn++;
742 put_ib_reth_vaddr(
743 wqe->rdma_wr.remote_addr,
744 &ohdr->u.rc.reth);
745 ohdr->u.rc.reth.rkey =
746 cpu_to_be32(wqe->rdma_wr.rkey);
747 ohdr->u.rc.reth.length = cpu_to_be32(len);
748 qp->s_state = OP(RDMA_READ_REQUEST);
749 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
750 ss = NULL;
751 len = 0;
752 bth2 |= IB_BTH_REQ_ACK;
753 if (++qp->s_cur == qp->s_size)
754 qp->s_cur = 0;
755 break;
756
757 case IB_WR_TID_RDMA_READ:
758 trace_hfi1_tid_read_sender_make_req(qp, newreq);
759 wpriv = wqe->priv;
760 req = wqe_to_tid_req(wqe);
761 trace_hfi1_tid_req_make_req_read(qp, newreq,
762 wqe->wr.opcode,
763 wqe->psn, wqe->lpsn,
764 req);
765 delta = cmp_psn(qp->s_psn, wqe->psn);
766
767 /*
768 * Don't allow more operations to be started
769 * than the QP limits allow. We could get here under
770 * three conditions: (1) It's a new request; (2) We are
771 * sending the second or later segment of a request,
772 * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
773 * when the last segment of a previous request is
774 * received just before this; (3) We are re-sending a
775 * request.
776 */
777 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
778 qp->s_flags |= RVT_S_WAIT_RDMAR;
779 goto bail;
780 }
781 if (newreq) {
782 struct tid_rdma_flow *flow =
783 &req->flows[req->setup_head];
784
785 /*
786 * Set up s_sge as it is needed for TID
787 * allocation. However, if the pages have been
788 * walked and mapped, skip it. An earlier try
789 * has failed to allocate the TID entries.
790 */
791 if (!flow->npagesets) {
792 qp->s_sge.sge = wqe->sg_list[0];
793 qp->s_sge.sg_list = wqe->sg_list + 1;
794 qp->s_sge.num_sge = wqe->wr.num_sge;
795 qp->s_sge.total_len = wqe->length;
796 qp->s_len = wqe->length;
797 req->isge = 0;
798 req->clear_tail = req->setup_head;
799 req->flow_idx = req->setup_head;
800 req->state = TID_REQUEST_ACTIVE;
801 }
802 } else if (delta == 0) {
803 /* Re-send a request */
804 req->cur_seg = 0;
805 req->comp_seg = 0;
806 req->ack_pending = 0;
807 req->flow_idx = req->clear_tail;
808 req->state = TID_REQUEST_RESEND;
809 }
810 req->s_next_psn = qp->s_psn;
811 /* Read one segment at a time */
812 len = min_t(u32, req->seg_len,
813 wqe->length - req->seg_len * req->cur_seg);
814 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
815 &bth1, &bth2,
816 &len);
817 if (delta <= 0) {
818 /* Wait for TID space */
819 goto bail;
820 }
821 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
822 qp->s_lsn++;
823 hwords += delta;
824 ss = &wpriv->ss;
825 /* Check if this is the last segment */
826 if (req->cur_seg >= req->total_segs &&
827 ++qp->s_cur == qp->s_size)
828 qp->s_cur = 0;
829 break;
830
831 case IB_WR_ATOMIC_CMP_AND_SWP:
832 case IB_WR_ATOMIC_FETCH_AND_ADD:
833 /*
834 * Don't allow more operations to be started
835 * than the QP limits allow.
836 */
837 if (qp->s_num_rd_atomic >=
838 qp->s_max_rd_atomic) {
839 qp->s_flags |= RVT_S_WAIT_RDMAR;
840 goto bail;
841 }
842 qp->s_num_rd_atomic++;
843 fallthrough;
844 case IB_WR_OPFN:
845 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
846 qp->s_lsn++;
847 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
848 wqe->wr.opcode == IB_WR_OPFN) {
849 qp->s_state = OP(COMPARE_SWAP);
850 put_ib_ateth_swap(wqe->atomic_wr.swap,
851 &ohdr->u.atomic_eth);
852 put_ib_ateth_compare(wqe->atomic_wr.compare_add,
853 &ohdr->u.atomic_eth);
854 } else {
855 qp->s_state = OP(FETCH_ADD);
856 put_ib_ateth_swap(wqe->atomic_wr.compare_add,
857 &ohdr->u.atomic_eth);
858 put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
859 }
860 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
861 &ohdr->u.atomic_eth);
862 ohdr->u.atomic_eth.rkey = cpu_to_be32(
863 wqe->atomic_wr.rkey);
864 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
865 ss = NULL;
866 len = 0;
867 bth2 |= IB_BTH_REQ_ACK;
868 if (++qp->s_cur == qp->s_size)
869 qp->s_cur = 0;
870 break;
871
872 default:
873 goto bail;
874 }
875 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
876 qp->s_sge.sge = wqe->sg_list[0];
877 qp->s_sge.sg_list = wqe->sg_list + 1;
878 qp->s_sge.num_sge = wqe->wr.num_sge;
879 qp->s_sge.total_len = wqe->length;
880 qp->s_len = wqe->length;
881 }
882 if (newreq) {
883 qp->s_tail++;
884 if (qp->s_tail >= qp->s_size)
885 qp->s_tail = 0;
886 }
887 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
888 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
889 qp->s_psn = wqe->lpsn + 1;
890 else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
891 qp->s_psn = req->s_next_psn;
892 else
893 qp->s_psn++;
894 break;
895
896 case OP(RDMA_READ_RESPONSE_FIRST):
897 /*
898 * qp->s_state is normally set to the opcode of the
899 * last packet constructed for new requests and therefore
900 * is never set to RDMA read response.
901 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
902 * thread to indicate a SEND needs to be restarted from an
903 * earlier PSN without interfering with the sending thread.
904 * See restart_rc().
905 */
906 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
907 fallthrough;
908 case OP(SEND_FIRST):
909 qp->s_state = OP(SEND_MIDDLE);
910 fallthrough;
911 case OP(SEND_MIDDLE):
912 bth2 = mask_psn(qp->s_psn++);
913 ss = &qp->s_sge;
914 len = qp->s_len;
915 if (len > pmtu) {
916 len = pmtu;
917 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
918 break;
919 }
920 if (wqe->wr.opcode == IB_WR_SEND) {
921 qp->s_state = OP(SEND_LAST);
922 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
923 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
924 /* Immediate data comes after the BTH */
925 ohdr->u.imm_data = wqe->wr.ex.imm_data;
926 hwords += 1;
927 } else {
928 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
929 /* invalidate data comes after the BTH */
930 ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
931 hwords += 1;
932 }
933 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
934 bth0 |= IB_BTH_SOLICITED;
935 bth2 |= IB_BTH_REQ_ACK;
936 qp->s_cur++;
937 if (qp->s_cur >= qp->s_size)
938 qp->s_cur = 0;
939 break;
940
941 case OP(RDMA_READ_RESPONSE_LAST):
942 /*
943 * qp->s_state is normally set to the opcode of the
944 * last packet constructed for new requests and therefore
945 * is never set to RDMA read response.
946 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
947 * thread to indicate a RDMA write needs to be restarted from
948 * an earlier PSN without interfering with the sending thread.
949 * See restart_rc().
950 */
951 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
952 fallthrough;
953 case OP(RDMA_WRITE_FIRST):
954 qp->s_state = OP(RDMA_WRITE_MIDDLE);
955 fallthrough;
956 case OP(RDMA_WRITE_MIDDLE):
957 bth2 = mask_psn(qp->s_psn++);
958 ss = &qp->s_sge;
959 len = qp->s_len;
960 if (len > pmtu) {
961 len = pmtu;
962 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
963 break;
964 }
965 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
966 qp->s_state = OP(RDMA_WRITE_LAST);
967 } else {
968 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
969 /* Immediate data comes after the BTH */
970 ohdr->u.imm_data = wqe->wr.ex.imm_data;
971 hwords += 1;
972 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
973 bth0 |= IB_BTH_SOLICITED;
974 }
975 bth2 |= IB_BTH_REQ_ACK;
976 qp->s_cur++;
977 if (qp->s_cur >= qp->s_size)
978 qp->s_cur = 0;
979 break;
980
981 case OP(RDMA_READ_RESPONSE_MIDDLE):
982 /*
983 * qp->s_state is normally set to the opcode of the
984 * last packet constructed for new requests and therefore
985 * is never set to RDMA read response.
986 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
987 * thread to indicate a RDMA read needs to be restarted from
988 * an earlier PSN without interfering with the sending thread.
989 * See restart_rc().
990 */
991 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
992 put_ib_reth_vaddr(
993 wqe->rdma_wr.remote_addr + len,
994 &ohdr->u.rc.reth);
995 ohdr->u.rc.reth.rkey =
996 cpu_to_be32(wqe->rdma_wr.rkey);
997 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
998 qp->s_state = OP(RDMA_READ_REQUEST);
999 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
1000 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
1001 qp->s_psn = wqe->lpsn + 1;
1002 ss = NULL;
1003 len = 0;
1004 qp->s_cur++;
1005 if (qp->s_cur == qp->s_size)
1006 qp->s_cur = 0;
1007 break;
1008
1009 case TID_OP(WRITE_RESP):
1010 /*
1011 * This value for s_state is used for restarting a TID RDMA
1012 * WRITE request. See the comment in OP(RDMA_READ_RESPONSE_MIDDLE)
1013 * above for more.
1014 */
1015 req = wqe_to_tid_req(wqe);
1016 req->state = TID_REQUEST_RESEND;
1017 rcu_read_lock();
1018 remote = rcu_dereference(priv->tid_rdma.remote);
1019 req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
1020 len = wqe->length - (req->comp_seg * remote->max_len);
1021 rcu_read_unlock();
1022
1023 bth2 = mask_psn(qp->s_psn);
1024 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
1025 &bth2, &len);
1026 qp->s_psn = wqe->lpsn + 1;
1027 ss = NULL;
1028 qp->s_state = TID_OP(WRITE_REQ);
1029 priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
1030 priv->s_tid_cur = qp->s_cur;
1031 if (++qp->s_cur == qp->s_size)
1032 qp->s_cur = 0;
1033 trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
1034 wqe->psn, wqe->lpsn, req);
1035 break;
1036
1037 case TID_OP(READ_RESP):
1038 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
1039 goto bail;
1040 /* This is used to restart a TID read request */
1041 req = wqe_to_tid_req(wqe);
1042 wpriv = wqe->priv;
1043 /*
1044 * Back down. The field qp->s_psn has been set to the psn with
1045 * which the request should be restarted. It's OK to use division
1046 * as this is on the retry path.
1047 */
1048 req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
1049
1050 /*
1051 * The following function needs to be redefined to return the
1052 * status to make sure that we find the flow. At the same
1053 * time, we can use the req->state change to check if the
1054 * call succeeds or not.
1055 */
1056 req->state = TID_REQUEST_RESEND;
1057 hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
1058 if (req->state != TID_REQUEST_ACTIVE) {
1059 /*
1060 * Failed to find the flow. Release all allocated tid
1061 * resources.
1062 */
1063 hfi1_kern_exp_rcv_clear_all(req);
1064 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1065
1066 hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
1067 goto bail;
1068 }
1069 req->state = TID_REQUEST_RESEND;
1070 len = min_t(u32, req->seg_len,
1071 wqe->length - req->seg_len * req->cur_seg);
1072 flow = &req->flows[req->flow_idx];
1073 len -= flow->sent;
1074 req->s_next_psn = flow->flow_state.ib_lpsn + 1;
1075 delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1,
1076 &bth2, &len);
1077 if (delta <= 0) {
1078 /* Wait for TID space */
1079 goto bail;
1080 }
1081 hwords += delta;
1082 ss = &wpriv->ss;
1083 /* Check if this is the last segment */
1084 if (req->cur_seg >= req->total_segs &&
1085 ++qp->s_cur == qp->s_size)
1086 qp->s_cur = 0;
1087 qp->s_psn = req->s_next_psn;
1088 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1089 wqe->psn, wqe->lpsn, req);
1090 break;
1091 case TID_OP(READ_REQ):
1092 req = wqe_to_tid_req(wqe);
1093 delta = cmp_psn(qp->s_psn, wqe->psn);
1094 /*
1095 * If the current WR is not TID RDMA READ, or this is the start
1096 * of a new request, we need to change the qp->s_state so that
1097 * the request can be set up properly.
1098 */
1099 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
1100 qp->s_cur == qp->s_tail) {
1101 qp->s_state = OP(RDMA_READ_REQUEST);
1102 if (delta == 0 || qp->s_cur == qp->s_tail)
1103 goto check_s_state;
1104 else
1105 goto bail;
1106 }
1107
1108 /* Rate limiting */
1109 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
1110 qp->s_flags |= RVT_S_WAIT_RDMAR;
1111 goto bail;
1112 }
1113
1114 wpriv = wqe->priv;
1115 /* Read one segment at a time */
1116 len = min_t(u32, req->seg_len,
1117 wqe->length - req->seg_len * req->cur_seg);
1118 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
1119 &bth2, &len);
1120 if (delta <= 0) {
1121 /* Wait for TID space */
1122 goto bail;
1123 }
1124 hwords += delta;
1125 ss = &wpriv->ss;
1126 /* Check if this is the last segment */
1127 if (req->cur_seg >= req->total_segs &&
1128 ++qp->s_cur == qp->s_size)
1129 qp->s_cur = 0;
1130 qp->s_psn = req->s_next_psn;
1131 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1132 wqe->psn, wqe->lpsn, req);
1133 break;
1134 }
1135 qp->s_sending_hpsn = bth2;
1136 delta = delta_psn(bth2, wqe->psn);
1137 if (delta && delta % HFI1_PSN_CREDIT == 0 &&
1138 wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1139 bth2 |= IB_BTH_REQ_ACK;
1140 if (qp->s_flags & RVT_S_SEND_ONE) {
1141 qp->s_flags &= ~RVT_S_SEND_ONE;
1142 qp->s_flags |= RVT_S_WAIT_ACK;
1143 bth2 |= IB_BTH_REQ_ACK;
1144 }
1145 qp->s_len -= len;
1146 ps->s_txreq->hdr_dwords = hwords;
1147 ps->s_txreq->sde = priv->s_sde;
1148 ps->s_txreq->ss = ss;
1149 ps->s_txreq->s_cur_size = len;
1150 hfi1_make_ruc_header(
1151 qp,
1152 ohdr,
1153 bth0 | (qp->s_state << 24),
1154 bth1,
1155 bth2,
1156 middle,
1157 ps);
1158 return 1;
1159
1160done_free_tx:
1161 hfi1_put_txreq(ps->s_txreq);
1162 ps->s_txreq = NULL;
1163 return 1;
1164
1165bail:
1166 hfi1_put_txreq(ps->s_txreq);
1167
1168bail_no_tx:
1169 ps->s_txreq = NULL;
1170 qp->s_flags &= ~RVT_S_BUSY;
1171 /*
1172 * If we didn't get a txreq, the QP will be woken up later to try
1173 * again. Set the flags to indicate which work item to wake
1174 * up.
1175 */
1176 iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
1177 return 0;
1178}
1179
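/*
 * Fill in the BTH and AETH of an ACK/NAK packet: the AETH carries either
 * the NAK state or the credit/MSN information, and BTH0-2 carry the
 * opcode/pkey word, the remote QPN, and the PSN being acknowledged.
 */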
1180static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
1181 struct ib_other_headers *ohdr,
1182 u32 bth0, u32 bth1)
1183{
1184 if (qp->r_nak_state)
1185 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
1186 (qp->r_nak_state <<
1187 IB_AETH_CREDIT_SHIFT));
1188 else
1189 ohdr->u.aeth = rvt_compute_aeth(qp);
1190
1191 ohdr->bth[0] = cpu_to_be32(bth0);
1192 ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
1193 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
1194}
1195
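/*
 * Queue an ACK/NAK for the send engine instead of sending it inline: latch
 * the current NAK state and ACK PSN, set RVT_S_ACK_PENDING and
 * RVT_S_RESP_PENDING, and schedule the QP so make_rc_ack() builds the
 * packet when the send engine runs.
 */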
1196static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
1197{
1198 struct rvt_qp *qp = packet->qp;
1199 struct hfi1_ibport *ibp;
1200 unsigned long flags;
1201
1202 spin_lock_irqsave(&qp->s_lock, flags);
1203 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1204 goto unlock;
1205 ibp = rcd_to_iport(packet->rcd);
1206 this_cpu_inc(*ibp->rvp.rc_qacks);
1207 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
1208 qp->s_nak_state = qp->r_nak_state;
1209 qp->s_ack_psn = qp->r_ack_psn;
1210 if (is_fecn)
1211 qp->s_flags |= RVT_S_ECN;
1212
1213 /* Schedule the send tasklet. */
1214 hfi1_schedule_send(qp);
1215unlock:
1216 spin_unlock_irqrestore(&qp->s_lock, flags);
1217}
1218
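/*
 * Build the 9B (IB) header for an inline ACK: LRH (plus GRH when the
 * remote AH requires it), BTH, and AETH. *hwords returns the header
 * length in 32-bit words.
 */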
1219static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
1220 struct hfi1_opa_header *opa_hdr,
1221 u8 sc5, bool is_fecn,
1222 u64 *pbc_flags, u32 *hwords,
1223 u32 *nwords)
1224{
1225 struct rvt_qp *qp = packet->qp;
1226 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1227 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1228 struct ib_header *hdr = &opa_hdr->ibh;
1229 struct ib_other_headers *ohdr;
1230 u16 lrh0 = HFI1_LRH_BTH;
1231 u16 pkey;
1232 u32 bth0, bth1;
1233
1234 opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
1235 ohdr = &hdr->u.oth;
1236 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
1237 *hwords = 6;
1238
1239 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
1240 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
1241 rdma_ah_read_grh(&qp->remote_ah_attr),
1242 *hwords - 2, SIZE_OF_CRC);
1243 ohdr = &hdr->u.l.oth;
1244 lrh0 = HFI1_LRH_GRH;
1245 }
1246 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
1247 *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
1248
1249 /* read pkey_index w/o lock (it's atomic) */
1250 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1251
1252 lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
1253 (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
1254 IB_SL_SHIFT;
1255
1256 hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
1257 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
1258 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
1259
1260 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
1261 if (qp->s_mig_state == IB_MIG_MIGRATED)
1262 bth0 |= IB_BTH_MIG_REQ;
1263 bth1 = (!!is_fecn) << IB_BECN_SHIFT;
1264 /*
1265 * Inline ACKs go out without the use of the Verbs send engine, so
1266 * we need to set the STL Verbs Extended bit here
1267 */
1268 bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT;
1269 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1270}
1271
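/*
 * Build the 16B header for an inline ACK: 16B LRH (plus GRH for global
 * traffic), BTH, and AETH. *hwords returns the header length and *nwords
 * the pad/LT/ICRC length, both in 32-bit words.
 */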
1272static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
1273 struct hfi1_opa_header *opa_hdr,
1274 u8 sc5, bool is_fecn,
1275 u64 *pbc_flags, u32 *hwords,
1276 u32 *nwords)
1277{
1278 struct rvt_qp *qp = packet->qp;
1279 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1280 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1281 struct hfi1_16b_header *hdr = &opa_hdr->opah;
1282 struct ib_other_headers *ohdr;
1283 u32 bth0, bth1 = 0;
1284 u16 len, pkey;
1285 bool becn = is_fecn;
1286 u8 l4 = OPA_16B_L4_IB_LOCAL;
1287 u8 extra_bytes;
1288
1289 opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
1290 ohdr = &hdr->u.oth;
1291 /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
1292 *hwords = 8;
1293 extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
1294 *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);
1295
1296 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
1297 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
1298 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
1299 rdma_ah_read_grh(&qp->remote_ah_attr),
1300 *hwords - 4, *nwords);
1301 ohdr = &hdr->u.l.oth;
1302 l4 = OPA_16B_L4_IB_GLOBAL;
1303 }
1304 *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
1305
1306 /* read pkey_index w/o lock (it's atomic) */
1307 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1308
1309 /* Convert dwords to flits */
1310 len = (*hwords + *nwords) >> 1;
1311
1312 hfi1_make_16b_hdr(hdr, ppd->lid |
1313 (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
1314 ((1 << ppd->lmc) - 1)),
1315 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
1316 16B), len, pkey, becn, 0, l4, sc5);
1317
1318 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
1319 bth0 |= extra_bytes << 20;
1320 if (qp->s_mig_state == IB_MIG_MIGRATED)
1321 bth1 = OPA_BTH_MIG_REQ;
1322 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1323}
1324
1325typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
1326 struct hfi1_opa_header *opa_hdr,
1327 u8 sc5, bool is_fecn,
1328 u64 *pbc_flags, u32 *hwords,
1329 u32 *nwords);
1330
1331/* We support only two types - 9B and 16B for now */
1332static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
1333 [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
1334 [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
1335};
1336
1337/*
1338 * hfi1_send_rc_ack - Construct an ACK packet and send it
1339 *
1340 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
1341 * Note that RDMA reads and atomics are handled in the
1342 * send side QP state and send engine.
1343 */
1344void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
1345{
1346 struct hfi1_ctxtdata *rcd = packet->rcd;
1347 struct rvt_qp *qp = packet->qp;
1348 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
1349 struct hfi1_qp_priv *priv = qp->priv;
1350 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1351 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
1352 u64 pbc, pbc_flags = 0;
1353 u32 hwords = 0;
1354 u32 nwords = 0;
1355 u32 plen;
1356 struct pio_buf *pbuf;
1357 struct hfi1_opa_header opa_hdr;
1358
1359 /* clear the defer count */
1360 qp->r_adefered = 0;
1361
1362 /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
1363 if (qp->s_flags & RVT_S_RESP_PENDING) {
1364 hfi1_queue_rc_ack(packet, is_fecn);
1365 return;
1366 }
1367
1368 /* Ensure s_rdma_ack_cnt changes are committed */
1369 if (qp->s_rdma_ack_cnt) {
1370 hfi1_queue_rc_ack(packet, is_fecn);
1371 return;
1372 }
1373
1374 /* Don't try to send ACKs if the link isn't ACTIVE */
1375 if (driver_lstate(ppd) != IB_PORT_ACTIVE)
1376 return;
1377
1378 /* Make the appropriate header */
1379 hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
1380 &pbc_flags, &hwords, &nwords);
1381
1382 plen = 2 /* PBC */ + hwords + nwords;
1383 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
1384 sc_to_vlt(ppd->dd, sc5), plen);
1385 pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
1386 if (IS_ERR_OR_NULL(pbuf)) {
1387 /*
1388 * We have no room to send at the moment. Pass
1389 * responsibility for sending the ACK to the send engine
1390 * so that when enough buffer space becomes available,
1391 * the ACK is sent ahead of other outgoing packets.
1392 */
1393 hfi1_queue_rc_ack(packet, is_fecn);
1394 return;
1395 }
1396 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
1397 &opa_hdr, ib_is_sc5(sc5));
1398
1399 /* write the pbc and data */
1400 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
1401 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
1402 (void *)&opa_hdr.ibh :
1403 (void *)&opa_hdr.opah), hwords);
1404 return;
1405}
1406
1407/**
1408 * update_num_rd_atomic - update the qp->s_num_rd_atomic
1409 * @qp: the QP
1410 * @psn: the packet sequence number to restart at
1411 * @wqe: the wqe
1412 *
1413 * This is called from reset_psn() to update qp->s_num_rd_atomic
1414 * for the current wqe.
1415 * Called at interrupt level with the QP s_lock held.
1416 */
1417static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
1418 struct rvt_swqe *wqe)
1419{
1420 u32 opcode = wqe->wr.opcode;
1421
1422 if (opcode == IB_WR_RDMA_READ ||
1423 opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1424 opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1425 qp->s_num_rd_atomic++;
1426 } else if (opcode == IB_WR_TID_RDMA_READ) {
1427 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1428 struct hfi1_qp_priv *priv = qp->priv;
1429
1430 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1431 u32 cur_seg;
1432
1433 cur_seg = (psn - wqe->psn) / priv->pkts_ps;
1434 req->ack_pending = cur_seg - req->comp_seg;
1435 priv->pending_tid_r_segs += req->ack_pending;
1436 qp->s_num_rd_atomic += req->ack_pending;
1437 trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
1438 wqe->wr.opcode,
1439 wqe->psn,
1440 wqe->lpsn,
1441 req);
1442 } else {
1443 priv->pending_tid_r_segs += req->total_segs;
1444 qp->s_num_rd_atomic += req->total_segs;
1445 }
1446 }
1447}
1448
1449/**
1450 * reset_psn - reset the QP state to send starting from PSN
1451 * @qp: the QP
1452 * @psn: the packet sequence number to restart at
1453 *
1454 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
1455 * for the given QP.
1456 * Called at interrupt level with the QP s_lock held.
1457 */
1458static void reset_psn(struct rvt_qp *qp, u32 psn)
1459{
1460 u32 n = qp->s_acked;
1461 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
1462 u32 opcode;
1463 struct hfi1_qp_priv *priv = qp->priv;
1464
1465 lockdep_assert_held(&qp->s_lock);
1466 qp->s_cur = n;
1467 priv->pending_tid_r_segs = 0;
1468 priv->pending_tid_w_resp = 0;
1469 qp->s_num_rd_atomic = 0;
1470
1471 /*
1472 * If we are starting the request from the beginning,
1473 * let the normal send code handle initialization.
1474 */
1475 if (cmp_psn(psn, wqe->psn) <= 0) {
1476 qp->s_state = OP(SEND_LAST);
1477 goto done;
1478 }
1479 update_num_rd_atomic(qp, psn, wqe);
1480
1481 /* Find the work request opcode corresponding to the given PSN. */
1482 for (;;) {
1483 int diff;
1484
1485 if (++n == qp->s_size)
1486 n = 0;
1487 if (n == qp->s_tail)
1488 break;
1489 wqe = rvt_get_swqe_ptr(qp, n);
1490 diff = cmp_psn(psn, wqe->psn);
1491 if (diff < 0) {
1492 /* Point wqe back to the previous one */
1493 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1494 break;
1495 }
1496 qp->s_cur = n;
1497 /*
1498 * If we are starting the request from the beginning,
1499 * let the normal send code handle initialization.
1500 */
1501 if (diff == 0) {
1502 qp->s_state = OP(SEND_LAST);
1503 goto done;
1504 }
1505
1506 update_num_rd_atomic(qp, psn, wqe);
1507 }
1508 opcode = wqe->wr.opcode;
1509
1510 /*
1511 * Set the state to restart in the middle of a request.
1512 * Don't change the s_sge, s_cur_sge, or s_cur_size.
1513 * See hfi1_make_rc_req().
1514 */
1515 switch (opcode) {
1516 case IB_WR_SEND:
1517 case IB_WR_SEND_WITH_IMM:
1518 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
1519 break;
1520
1521 case IB_WR_RDMA_WRITE:
1522 case IB_WR_RDMA_WRITE_WITH_IMM:
1523 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
1524 break;
1525
1526 case IB_WR_TID_RDMA_WRITE:
1527 qp->s_state = TID_OP(WRITE_RESP);
1528 break;
1529
1530 case IB_WR_RDMA_READ:
1531 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
1532 break;
1533
1534 case IB_WR_TID_RDMA_READ:
1535 qp->s_state = TID_OP(READ_RESP);
1536 break;
1537
1538 default:
1539 /*
1540 * This case shouldn't happen since it's only
1541 * one PSN per req.
1542 */
1543 qp->s_state = OP(SEND_LAST);
1544 }
1545done:
1546 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
1547 qp->s_psn = psn;
1548 /*
1549 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
1550 * asynchronously before the send engine can get scheduled.
1551 * Doing it in hfi1_make_rc_req() is too late.
1552 */
1553 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
1554 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
1555 qp->s_flags |= RVT_S_WAIT_PSN;
1556 qp->s_flags &= ~HFI1_S_AHG_VALID;
1557 trace_hfi1_sender_reset_psn(qp);
1558}
1559
1560/*
1561 * Back up the requester to resend the last un-ACKed request.
1562 * The QP r_lock and s_lock should be held and interrupts disabled.
1563 */
1564void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
1565{
1566 struct hfi1_qp_priv *priv = qp->priv;
1567 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1568 struct hfi1_ibport *ibp;
1569
1570 lockdep_assert_held(&qp->r_lock);
1571 lockdep_assert_held(&qp->s_lock);
1572 trace_hfi1_sender_restart_rc(qp);
1573 if (qp->s_retry == 0) {
1574 if (qp->s_mig_state == IB_MIG_ARMED) {
1575 hfi1_migrate_qp(qp);
1576 qp->s_retry = qp->s_retry_cnt;
1577 } else if (qp->s_last == qp->s_acked) {
1578 /*
1579 * We need special handling for the OPFN request WQEs as
1580 * they are not allowed to generate real user errors
1581 */
1582 if (wqe->wr.opcode == IB_WR_OPFN) {
1583 struct hfi1_ibport *ibp =
1584 to_iport(qp->ibqp.device, qp->port_num);
1585 /*
1586 * Call opfn_conn_reply() with capcode and
1587 * remaining data as 0 to close out the
1588 * current request
1589 */
1590 opfn_conn_reply(qp, priv->opfn.curr);
1591 wqe = do_rc_completion(qp, wqe, ibp);
1592 qp->s_flags &= ~RVT_S_WAIT_ACK;
1593 } else {
1594 trace_hfi1_tid_write_sender_restart_rc(qp, 0);
1595 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
1596 struct tid_rdma_request *req;
1597
1598 req = wqe_to_tid_req(wqe);
1599 hfi1_kern_exp_rcv_clear_all(req);
1600 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1601 }
1602
1603 hfi1_trdma_send_complete(qp, wqe,
1604 IB_WC_RETRY_EXC_ERR);
1605 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1606 }
1607 return;
1608 } else { /* need to handle delayed completion */
1609 return;
1610 }
1611 } else {
1612 qp->s_retry--;
1613 }
1614
1615 ibp = to_iport(qp->ibqp.device, qp->port_num);
1616 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1617 wqe->wr.opcode == IB_WR_TID_RDMA_READ)
1618 ibp->rvp.n_rc_resends++;
1619 else
1620 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
1621
1622 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
1623 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
1624 RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
1625 if (wait)
1626 qp->s_flags |= RVT_S_SEND_ONE;
1627 reset_psn(qp, psn);
1628}
1629
1630/*
1631 * Set qp->s_sending_psn to the next PSN after the given one.
1632 * This would be psn+1 except when RDMA reads or TID RDMA ops
1633 * are present.
1634 */
1635static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
1636{
1637 struct rvt_swqe *wqe;
1638 u32 n = qp->s_last;
1639
1640 lockdep_assert_held(&qp->s_lock);
1641 /* Find the work request corresponding to the given PSN. */
1642 for (;;) {
1643 wqe = rvt_get_swqe_ptr(qp, n);
1644 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1645 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1646 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
1647 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
1648 qp->s_sending_psn = wqe->lpsn + 1;
1649 else
1650 qp->s_sending_psn = psn + 1;
1651 break;
1652 }
1653 if (++n == qp->s_size)
1654 n = 0;
1655 if (n == qp->s_tail)
1656 break;
1657 }
1658}
1659
1660/**
1661 * hfi1_rc_verbs_aborted - handle abort status
1662 * @qp: the QP
1663 * @opah: the opa header
1664 *
1665 * This code modifies both the ACK bit in BTH[2]
1666 * and the s_flags so the QP goes into send-one mode.
1667 *
1668 * This serves to throttle the send engine to only
1669 * send a single packet in the likely case that
1670 * a link has gone down.
1671 */
1672void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1673{
1674 struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
1675 u8 opcode = ib_bth_get_opcode(ohdr);
1676 u32 psn;
1677
1678 /* ignore responses */
1679 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1680 opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
1681 opcode == TID_OP(READ_RESP) ||
1682 opcode == TID_OP(WRITE_RESP))
1683 return;
1684
1685 psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
1686 ohdr->bth[2] = cpu_to_be32(psn);
1687 qp->s_flags |= RVT_S_SEND_ONE;
1688}
1689
1690/*
1691 * This should be called with the QP s_lock held and interrupts disabled.
1692 */
1693void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1694{
1695 struct ib_other_headers *ohdr;
1696 struct hfi1_qp_priv *priv = qp->priv;
1697 struct rvt_swqe *wqe;
1698 u32 opcode, head, tail;
1699 u32 psn;
1700 struct tid_rdma_request *req;
1701
1702 lockdep_assert_held(&qp->s_lock);
1703 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
1704 return;
1705
1706 ohdr = hfi1_get_rc_ohdr(opah);
1707 opcode = ib_bth_get_opcode(ohdr);
1708 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1709 opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
1710 opcode == TID_OP(READ_RESP) ||
1711 opcode == TID_OP(WRITE_RESP)) {
1712 WARN_ON(!qp->s_rdma_ack_cnt);
1713 qp->s_rdma_ack_cnt--;
1714 return;
1715 }
1716
1717 psn = ib_bth_get_psn(ohdr);
1718 /*
1719 * Don't attempt to reset the sending PSN for packets in the
1720 * KDETH PSN space since the PSN does not match anything.
1721 */
1722 if (opcode != TID_OP(WRITE_DATA) &&
1723 opcode != TID_OP(WRITE_DATA_LAST) &&
1724 opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC))
1725 reset_sending_psn(qp, psn);
1726
1727 /* Handle TID RDMA WRITE packets differently */
1728 if (opcode >= TID_OP(WRITE_REQ) &&
1729 opcode <= TID_OP(WRITE_DATA_LAST)) {
1730 head = priv->s_tid_head;
1731 tail = priv->s_tid_cur;
1732 /*
1733 * s_tid_cur is set to s_tid_head in the case where
1734 * a new TID RDMA request is being started and all
1735 * previous ones have been completed.
1736 * Therefore, we need to do a secondary check in order
1737 * to properly determine whether we should start the
1738 * RC timer.
1739 */
1740 wqe = rvt_get_swqe_ptr(qp, tail);
1741 req = wqe_to_tid_req(wqe);
1742 if (head == tail && req->comp_seg < req->total_segs) {
1743 if (tail == 0)
1744 tail = qp->s_size - 1;
1745 else
1746 tail -= 1;
1747 }
1748 } else {
1749 head = qp->s_tail;
1750 tail = qp->s_acked;
1751 }
1752
1753 /*
1754 * Start timer after a packet requesting an ACK has been sent and
1755 * there are still requests that haven't been acked.
1756 */
1757 if ((psn & IB_BTH_REQ_ACK) && tail != head &&
1758 opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) &&
1759 opcode != TID_OP(RESYNC) &&
1760 !(qp->s_flags &
1761 (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
1762 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1763 if (opcode == TID_OP(READ_REQ))
1764 rvt_add_retry_timer_ext(qp, priv->timeout_shift);
1765 else
1766 rvt_add_retry_timer(qp);
1767 }
1768
1769 /* Start TID RDMA ACK timer */
1770 if ((opcode == TID_OP(WRITE_DATA) ||
1771 opcode == TID_OP(WRITE_DATA_LAST) ||
1772 opcode == TID_OP(RESYNC)) &&
1773 (psn & IB_BTH_REQ_ACK) &&
1774 !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
1775 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1776 /*
1777 * The TID RDMA ACK packet could be received before this
1778 * function is called. Therefore, add the timer only if TID
1779 * RDMA ACK packets are actually pending.
1780 */
1781 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1782 req = wqe_to_tid_req(wqe);
1783 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
1784 req->ack_seg < req->cur_seg)
1785 hfi1_add_tid_retry_timer(qp);
1786 }
1787
1788 while (qp->s_last != qp->s_acked) {
1789 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
1790 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1791 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1792 break;
1793 trdma_clean_swqe(qp, wqe);
1794 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1795 rvt_qp_complete_swqe(qp,
1796 wqe,
1797 ib_hfi1_wc_opcode[wqe->wr.opcode],
1798 IB_WC_SUCCESS);
1799 }
1800 /*
1801 * If we were waiting for sends to complete before re-sending,
1802 * and they are now complete, restart sending.
1803 */
1804 trace_hfi1_sendcomplete(qp, psn);
1805 if (qp->s_flags & RVT_S_WAIT_PSN &&
1806 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1807 qp->s_flags &= ~RVT_S_WAIT_PSN;
1808 qp->s_sending_psn = qp->s_psn;
1809 qp->s_sending_hpsn = qp->s_psn - 1;
1810 hfi1_schedule_send(qp);
1811 }
1812}
1813
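/* Record the last PSN for which a response has been processed. */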
1814static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
1815{
1816 qp->s_last_psn = psn;
1817}
1818
1819/*
1820 * Generate a SWQE completion.
1821 * This is similar to hfi1_send_complete but has to check to be sure
1822 * that the SGEs are not being referenced if the SWQE is being resent.
1823 */
1824struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
1825 struct rvt_swqe *wqe,
1826 struct hfi1_ibport *ibp)
1827{
1828 struct hfi1_qp_priv *priv = qp->priv;
1829
1830 lockdep_assert_held(&qp->s_lock);
1831 /*
1832 * Don't decrement refcount and don't generate a
1833 * completion if the SWQE is being resent until the send
1834 * is finished.
1835 */
1836 trace_hfi1_rc_completion(qp, wqe->lpsn);
1837 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1838 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1839 trdma_clean_swqe(qp, wqe);
1840 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1841 rvt_qp_complete_swqe(qp,
1842 wqe,
1843 ib_hfi1_wc_opcode[wqe->wr.opcode],
1844 IB_WC_SUCCESS);
1845 } else {
1846 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1847
1848 this_cpu_inc(*ibp->rvp.rc_delayed_comp);
1849 /*
1850 * If send progress is not running, attempt to progress
1851 * the SDMA queue.
1852 */
1853 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
1854 struct sdma_engine *engine;
1855 u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
1856 u8 sc5;
1857
1858 /* For now use sc to find engine */
1859 sc5 = ibp->sl_to_sc[sl];
1860 engine = qp_to_sdma_engine(qp, sc5);
1861 sdma_engine_progress_schedule(engine);
1862 }
1863 }
1864
1865 qp->s_retry = qp->s_retry_cnt;
1866 /*
1867 * Don't update the last PSN if the request being completed is
1868 * a TID RDMA WRITE request.
1869 * Completion of the TID RDMA WRITE requests are done by the
1870 * TID RDMA ACKs and as such could be for a request that has
1871 * already been ACKed as far as the IB state machine is
1872 * concerned.
1873 */
1874 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1875 update_last_psn(qp, wqe->lpsn);
1876
1877 /*
1878 * If we are completing a request which is in the process of
1879 * being resent, we can stop re-sending it since we know the
1880 * responder has already seen it.
1881 */
1882 if (qp->s_acked == qp->s_cur) {
1883 if (++qp->s_cur >= qp->s_size)
1884 qp->s_cur = 0;
1885 qp->s_acked = qp->s_cur;
1886 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1887 if (qp->s_acked != qp->s_tail) {
1888 qp->s_state = OP(SEND_LAST);
1889 qp->s_psn = wqe->psn;
1890 }
1891 } else {
1892 if (++qp->s_acked >= qp->s_size)
1893 qp->s_acked = 0;
1894 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1895 qp->s_draining = 0;
1896 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1897 }
1898 if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
1899 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
1900 hfi1_schedule_send(qp);
1901 }
1902 return wqe;
1903}
1904
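/*
 * set_restart_qp - restart the requester side of the QP
 *
 * Flag the QP to restart from qp->s_last_psn + 1 and, if it is not
 * already queued, put it on the receive context's wait list with
 * RVT_R_RSP_SEND so the retransmit gets scheduled.  RVT_R_RDMAR_SEQ
 * keeps this from being done again while an earlier restart is still
 * pending.
 */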
1905static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
1906{
1907 /* Retry this request. */
1908 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1909 qp->r_flags |= RVT_R_RDMAR_SEQ;
1910 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1911 if (list_empty(&qp->rspwait)) {
1912 qp->r_flags |= RVT_R_RSP_SEND;
1913 rvt_get_qp(qp);
1914 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1915 }
1916 }
1917}
1918
1919/**
1920 * update_qp_retry_state - Update qp retry state.
1921 * @qp: the QP
1922 * @psn: the packet sequence number of the TID RDMA WRITE RESP.
1923 * @spsn: The start psn for the given TID RDMA WRITE swqe.
1924 * @lpsn: The last psn for the given TID RDMA WRITE swqe.
1925 *
1926 * This function is called to update the qp retry state upon
1927 * receiving a TID WRITE RESP after the qp is scheduled to retry
1928 * a request.
1929 */
1930static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
1931 u32 lpsn)
1932{
1933 struct hfi1_qp_priv *qpriv = qp->priv;
1934
1935 qp->s_psn = psn + 1;
1936 /*
1937 * If this is the first TID RDMA WRITE RESP packet for the current
1938 * request, change the s_state so that the retry will be processed
1939 * correctly. Similarly, if this is the last TID RDMA WRITE RESP
1940 * packet, change the s_state and advance the s_cur.
1941 */
1942 if (cmp_psn(psn, lpsn) >= 0) {
1943 qp->s_cur = qpriv->s_tid_cur + 1;
1944 if (qp->s_cur >= qp->s_size)
1945 qp->s_cur = 0;
1946 qp->s_state = TID_OP(WRITE_REQ);
1947 } else if (!cmp_psn(psn, spsn)) {
1948 qp->s_cur = qpriv->s_tid_cur;
1949 qp->s_state = TID_OP(WRITE_RESP);
1950 }
1951}
1952
1953/*
1954 * do_rc_ack - process an incoming RC ACK
1955 * @qp: the QP the ACK came in on
1956 * @psn: the packet sequence number of the ACK
1957 * @opcode: the opcode of the request that resulted in the ACK
1958 *
1959 * This is called from rc_rcv_resp() to process an incoming RC ACK
1960 * for the given QP.
1961 * May be called at interrupt level, with the QP s_lock held.
1962 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
1963 */
1964int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
1965 u64 val, struct hfi1_ctxtdata *rcd)
1966{
1967 struct hfi1_ibport *ibp;
1968 enum ib_wc_status status;
1969 struct hfi1_qp_priv *qpriv = qp->priv;
1970 struct rvt_swqe *wqe;
1971 int ret = 0;
1972 u32 ack_psn;
1973 int diff;
1974 struct rvt_dev_info *rdi;
1975
1976 lockdep_assert_held(&qp->s_lock);
1977 /*
1978 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1979 * requests and implicitly NAK RDMA read and atomic requests issued
1980 * before the NAK'ed request. The MSN won't include the NAK'ed
1981 * request but will include an ACK'ed request(s).
1982 */
1983 ack_psn = psn;
1984 if (aeth >> IB_AETH_NAK_SHIFT)
1985 ack_psn--;
1986 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1987 ibp = rcd_to_iport(rcd);
1988
1989 /*
1990 * The MSN might be for a later WQE than the PSN indicates so
1991 * only complete WQEs that the PSN finishes.
1992 */
1993 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
1994 /*
1995 * RDMA_READ_RESPONSE_ONLY is a special case since
1996 * we want to generate completion events for everything
1997 * before the RDMA read, copy the data, then generate
1998 * the completion for the read.
1999 */
2000 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
2001 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
2002 diff == 0) {
2003 ret = 1;
2004 goto bail_stop;
2005 }
2006 /*
2007 * If this request is an RDMA read or atomic, and the ACK is
2008 * for a later operation, this ACK NAKs the RDMA read or
2009 * atomic. In other words, only an RDMA_READ_LAST or ONLY
2010 * can ACK an RDMA read, and likewise for atomic ops. Note
2011 * that the NAK case can only happen if relaxed ordering is
2012 * used and requests are sent after an RDMA read or atomic
2013 * is sent but before the response is received.
2014 */
2015 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
2016 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
2017 (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
2018 (opcode != TID_OP(READ_RESP) || diff != 0)) ||
2019 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2020 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
2021 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
2022 (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2023 (delta_psn(psn, qp->s_last_psn) != 1))) {
2024 set_restart_qp(qp, rcd);
2025 /*
2026 * No need to process the ACK/NAK since we are
2027 * restarting an earlier request.
2028 */
2029 goto bail_stop;
2030 }
2031 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2032 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2033 u64 *vaddr = wqe->sg_list[0].vaddr;
2034 *vaddr = val;
2035 }
2036 if (wqe->wr.opcode == IB_WR_OPFN)
2037 opfn_conn_reply(qp, val);
2038
2039 if (qp->s_num_rd_atomic &&
2040 (wqe->wr.opcode == IB_WR_RDMA_READ ||
2041 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2042 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
2043 qp->s_num_rd_atomic--;
2044 /* Restart sending task if fence is complete */
2045 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2046 !qp->s_num_rd_atomic) {
2047 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2048 RVT_S_WAIT_ACK);
2049 hfi1_schedule_send(qp);
2050 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2051 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
2052 RVT_S_WAIT_ACK);
2053 hfi1_schedule_send(qp);
2054 }
2055 }
2056
2057 /*
2058 * TID RDMA WRITE requests will be completed by the TID RDMA
2059 * ACK packet handler (see tid_rdma.c).
2060 */
2061 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
2062 break;
2063
2064 wqe = do_rc_completion(qp, wqe, ibp);
2065 if (qp->s_acked == qp->s_tail)
2066 break;
2067 }
2068
2069 trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
2070 trace_hfi1_sender_do_rc_ack(qp);
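 /*
 * The upper bits of the AETH encode the response type handled below:
 * 0 is a normal ACK, 1 is an RNR NAK, 3 is a NAK with a syndrome in
 * the credit field, and 2 is reserved (ignored).
 */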
2071 switch (aeth >> IB_AETH_NAK_SHIFT) {
2072 case 0: /* ACK */
2073 this_cpu_inc(*ibp->rvp.rc_acks);
2074 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2075 if (wqe_to_tid_req(wqe)->ack_pending)
2076 rvt_mod_retry_timer_ext(qp,
2077 qpriv->timeout_shift);
2078 else
2079 rvt_stop_rc_timers(qp);
2080 } else if (qp->s_acked != qp->s_tail) {
2081 struct rvt_swqe *__w = NULL;
2082
2083 if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
2084 __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
2085
2086 /*
2087 * Stop timers if we've received all of the TID RDMA
2088 * WRITE responses.
2089 */
2090 if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2091 opcode == TID_OP(WRITE_RESP)) {
2092 /*
2093 * Normally, the loop above would correctly
2094 * process all WQEs from s_acked onward and
2095 * either complete them or check for correct
2096 * PSN sequencing.
2097 * However, for TID RDMA, due to pipelining,
2098 * the response may not be for the request at
2099 * s_acked so the above loop would just be
2100 * skipped. This does not allow for checking
2101 * the PSN sequencing. It has to be done
2102 * separately.
2103 */
2104 if (cmp_psn(psn, qp->s_last_psn + 1)) {
2105 set_restart_qp(qp, rcd);
2106 goto bail_stop;
2107 }
2108 /*
2109 * If the psn is being resent, stop the
2110 * resending.
2111 */
2112 if (qp->s_cur != qp->s_tail &&
2113 cmp_psn(qp->s_psn, psn) <= 0)
2114 update_qp_retry_state(qp, psn,
2115 __w->psn,
2116 __w->lpsn);
2117 else if (--qpriv->pending_tid_w_resp)
2118 rvt_mod_retry_timer(qp);
2119 else
2120 rvt_stop_rc_timers(qp);
2121 } else {
2122 /*
2123 * We are expecting more ACKs so
2124 * mod the retry timer.
2125 */
2126 rvt_mod_retry_timer(qp);
2127 /*
2128 * We can stop re-sending the earlier packets
2129 * and continue with the next packet the
2130 * receiver wants.
2131 */
2132 if (cmp_psn(qp->s_psn, psn) <= 0)
2133 reset_psn(qp, psn + 1);
2134 }
2135 } else {
2136 /* No more acks - kill all timers */
2137 rvt_stop_rc_timers(qp);
2138 if (cmp_psn(qp->s_psn, psn) <= 0) {
2139 qp->s_state = OP(SEND_LAST);
2140 qp->s_psn = psn + 1;
2141 }
2142 }
2143 if (qp->s_flags & RVT_S_WAIT_ACK) {
2144 qp->s_flags &= ~RVT_S_WAIT_ACK;
2145 hfi1_schedule_send(qp);
2146 }
2147 rvt_get_credit(qp, aeth);
2148 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2149 qp->s_retry = qp->s_retry_cnt;
2150 /*
2151 * If the current request is a TID RDMA WRITE request and the
2152 * response is not a TID RDMA WRITE RESP packet, s_last_psn
2153 * can't be advanced.
2154 */
2155 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2156 opcode != TID_OP(WRITE_RESP) &&
2157 cmp_psn(psn, wqe->psn) >= 0)
2158 return 1;
2159 update_last_psn(qp, psn);
2160 return 1;
2161
2162 case 1: /* RNR NAK */
2163 ibp->rvp.n_rnr_naks++;
2164 if (qp->s_acked == qp->s_tail)
2165 goto bail_stop;
2166 if (qp->s_flags & RVT_S_WAIT_RNR)
2167 goto bail_stop;
2168 rdi = ib_to_rvt(qp->ibqp.device);
2169 if (!(rdi->post_parms[wqe->wr.opcode].flags &
2170 RVT_OPERATION_IGN_RNR_CNT)) {
2171 if (qp->s_rnr_retry == 0) {
2172 status = IB_WC_RNR_RETRY_EXC_ERR;
2173 goto class_b;
2174 }
2175 if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
2176 qp->s_rnr_retry--;
2177 }
2178
2179 /*
2180 * The last valid PSN is the previous PSN. For TID RDMA WRITE
2181 * request, s_last_psn should be incremented only when a TID
2182 * RDMA WRITE RESP is received to avoid skipping lost TID RDMA
2183 * WRITE RESP packets.
2184 */
2185 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
2186 reset_psn(qp, qp->s_last_psn + 1);
2187 } else {
2188 update_last_psn(qp, psn - 1);
2189 reset_psn(qp, psn);
2190 }
2191
2192 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
2193 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
2194 rvt_stop_rc_timers(qp);
2195 rvt_add_rnr_timer(qp, aeth);
2196 return 0;
2197
2198 case 3: /* NAK */
2199 if (qp->s_acked == qp->s_tail)
2200 goto bail_stop;
2201 /* The last valid PSN is the previous PSN. */
2202 update_last_psn(qp, psn - 1);
2203 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
2204 IB_AETH_CREDIT_MASK) {
2205 case 0: /* PSN sequence error */
2206 ibp->rvp.n_seq_naks++;
2207 /*
2208 * Back up to the responder's expected PSN.
2209 * Note that we might get a NAK in the middle of an
2210 * RDMA READ response which terminates the RDMA
2211 * READ.
2212 */
2213 hfi1_restart_rc(qp, psn, 0);
2214 hfi1_schedule_send(qp);
2215 break;
2216
2217 case 1: /* Invalid Request */
2218 status = IB_WC_REM_INV_REQ_ERR;
2219 ibp->rvp.n_other_naks++;
2220 goto class_b;
2221
2222 case 2: /* Remote Access Error */
2223 status = IB_WC_REM_ACCESS_ERR;
2224 ibp->rvp.n_other_naks++;
2225 goto class_b;
2226
2227 case 3: /* Remote Operation Error */
2228 status = IB_WC_REM_OP_ERR;
2229 ibp->rvp.n_other_naks++;
2230class_b:
2231 if (qp->s_last == qp->s_acked) {
2232 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
2233 hfi1_kern_read_tid_flow_free(qp);
2234
2235 hfi1_trdma_send_complete(qp, wqe, status);
2236 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2237 }
2238 break;
2239
2240 default:
2241 /* Ignore other reserved NAK error codes */
2242 goto reserved;
2243 }
2244 qp->s_retry = qp->s_retry_cnt;
2245 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2246 goto bail_stop;
2247
2248 default: /* 2: reserved */
2249reserved:
2250 /* Ignore reserved NAK codes. */
2251 goto bail_stop;
2252 }
2253 /* cannot be reached */
2254bail_stop:
2255 rvt_stop_rc_timers(qp);
2256 return ret;
2257}
2258
2259/*
2260 * We have seen an out of sequence RDMA read middle or last packet.
2261 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
2262 */
2263static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
2264 struct hfi1_ctxtdata *rcd)
2265{
2266 struct rvt_swqe *wqe;
2267
2268 lockdep_assert_held(&qp->s_lock);
2269 /* Remove QP from retry timer */
2270 rvt_stop_rc_timers(qp);
2271
2272 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2273
2274 while (cmp_psn(psn, wqe->lpsn) > 0) {
2275 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
2276 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
2277 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
2278 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2279 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
2280 break;
2281 wqe = do_rc_completion(qp, wqe, ibp);
2282 }
2283
2284 ibp->rvp.n_rdma_seq++;
2285 qp->r_flags |= RVT_R_RDMAR_SEQ;
2286 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
2287 if (list_empty(&qp->rspwait)) {
2288 qp->r_flags |= RVT_R_RSP_SEND;
2289 rvt_get_qp(qp);
2290 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2291 }
2292}
2293
2294/**
2295 * rc_rcv_resp - process an incoming RC response packet
2296 * @packet: data packet information
2297 *
2298 * This is called from hfi1_rc_rcv() to process an incoming RC response
2299 * packet for the given QP.
2300 * Called at interrupt level.
2301 */
2302static void rc_rcv_resp(struct hfi1_packet *packet)
2303{
2304 struct hfi1_ctxtdata *rcd = packet->rcd;
2305 void *data = packet->payload;
2306 u32 tlen = packet->tlen;
2307 struct rvt_qp *qp = packet->qp;
2308 struct hfi1_ibport *ibp;
2309 struct ib_other_headers *ohdr = packet->ohdr;
2310 struct rvt_swqe *wqe;
2311 enum ib_wc_status status;
2312 unsigned long flags;
2313 int diff;
2314 u64 val;
2315 u32 aeth;
2316 u32 psn = ib_bth_get_psn(packet->ohdr);
2317 u32 pmtu = qp->pmtu;
2318 u16 hdrsize = packet->hlen;
2319 u8 opcode = packet->opcode;
2320 u8 pad = packet->pad;
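 /*
 * Bytes following the payload: padding, any 16B-specific extra byte,
 * and the ICRC (SIZE_OF_CRC is counted in 32-bit words, hence << 2).
 */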
2321 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
2322
2323 spin_lock_irqsave(&qp->s_lock, flags);
2324 trace_hfi1_ack(qp, psn);
2325
2326 /* Ignore invalid responses. */
2327 if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
2328 goto ack_done;
2329
2330 /* Ignore duplicate responses. */
2331 diff = cmp_psn(psn, qp->s_last_psn);
2332 if (unlikely(diff <= 0)) {
2333 /* Update credits for "ghost" ACKs */
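 /*
 * A "ghost" ACK re-acknowledges the last PSN already processed; it
 * completes nothing new, but its AETH may still carry updated credits.
 */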
2334 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
2335 aeth = be32_to_cpu(ohdr->u.aeth);
2336 if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
2337 rvt_get_credit(qp, aeth);
2338 }
2339 goto ack_done;
2340 }
2341
2342 /*
2343 * Skip everything other than the PSN we expect, if we are waiting
2344 * for a reply to a restarted RDMA read or atomic op.
2345 */
2346 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
2347 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
2348 goto ack_done;
2349 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
2350 }
2351
2352 if (unlikely(qp->s_acked == qp->s_tail))
2353 goto ack_done;
2354 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2355 status = IB_WC_SUCCESS;
2356
2357 switch (opcode) {
2358 case OP(ACKNOWLEDGE):
2359 case OP(ATOMIC_ACKNOWLEDGE):
2360 case OP(RDMA_READ_RESPONSE_FIRST):
2361 aeth = be32_to_cpu(ohdr->u.aeth);
2362 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
2363 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
2364 else
2365 val = 0;
2366 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
2367 opcode != OP(RDMA_READ_RESPONSE_FIRST))
2368 goto ack_done;
2369 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2370 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2371 goto ack_op_err;
2372 /*
2373 * If this is a response to a resent RDMA read, we
2374 * have to be careful to copy the data to the right
2375 * location.
2376 */
2377 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
2378 wqe, psn, pmtu);
2379 goto read_middle;
2380
2381 case OP(RDMA_READ_RESPONSE_MIDDLE):
2382 /* no AETH, no ACK */
2383 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
2384 goto ack_seq_err;
2385 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2386 goto ack_op_err;
2387read_middle:
2388 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2389 goto ack_len_err;
2390 if (unlikely(pmtu >= qp->s_rdma_read_len))
2391 goto ack_len_err;
2392
2393 /*
2394 * We got a response so update the timeout.
2395 * 4.096 usec. * (1 << qp->timeout)
2396 */
2397 rvt_mod_retry_timer(qp);
2398 if (qp->s_flags & RVT_S_WAIT_ACK) {
2399 qp->s_flags &= ~RVT_S_WAIT_ACK;
2400 hfi1_schedule_send(qp);
2401 }
2402
2403 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
2404 qp->s_retry = qp->s_retry_cnt;
2405
2406 /*
2407 * Update the RDMA receive state but do the copy w/o
2408 * holding the locks and blocking interrupts.
2409 */
2410 qp->s_rdma_read_len -= pmtu;
2411 update_last_psn(qp, psn);
2412 spin_unlock_irqrestore(&qp->s_lock, flags);
2413 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
2414 data, pmtu, false, false);
2415 goto bail;
2416
2417 case OP(RDMA_READ_RESPONSE_ONLY):
2418 aeth = be32_to_cpu(ohdr->u.aeth);
2419 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
2420 goto ack_done;
2421 /*
2422 * Check that the data size is >= 0 && <= pmtu.
2423 * Remember to account for ICRC (4).
2424 */
2425 if (unlikely(tlen < (hdrsize + extra_bytes)))
2426 goto ack_len_err;
2427 /*
2428 * If this is a response to a resent RDMA read, we
2429 * have to be careful to copy the data to the right
2430 * location.
2431 */
2432 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2433 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
2434 wqe, psn, pmtu);
2435 goto read_last;
2436
2437 case OP(RDMA_READ_RESPONSE_LAST):
2438 /* ACKs READ req. */
2439 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
2440 goto ack_seq_err;
2441 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2442 goto ack_op_err;
2443 /*
2444 * Check that the data size is >= 1 && <= pmtu.
2445 * Remember to account for ICRC (4).
2446 */
2447 if (unlikely(tlen <= (hdrsize + extra_bytes)))
2448 goto ack_len_err;
2449read_last:
2450 tlen -= hdrsize + extra_bytes;
2451 if (unlikely(tlen != qp->s_rdma_read_len))
2452 goto ack_len_err;
2453 aeth = be32_to_cpu(ohdr->u.aeth);
2454 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
2455 data, tlen, false, false);
2456 WARN_ON(qp->s_rdma_read_sge.num_sge);
2457 (void)do_rc_ack(qp, aeth, psn,
2458 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
2459 goto ack_done;
2460 }
2461
2462ack_op_err:
2463 status = IB_WC_LOC_QP_OP_ERR;
2464 goto ack_err;
2465
2466ack_seq_err:
2467 ibp = rcd_to_iport(rcd);
2468 rdma_seq_err(qp, ibp, psn, rcd);
2469 goto ack_done;
2470
2471ack_len_err:
2472 status = IB_WC_LOC_LEN_ERR;
2473ack_err:
2474 if (qp->s_last == qp->s_acked) {
2475 rvt_send_complete(qp, wqe, status);
2476 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2477 }
2478ack_done:
2479 spin_unlock_irqrestore(&qp->s_lock, flags);
2480bail:
2481 return;
2482}
2483
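/*
 * rc_cancel_ack - drop a previously deferred ACK/NAK
 *
 * Clear the deferred-ACK count and, if the QP is still on the receive
 * context's wait list, remove it and release the reference taken when
 * it was queued.
 */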
2484static inline void rc_cancel_ack(struct rvt_qp *qp)
2485{
2486 qp->r_adefered = 0;
2487 if (list_empty(&qp->rspwait))
2488 return;
2489 list_del_init(&qp->rspwait);
2490 qp->r_flags &= ~RVT_R_RSP_NAK;
2491 rvt_put_qp(qp);
2492}
2493
2494/**
2495 * rc_rcv_error - process an incoming duplicate or error RC packet
2496 * @ohdr: the other headers for this packet
2497 * @data: the packet data
2498 * @qp: the QP for this packet
2499 * @opcode: the opcode for this packet
2500 * @psn: the packet sequence number for this packet
2501 * @diff: the difference between the PSN and the expected PSN
2502 * @rcd: the receive context
2503 *
2504 * This is called from hfi1_rc_rcv() to process an unexpected
2505 * incoming RC packet for the given QP.
2506 * Called at interrupt level.
2507 * Return 1 if no more processing is needed; otherwise return 0 to
2508 * schedule a response to be sent.
2509 */
2510static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
2511 struct rvt_qp *qp, u32 opcode, u32 psn,
2512 int diff, struct hfi1_ctxtdata *rcd)
2513{
2514 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2515 struct rvt_ack_entry *e;
2516 unsigned long flags;
2517 u8 prev;
2518 u8 mra; /* most recent ACK */
2519 bool old_req;
2520
2521 trace_hfi1_rcv_error(qp, psn);
2522 if (diff > 0) {
2523 /*
2524 * Packet sequence error.
2525 * A NAK will ACK earlier sends and RDMA writes.
2526 * Don't queue the NAK if we already sent one.
2527 */
2528 if (!qp->r_nak_state) {
2529 ibp->rvp.n_rc_seqnak++;
2530 qp->r_nak_state = IB_NAK_PSN_ERROR;
2531 /* Use the expected PSN. */
2532 qp->r_ack_psn = qp->r_psn;
2533 /*
2534 * Wait to send the sequence NAK until all packets
2535 * in the receive queue have been processed.
2536 * Otherwise, we end up propagating congestion.
2537 */
2538 rc_defered_ack(rcd, qp);
2539 }
2540 goto done;
2541 }
2542
2543 /*
2544 * Handle a duplicate request. Don't re-execute SEND, RDMA
2545 * write or atomic op. Don't NAK errors, just silently drop
2546 * the duplicate request. Note that r_sge, r_len, and
2547 * r_rcv_len may be in use so don't modify them.
2548 *
2549 * We are supposed to ACK the earliest duplicate PSN but we
2550 * can coalesce an outstanding duplicate ACK. We have to
2551 * send the earliest so that RDMA reads can be restarted at
2552 * the requester's expected PSN.
2553 *
2554 * First, find where this duplicate PSN falls within the
2555 * ACKs previously sent.
2556 * old_req is true if there is an older response that is scheduled
2557 * to be sent before sending this one.
2558 */
2559 e = NULL;
2560 old_req = true;
2561 ibp->rvp.n_rc_dupreq++;
2562
2563 spin_lock_irqsave(&qp->s_lock, flags);
2564
2565 e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
2566
2567 switch (opcode) {
2568 case OP(RDMA_READ_REQUEST): {
2569 struct ib_reth *reth;
2570 u32 offset;
2571 u32 len;
2572
2573 /*
2574 * If we didn't find the RDMA read request in the ack queue,
2575 * we can ignore this request.
2576 */
2577 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
2578 goto unlock_done;
2579 /* RETH comes after BTH */
2580 reth = &ohdr->u.rc.reth;
2581 /*
2582 * Address range must be a subset of the original
2583 * request and start on pmtu boundaries.
2584 * We reuse the old ack_queue slot since the requester
2585 * should not back up and request an earlier PSN for the
2586 * same request.
2587 */
2588 offset = delta_psn(psn, e->psn) * qp->pmtu;
2589 len = be32_to_cpu(reth->length);
2590 if (unlikely(offset + len != e->rdma_sge.sge_length))
2591 goto unlock_done;
2592 release_rdma_sge_mr(e);
2593 if (len != 0) {
2594 u32 rkey = be32_to_cpu(reth->rkey);
2595 u64 vaddr = get_ib_reth_vaddr(reth);
2596 int ok;
2597
2598 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
2599 IB_ACCESS_REMOTE_READ);
2600 if (unlikely(!ok))
2601 goto unlock_done;
2602 } else {
2603 e->rdma_sge.vaddr = NULL;
2604 e->rdma_sge.length = 0;
2605 e->rdma_sge.sge_length = 0;
2606 }
2607 e->psn = psn;
2608 if (old_req)
2609 goto unlock_done;
2610 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
2611 qp->s_acked_ack_queue = prev;
2612 qp->s_tail_ack_queue = prev;
2613 break;
2614 }
2615
2616 case OP(COMPARE_SWAP):
2617 case OP(FETCH_ADD): {
2618 /*
2619 * If we didn't find the atomic request in the ack queue
2620 * or the send engine is already backed up to send an
2621 * earlier entry, we can ignore this request.
2622 */
2623 if (!e || e->opcode != (u8)opcode || old_req)
2624 goto unlock_done;
2625 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
2626 qp->s_acked_ack_queue = prev;
2627 qp->s_tail_ack_queue = prev;
2628 break;
2629 }
2630
2631 default:
2632 /*
2633 * Ignore this operation if it doesn't request an ACK
2634 * or an earlier RDMA read or atomic is going to be resent.
2635 */
2636 if (!(psn & IB_BTH_REQ_ACK) || old_req)
2637 goto unlock_done;
2638 /*
2639 * Resend the most recent ACK if this request is
2640 * after all the previous RDMA reads and atomics.
2641 */
2642 if (mra == qp->r_head_ack_queue) {
2643 spin_unlock_irqrestore(&qp->s_lock, flags);
2644 qp->r_nak_state = 0;
2645 qp->r_ack_psn = qp->r_psn - 1;
2646 goto send_ack;
2647 }
2648
2649 /*
2650 * Resend the RDMA read or atomic op which
2651 * ACKs this duplicate request.
2652 */
2653 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
2654 qp->s_acked_ack_queue = mra;
2655 qp->s_tail_ack_queue = mra;
2656 break;
2657 }
2658 qp->s_ack_state = OP(ACKNOWLEDGE);
2659 qp->s_flags |= RVT_S_RESP_PENDING;
2660 qp->r_nak_state = 0;
2661 hfi1_schedule_send(qp);
2662
2663unlock_done:
2664 spin_unlock_irqrestore(&qp->s_lock, flags);
2665done:
2666 return 1;
2667
2668send_ack:
2669 return 0;
2670}
2671
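/*
 * log_cca_event - record a threshold congestion event for this SL
 *
 * Note the SL in the per-port threshold event map, bump the event
 * counter, and fill the next slot of the circular cc_events[] log,
 * all under cc_log_lock.
 */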
2672static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
2673 u32 lqpn, u32 rqpn, u8 svc_type)
2674{
2675 struct opa_hfi1_cong_log_event_internal *cc_event;
2676 unsigned long flags;
2677
2678 if (sl >= OPA_MAX_SLS)
2679 return;
2680
2681 spin_lock_irqsave(&ppd->cc_log_lock, flags);
2682
2683 ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
2684 ppd->threshold_event_counter++;
2685
2686 cc_event = &ppd->cc_events[ppd->cc_log_idx++];
2687 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
2688 ppd->cc_log_idx = 0;
2689 cc_event->lqpn = lqpn & RVT_QPN_MASK;
2690 cc_event->rqpn = rqpn & RVT_QPN_MASK;
2691 cc_event->sl = sl;
2692 cc_event->svc_type = svc_type;
2693 cc_event->rlid = rlid;
2694 /* keep timestamp in units of 1.024 usec */
2695 cc_event->timestamp = ktime_get_ns() / 1024;
2696
2697 spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
2698}
2699
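/*
 * process_becn - react to a BECN/FECN congestion indication
 *
 * Raise the CCA timer's CCTI for this SL (bounded by ccti_limit) and
 * refresh the link inter-packet gap, then start the CCA hrtimer if it
 * is not already running.  If the resulting CCTI has reached the
 * trigger threshold, also log a congestion event.
 */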
2700void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
2701 u32 rqpn, u8 svc_type)
2702{
2703 struct cca_timer *cca_timer;
2704 u16 ccti, ccti_incr, ccti_timer, ccti_limit;
2705 u8 trigger_threshold;
2706 struct cc_state *cc_state;
2707 unsigned long flags;
2708
2709 if (sl >= OPA_MAX_SLS)
2710 return;
2711
2712 cc_state = get_cc_state(ppd);
2713
2714 if (!cc_state)
2715 return;
2716
2717 /*
2718 * 1) increase CCTI (for this SL)
2719 * 2) select IPG (i.e., call set_link_ipg())
2720 * 3) start timer
2721 */
2722 ccti_limit = cc_state->cct.ccti_limit;
2723 ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
2724 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
2725 trigger_threshold =
2726 cc_state->cong_setting.entries[sl].trigger_threshold;
2727
2728 spin_lock_irqsave(&ppd->cca_timer_lock, flags);
2729
2730 cca_timer = &ppd->cca_timer[sl];
2731 if (cca_timer->ccti < ccti_limit) {
2732 if (cca_timer->ccti + ccti_incr <= ccti_limit)
2733 cca_timer->ccti += ccti_incr;
2734 else
2735 cca_timer->ccti = ccti_limit;
2736 set_link_ipg(ppd);
2737 }
2738
2739 ccti = cca_timer->ccti;
2740
2741 if (!hrtimer_active(&cca_timer->hrtimer)) {
2742 /* ccti_timer is in units of 1.024 usec */
2743 unsigned long nsec = 1024 * ccti_timer;
2744
2745 hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
2746 HRTIMER_MODE_REL_PINNED);
2747 }
2748
2749 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
2750
2751 if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
2752 log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
2753}
2754
2755/**
2756 * hfi1_rc_rcv - process an incoming RC packet
2757 * @packet: data packet information
2758 *
2759 * This is called from qp_rcv() to process an incoming RC packet
2760 * for the given QP.
2761 * May be called at interrupt level.
2762 */
2763void hfi1_rc_rcv(struct hfi1_packet *packet)
2764{
2765 struct hfi1_ctxtdata *rcd = packet->rcd;
2766 void *data = packet->payload;
2767 u32 tlen = packet->tlen;
2768 struct rvt_qp *qp = packet->qp;
2769 struct hfi1_qp_priv *qpriv = qp->priv;
2770 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2771 struct ib_other_headers *ohdr = packet->ohdr;
2772 u32 opcode = packet->opcode;
2773 u32 hdrsize = packet->hlen;
2774 u32 psn = ib_bth_get_psn(packet->ohdr);
2775 u32 pad = packet->pad;
2776 struct ib_wc wc;
2777 u32 pmtu = qp->pmtu;
2778 int diff;
2779 struct ib_reth *reth;
2780 unsigned long flags;
2781 int ret;
2782 bool copy_last = false, fecn;
2783 u32 rkey;
2784 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
2785
2786 lockdep_assert_held(&qp->r_lock);
2787
2788 if (hfi1_ruc_check_hdr(ibp, packet))
2789 return;
2790
2791 fecn = process_ecn(qp, packet);
2792 opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));
2793
2794 /*
2795 * Process responses (ACKs) before anything else. Note that the
2796 * packet sequence number will be for something in the send work
2797 * queue rather than the expected receive packet sequence number.
2798 * In other words, this QP is the requester.
2799 */
2800 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
2801 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
2802 rc_rcv_resp(packet);
2803 return;
2804 }
2805
2806 /* Compute 24 bits worth of difference. */
2807 diff = delta_psn(psn, qp->r_psn);
2808 if (unlikely(diff)) {
2809 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
2810 return;
2811 goto send_ack;
2812 }
2813
2814 /* Check for opcode sequence errors. */
2815 switch (qp->r_state) {
2816 case OP(SEND_FIRST):
2817 case OP(SEND_MIDDLE):
2818 if (opcode == OP(SEND_MIDDLE) ||
2819 opcode == OP(SEND_LAST) ||
2820 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2821 opcode == OP(SEND_LAST_WITH_INVALIDATE))
2822 break;
2823 goto nack_inv;
2824
2825 case OP(RDMA_WRITE_FIRST):
2826 case OP(RDMA_WRITE_MIDDLE):
2827 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
2828 opcode == OP(RDMA_WRITE_LAST) ||
2829 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2830 break;
2831 goto nack_inv;
2832
2833 default:
2834 if (opcode == OP(SEND_MIDDLE) ||
2835 opcode == OP(SEND_LAST) ||
2836 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2837 opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
2838 opcode == OP(RDMA_WRITE_MIDDLE) ||
2839 opcode == OP(RDMA_WRITE_LAST) ||
2840 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2841 goto nack_inv;
2842 /*
2843 * Note that it is up to the requester to not send a new
2844 * RDMA read or atomic operation before receiving an ACK
2845 * for the previous operation.
2846 */
2847 break;
2848 }
2849
2850 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2851 rvt_comm_est(qp);
2852
2853 /* OK, process the packet. */
2854 switch (opcode) {
2855 case OP(SEND_FIRST):
2856 ret = rvt_get_rwqe(qp, false);
2857 if (ret < 0)
2858 goto nack_op_err;
2859 if (!ret)
2860 goto rnr_nak;
2861 qp->r_rcv_len = 0;
2862 fallthrough;
2863 case OP(SEND_MIDDLE):
2864 case OP(RDMA_WRITE_MIDDLE):
2865send_middle:
2866 /* Check for invalid length PMTU or posted rwqe len. */
2867 /*
2868 * There will be no padding for a 9B packet, but 16B packets
2869 * will come in with some padding since we always add
2870 * CRC and LT bytes, which need to be flit aligned.
2871 */
2872 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2873 goto nack_inv;
2874 qp->r_rcv_len += pmtu;
2875 if (unlikely(qp->r_rcv_len > qp->r_len))
2876 goto nack_inv;
2877 rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
2878 break;
2879
2880 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
2881 /* consume RWQE */
2882 ret = rvt_get_rwqe(qp, true);
2883 if (ret < 0)
2884 goto nack_op_err;
2885 if (!ret)
2886 goto rnr_nak;
2887 goto send_last_imm;
2888
2889 case OP(SEND_ONLY):
2890 case OP(SEND_ONLY_WITH_IMMEDIATE):
2891 case OP(SEND_ONLY_WITH_INVALIDATE):
2892 ret = rvt_get_rwqe(qp, false);
2893 if (ret < 0)
2894 goto nack_op_err;
2895 if (!ret)
2896 goto rnr_nak;
2897 qp->r_rcv_len = 0;
2898 if (opcode == OP(SEND_ONLY))
2899 goto no_immediate_data;
2900 if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
2901 goto send_last_inv;
2902 fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
2903 case OP(SEND_LAST_WITH_IMMEDIATE):
2904send_last_imm:
2905 wc.ex.imm_data = ohdr->u.imm_data;
2906 wc.wc_flags = IB_WC_WITH_IMM;
2907 goto send_last;
2908 case OP(SEND_LAST_WITH_INVALIDATE):
2909send_last_inv:
2910 rkey = be32_to_cpu(ohdr->u.ieth);
2911 if (rvt_invalidate_rkey(qp, rkey))
2912 goto no_immediate_data;
2913 wc.ex.invalidate_rkey = rkey;
2914 wc.wc_flags = IB_WC_WITH_INVALIDATE;
2915 goto send_last;
2916 case OP(RDMA_WRITE_LAST):
2917 copy_last = rvt_is_user_qp(qp);
2918 fallthrough;
2919 case OP(SEND_LAST):
2920no_immediate_data:
2921 wc.wc_flags = 0;
2922 wc.ex.imm_data = 0;
2923send_last:
2924 /* Check for invalid length. */
2925 /* LAST len should be >= 1 */
2926 if (unlikely(tlen < (hdrsize + extra_bytes)))
2927 goto nack_inv;
2928 /* Don't count the CRC(and padding and LT byte for 16B). */
2929 tlen -= (hdrsize + extra_bytes);
2930 wc.byte_len = tlen + qp->r_rcv_len;
2931 if (unlikely(wc.byte_len > qp->r_len))
2932 goto nack_inv;
2933 rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
2934 rvt_put_ss(&qp->r_sge);
2935 qp->r_msn++;
2936 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
2937 break;
2938 wc.wr_id = qp->r_wr_id;
2939 wc.status = IB_WC_SUCCESS;
2940 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2941 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2942 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2943 else
2944 wc.opcode = IB_WC_RECV;
2945 wc.qp = &qp->ibqp;
2946 wc.src_qp = qp->remote_qpn;
2947 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
2948 /*
2949 * It seems that IB mandates the presence of an SL in a
2950 * work completion only for the UD transport (see section
2951 * 11.4.2 of IBTA Vol. 1).
2952 *
2953 * However, the way the SL is chosen below is consistent
2954 * with the way that IB/qib works and is trying to avoid
2955 * introducing incompatibilities.
2956 *
2957 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
2958 */
2959 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
2960 /* zero fields that are N/A */
2961 wc.vendor_err = 0;
2962 wc.pkey_index = 0;
2963 wc.dlid_path_bits = 0;
2964 wc.port_num = 0;
2965 /* Signal completion event if the solicited bit is set. */
2966 rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
2967 break;
2968
2969 case OP(RDMA_WRITE_ONLY):
2970 copy_last = rvt_is_user_qp(qp);
2971 fallthrough;
2972 case OP(RDMA_WRITE_FIRST):
2973 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2974 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2975 goto nack_inv;
2976 /* consume RWQE */
2977 reth = &ohdr->u.rc.reth;
2978 qp->r_len = be32_to_cpu(reth->length);
2979 qp->r_rcv_len = 0;
2980 qp->r_sge.sg_list = NULL;
2981 if (qp->r_len != 0) {
2982 u32 rkey = be32_to_cpu(reth->rkey);
2983 u64 vaddr = get_ib_reth_vaddr(reth);
2984 int ok;
2985
2986 /* Check rkey & NAK */
2987 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
2988 rkey, IB_ACCESS_REMOTE_WRITE);
2989 if (unlikely(!ok))
2990 goto nack_acc;
2991 qp->r_sge.num_sge = 1;
2992 } else {
2993 qp->r_sge.num_sge = 0;
2994 qp->r_sge.sge.mr = NULL;
2995 qp->r_sge.sge.vaddr = NULL;
2996 qp->r_sge.sge.length = 0;
2997 qp->r_sge.sge.sge_length = 0;
2998 }
2999 if (opcode == OP(RDMA_WRITE_FIRST))
3000 goto send_middle;
3001 else if (opcode == OP(RDMA_WRITE_ONLY))
3002 goto no_immediate_data;
3003 ret = rvt_get_rwqe(qp, true);
3004 if (ret < 0)
3005 goto nack_op_err;
3006 if (!ret) {
3007 /* peer will send again */
3008 rvt_put_ss(&qp->r_sge);
3009 goto rnr_nak;
3010 }
3011 wc.ex.imm_data = ohdr->u.rc.imm_data;
3012 wc.wc_flags = IB_WC_WITH_IMM;
3013 goto send_last;
3014
3015 case OP(RDMA_READ_REQUEST): {
3016 struct rvt_ack_entry *e;
3017 u32 len;
3018 u8 next;
3019
3020 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3021 goto nack_inv;
3022 next = qp->r_head_ack_queue + 1;
3023 /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
3024 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3025 next = 0;
3026 spin_lock_irqsave(&qp->s_lock, flags);
3027 if (unlikely(next == qp->s_acked_ack_queue)) {
3028 if (!qp->s_ack_queue[next].sent)
3029 goto nack_inv_unlck;
3030 update_ack_queue(qp, next);
3031 }
3032 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3033 release_rdma_sge_mr(e);
3034 reth = &ohdr->u.rc.reth;
3035 len = be32_to_cpu(reth->length);
3036 if (len) {
3037 u32 rkey = be32_to_cpu(reth->rkey);
3038 u64 vaddr = get_ib_reth_vaddr(reth);
3039 int ok;
3040
3041 /* Check rkey & NAK */
3042 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
3043 rkey, IB_ACCESS_REMOTE_READ);
3044 if (unlikely(!ok))
3045 goto nack_acc_unlck;
3046 /*
3047 * Update the next expected PSN. We add 1 later
3048 * below, so only add the remainder here.
3049 */
3050 qp->r_psn += rvt_div_mtu(qp, len - 1);
3051 } else {
3052 e->rdma_sge.mr = NULL;
3053 e->rdma_sge.vaddr = NULL;
3054 e->rdma_sge.length = 0;
3055 e->rdma_sge.sge_length = 0;
3056 }
3057 e->opcode = opcode;
3058 e->sent = 0;
3059 e->psn = psn;
3060 e->lpsn = qp->r_psn;
3061 /*
3062 * We need to increment the MSN here instead of when we
3063 * finish sending the result since a duplicate request would
3064 * increment it more than once.
3065 */
3066 qp->r_msn++;
3067 qp->r_psn++;
3068 qp->r_state = opcode;
3069 qp->r_nak_state = 0;
3070 qp->r_head_ack_queue = next;
3071 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3072
3073 /* Schedule the send engine. */
3074 qp->s_flags |= RVT_S_RESP_PENDING;
3075 if (fecn)
3076 qp->s_flags |= RVT_S_ECN;
3077 hfi1_schedule_send(qp);
3078
3079 spin_unlock_irqrestore(&qp->s_lock, flags);
3080 return;
3081 }
3082
3083 case OP(COMPARE_SWAP):
3084 case OP(FETCH_ADD): {
3085 struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
3086 u64 vaddr = get_ib_ateth_vaddr(ateth);
3087 bool opfn = opcode == OP(COMPARE_SWAP) &&
3088 vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
3089 struct rvt_ack_entry *e;
3090 atomic64_t *maddr;
3091 u64 sdata;
3092 u32 rkey;
3093 u8 next;
3094
3095 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
3096 !opfn))
3097 goto nack_inv;
3098 next = qp->r_head_ack_queue + 1;
3099 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3100 next = 0;
3101 spin_lock_irqsave(&qp->s_lock, flags);
3102 if (unlikely(next == qp->s_acked_ack_queue)) {
3103 if (!qp->s_ack_queue[next].sent)
3104 goto nack_inv_unlck;
3105 update_ack_queue(qp, next);
3106 }
3107 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3108 release_rdma_sge_mr(e);
3109 /* Process OPFN special virtual address */
3110 if (opfn) {
3111 opfn_conn_response(qp, e, ateth);
3112 goto ack;
3113 }
3114 if (unlikely(vaddr & (sizeof(u64) - 1)))
3115 goto nack_inv_unlck;
3116 rkey = be32_to_cpu(ateth->rkey);
3117 /* Check rkey & NAK */
3118 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3119 vaddr, rkey,
3120 IB_ACCESS_REMOTE_ATOMIC)))
3121 goto nack_acc_unlck;
3122 /* Perform atomic OP and save result. */
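 /*
 * For FETCH_ADD, atomic64_add_return() yields the post-add value, so
 * subtract sdata to report the original contents; for COMPARE_SWAP,
 * cmpxchg() already returns the prior contents.
 */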
3123 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3124 sdata = get_ib_ateth_swap(ateth);
3125 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
3126 (u64)atomic64_add_return(sdata, maddr) - sdata :
3127 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3128 get_ib_ateth_compare(ateth),
3129 sdata);
3130 rvt_put_mr(qp->r_sge.sge.mr);
3131 qp->r_sge.num_sge = 0;
3132ack:
3133 e->opcode = opcode;
3134 e->sent = 0;
3135 e->psn = psn;
3136 e->lpsn = psn;
3137 qp->r_msn++;
3138 qp->r_psn++;
3139 qp->r_state = opcode;
3140 qp->r_nak_state = 0;
3141 qp->r_head_ack_queue = next;
3142 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3143
3144 /* Schedule the send engine. */
3145 qp->s_flags |= RVT_S_RESP_PENDING;
3146 if (fecn)
3147 qp->s_flags |= RVT_S_ECN;
3148 hfi1_schedule_send(qp);
3149
3150 spin_unlock_irqrestore(&qp->s_lock, flags);
3151 return;
3152 }
3153
3154 default:
3155 /* NAK unknown opcodes. */
3156 goto nack_inv;
3157 }
3158 qp->r_psn++;
3159 qp->r_state = opcode;
3160 qp->r_ack_psn = psn;
3161 qp->r_nak_state = 0;
3162 /* Send an ACK if requested or required. */
3163 if (psn & IB_BTH_REQ_ACK || fecn) {
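 /*
 * Send the ACK right away when this is the only packet in the
 * interrupt burst, when FECN processing requires it, or when the
 * deferred-ACK budget (HFI1_PSN_CREDIT) is exhausted; otherwise
 * coalesce it by deferring the ACK.
 */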
3164 if (packet->numpkt == 0 || fecn ||
3165 qp->r_adefered >= HFI1_PSN_CREDIT) {
3166 rc_cancel_ack(qp);
3167 goto send_ack;
3168 }
3169 qp->r_adefered++;
3170 rc_defered_ack(rcd, qp);
3171 }
3172 return;
3173
3174rnr_nak:
3175 qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
3176 qp->r_ack_psn = qp->r_psn;
3177 /* Queue RNR NAK for later */
3178 rc_defered_ack(rcd, qp);
3179 return;
3180
3181nack_op_err:
3182 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3183 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
3184 qp->r_ack_psn = qp->r_psn;
3185 /* Queue NAK for later */
3186 rc_defered_ack(rcd, qp);
3187 return;
3188
3189nack_inv_unlck:
3190 spin_unlock_irqrestore(&qp->s_lock, flags);
3191nack_inv:
3192 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3193 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3194 qp->r_ack_psn = qp->r_psn;
3195 /* Queue NAK for later */
3196 rc_defered_ack(rcd, qp);
3197 return;
3198
3199nack_acc_unlck:
3200 spin_unlock_irqrestore(&qp->s_lock, flags);
3201nack_acc:
3202 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3203 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
3204 qp->r_ack_psn = qp->r_psn;
3205send_ack:
3206 hfi1_send_rc_ack(packet, fecn);
3207}
3208
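/*
 * hfi1_rc_hdrerr - handle a receive header error on an RC QP
 *
 * Only request packets (opcodes below the read-response range) are
 * considered.  If the packet's PSN is at or beyond the expected PSN and
 * no NAK is already pending, schedule a deferred PSN-error NAK so the
 * requester backs up to qp->r_psn.
 */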
3209void hfi1_rc_hdrerr(
3210 struct hfi1_ctxtdata *rcd,
3211 struct hfi1_packet *packet,
3212 struct rvt_qp *qp)
3213{
3214 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
3215 int diff;
3216 u32 opcode;
3217 u32 psn;
3218
3219 if (hfi1_ruc_check_hdr(ibp, packet))
3220 return;
3221
3222 psn = ib_bth_get_psn(packet->ohdr);
3223 opcode = ib_bth_get_opcode(packet->ohdr);
3224
3225 /* Only deal with RDMA Writes for now */
3226 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
3227 diff = delta_psn(psn, qp->r_psn);
3228 if (!qp->r_nak_state && diff >= 0) {
3229 ibp->rvp.n_rc_seqnak++;
3230 qp->r_nak_state = IB_NAK_PSN_ERROR;
3231 /* Use the expected PSN. */
3232 qp->r_ack_psn = qp->r_psn;
3233 /*
3234 * Wait to send the sequence
3235 * NAK until all packets
3236 * in the receive queue have
3237 * been processed.
3238 * Otherwise, we end up
3239 * propagating congestion.
3240 */
3241 rc_defered_ack(rcd, qp);
3242 } /* Out of sequence NAK */
3243 } /* QP Request NAKs */
3244}
1/*
2 * Copyright(c) 2015 - 2018 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48#include <linux/io.h>
49#include <rdma/rdma_vt.h>
50#include <rdma/rdmavt_qp.h>
51
52#include "hfi.h"
53#include "qp.h"
54#include "rc.h"
55#include "verbs_txreq.h"
56#include "trace.h"
57
58struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
59 u8 *prev_ack, bool *scheduled)
60 __must_hold(&qp->s_lock)
61{
62 struct rvt_ack_entry *e = NULL;
63 u8 i, p;
64 bool s = true;
65
66 for (i = qp->r_head_ack_queue; ; i = p) {
67 if (i == qp->s_tail_ack_queue)
68 s = false;
69 if (i)
70 p = i - 1;
71 else
72 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
73 if (p == qp->r_head_ack_queue) {
74 e = NULL;
75 break;
76 }
77 e = &qp->s_ack_queue[p];
78 if (!e->opcode) {
79 e = NULL;
80 break;
81 }
82 if (cmp_psn(psn, e->psn) >= 0) {
83 if (p == qp->s_tail_ack_queue &&
84 cmp_psn(psn, e->lpsn) <= 0)
85 s = false;
86 break;
87 }
88 }
89 if (prev)
90 *prev = p;
91 if (prev_ack)
92 *prev_ack = i;
93 if (scheduled)
94 *scheduled = s;
95 return e;
96}
97
98/**
99 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
100 * @dev: the device for this QP
101 * @qp: a pointer to the QP
102 * @ohdr: a pointer to the IB header being constructed
103 * @ps: the xmit packet state
104 *
105 * Return 1 if constructed; otherwise, return 0.
106 * Note that we are in the responder's side of the QP context.
107 * Note the QP s_lock must be held.
108 */
109static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
110 struct ib_other_headers *ohdr,
111 struct hfi1_pkt_state *ps)
112{
113 struct rvt_ack_entry *e;
114 u32 hwords, hdrlen;
115 u32 len = 0;
116 u32 bth0 = 0, bth2 = 0;
117 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
118 int middle = 0;
119 u32 pmtu = qp->pmtu;
120 struct hfi1_qp_priv *qpriv = qp->priv;
121 bool last_pkt;
122 u32 delta;
123 u8 next = qp->s_tail_ack_queue;
124 struct tid_rdma_request *req;
125
126 trace_hfi1_rsp_make_rc_ack(qp, 0);
127 lockdep_assert_held(&qp->s_lock);
128 /* Don't send an ACK if we aren't supposed to. */
129 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
130 goto bail;
131
132 if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
133 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
134 hwords = 5;
135 else
136 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
137 hwords = 7;
138
139 switch (qp->s_ack_state) {
140 case OP(RDMA_READ_RESPONSE_LAST):
141 case OP(RDMA_READ_RESPONSE_ONLY):
142 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
143 release_rdma_sge_mr(e);
144 fallthrough;
145 case OP(ATOMIC_ACKNOWLEDGE):
146 /*
147 * We can increment the tail pointer now that the last
148 * response has been sent instead of only being
149 * constructed.
150 */
151 if (++next > rvt_size_atomic(&dev->rdi))
152 next = 0;
153 /*
154 * Only advance the s_acked_ack_queue pointer if there
155 * have been no TID RDMA requests.
156 */
157 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
158 if (e->opcode != TID_OP(WRITE_REQ) &&
159 qp->s_acked_ack_queue == qp->s_tail_ack_queue)
160 qp->s_acked_ack_queue = next;
161 qp->s_tail_ack_queue = next;
162 trace_hfi1_rsp_make_rc_ack(qp, e->psn);
163 fallthrough;
164 case OP(SEND_ONLY):
165 case OP(ACKNOWLEDGE):
166 /* Check for no next entry in the queue. */
167 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
168 if (qp->s_flags & RVT_S_ACK_PENDING)
169 goto normal;
170 goto bail;
171 }
172
173 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
174 /* Check for tid write fence */
175 if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
176 hfi1_tid_rdma_ack_interlock(qp, e)) {
177 iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
178 goto bail;
179 }
180 if (e->opcode == OP(RDMA_READ_REQUEST)) {
181 /*
182 * If a RDMA read response is being resent and
183 * we haven't seen the duplicate request yet,
184 * then stop sending the remaining responses the
185 * responder has seen until the requester re-sends it.
186 */
187 len = e->rdma_sge.sge_length;
188 if (len && !e->rdma_sge.mr) {
189 if (qp->s_acked_ack_queue ==
190 qp->s_tail_ack_queue)
191 qp->s_acked_ack_queue =
192 qp->r_head_ack_queue;
193 qp->s_tail_ack_queue = qp->r_head_ack_queue;
194 goto bail;
195 }
196 /* Copy SGE state in case we need to resend */
197 ps->s_txreq->mr = e->rdma_sge.mr;
198 if (ps->s_txreq->mr)
199 rvt_get_mr(ps->s_txreq->mr);
200 qp->s_ack_rdma_sge.sge = e->rdma_sge;
201 qp->s_ack_rdma_sge.num_sge = 1;
202 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
203 if (len > pmtu) {
204 len = pmtu;
205 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
206 } else {
207 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
208 e->sent = 1;
209 }
210 ohdr->u.aeth = rvt_compute_aeth(qp);
211 hwords++;
212 qp->s_ack_rdma_psn = e->psn;
213 bth2 = mask_psn(qp->s_ack_rdma_psn++);
214 } else if (e->opcode == TID_OP(WRITE_REQ)) {
215 /*
216 * If a TID RDMA WRITE RESP is being resent, we have to
217 * wait for the actual request. All requests that are to
218 * be resent will have their state set to
219 * TID_REQUEST_RESEND. When the new request arrives, the
220 * state will be changed to TID_REQUEST_RESEND_ACTIVE.
221 */
222 req = ack_to_tid_req(e);
223 if (req->state == TID_REQUEST_RESEND ||
224 req->state == TID_REQUEST_INIT_RESEND)
225 goto bail;
226 qp->s_ack_state = TID_OP(WRITE_RESP);
227 qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
228 goto write_resp;
229 } else if (e->opcode == TID_OP(READ_REQ)) {
230 /*
231 * If a TID RDMA read response is being resent and
232 * we haven't seen the duplicate request yet,
233 * then stop sending the remaining responses the
234 * responder has seen until the requester re-sends it.
235 */
236 len = e->rdma_sge.sge_length;
237 if (len && !e->rdma_sge.mr) {
238 if (qp->s_acked_ack_queue ==
239 qp->s_tail_ack_queue)
240 qp->s_acked_ack_queue =
241 qp->r_head_ack_queue;
242 qp->s_tail_ack_queue = qp->r_head_ack_queue;
243 goto bail;
244 }
245 /* Copy SGE state in case we need to resend */
246 ps->s_txreq->mr = e->rdma_sge.mr;
247 if (ps->s_txreq->mr)
248 rvt_get_mr(ps->s_txreq->mr);
249 qp->s_ack_rdma_sge.sge = e->rdma_sge;
250 qp->s_ack_rdma_sge.num_sge = 1;
251 qp->s_ack_state = TID_OP(READ_RESP);
252 goto read_resp;
253 } else {
254 /* COMPARE_SWAP or FETCH_ADD */
255 ps->s_txreq->ss = NULL;
256 len = 0;
257 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
258 ohdr->u.at.aeth = rvt_compute_aeth(qp);
259 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
260 hwords += sizeof(ohdr->u.at) / sizeof(u32);
261 bth2 = mask_psn(e->psn);
262 e->sent = 1;
263 }
264 trace_hfi1_tid_write_rsp_make_rc_ack(qp);
265 bth0 = qp->s_ack_state << 24;
266 break;
267
268 case OP(RDMA_READ_RESPONSE_FIRST):
269 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
270 fallthrough;
271 case OP(RDMA_READ_RESPONSE_MIDDLE):
272 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
273 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
274 if (ps->s_txreq->mr)
275 rvt_get_mr(ps->s_txreq->mr);
276 len = qp->s_ack_rdma_sge.sge.sge_length;
277 if (len > pmtu) {
278 len = pmtu;
279 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
280 } else {
281 ohdr->u.aeth = rvt_compute_aeth(qp);
282 hwords++;
283 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
284 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
285 e->sent = 1;
286 }
287 bth0 = qp->s_ack_state << 24;
288 bth2 = mask_psn(qp->s_ack_rdma_psn++);
289 break;
290
291 case TID_OP(WRITE_RESP):
292write_resp:
293 /*
294 * 1. Check if RVT_S_ACK_PENDING is set. If yes,
295 * goto normal.
296 * 2. Attempt to allocate TID resources.
297 * 3. Remove RVT_S_RESP_PENDING flags from s_flags
298 * 4. If resources not available:
299 * 4.1 Set RVT_S_WAIT_TID_SPACE
300 * 4.2 Queue QP on RCD TID queue
301 * 4.3 Put QP on iowait list.
302 * 4.4 Build IB RNR NAK with appropriate timeout value
303 * 4.5 Return indication progress made.
304 * 5. If resources are available:
305 * 5.1 Program HW flow CSRs
306 * 5.2 Build TID RDMA WRITE RESP packet
307 * 5.3 If more resources needed, do 2.1 - 2.3.
308 * 5.4 Wake up next QP on RCD TID queue.
309 * 5.5 Return indication progress made.
310 */
311
312 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
313 req = ack_to_tid_req(e);
314
315 /*
316 * Send scheduled RNR NAK's. RNR NAK's need to be sent at
317 * segment boundaries, not at request boundaries. Don't change
318 * s_ack_state because we are still in the middle of a request
319 */
320 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
321 qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
322 req->cur_seg == req->alloc_seg) {
323 qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
324 goto normal_no_state;
325 }
326
327 bth2 = mask_psn(qp->s_ack_rdma_psn);
328 hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
329 bth2, &len,
330 &ps->s_txreq->ss);
331 if (!hdrlen)
332 return 0;
333
334 hwords += hdrlen;
335 bth0 = qp->s_ack_state << 24;
336 qp->s_ack_rdma_psn++;
337 trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
338 e->lpsn, req);
339 if (req->cur_seg != req->total_segs)
340 break;
341
342 e->sent = 1;
343 /* Do not free e->rdma_sge until all data are received */
344 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
345 break;
346
347 case TID_OP(READ_RESP):
348read_resp:
349 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
350 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
351 delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
352 &bth1, &bth2, &len,
353 &last_pkt);
354 if (delta == 0)
355 goto error_qp;
356 hwords += delta;
357 if (last_pkt) {
358 e->sent = 1;
359 /*
360 * Increment qp->s_tail_ack_queue through s_ack_state
361 * transition.
362 */
363 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
364 }
365 break;
366 case TID_OP(READ_REQ):
367 goto bail;
368
369 default:
370normal:
371 /*
372 * Send a regular ACK.
373 * Set the s_ack_state so we wait until after sending
374 * the ACK before setting s_ack_state to ACKNOWLEDGE
375 * (see above).
376 */
377 qp->s_ack_state = OP(SEND_ONLY);
378normal_no_state:
379 if (qp->s_nak_state)
380 ohdr->u.aeth =
381 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
382 (qp->s_nak_state <<
383 IB_AETH_CREDIT_SHIFT));
384 else
385 ohdr->u.aeth = rvt_compute_aeth(qp);
386 hwords++;
387 len = 0;
388 bth0 = OP(ACKNOWLEDGE) << 24;
389 bth2 = mask_psn(qp->s_ack_psn);
390 qp->s_flags &= ~RVT_S_ACK_PENDING;
391 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
392 ps->s_txreq->ss = NULL;
393 }
394 qp->s_rdma_ack_cnt++;
395 ps->s_txreq->sde = qpriv->s_sde;
396 ps->s_txreq->s_cur_size = len;
397 ps->s_txreq->hdr_dwords = hwords;
398 hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
399 return 1;
400error_qp:
401 spin_unlock_irqrestore(&qp->s_lock, ps->flags);
402 spin_lock_irqsave(&qp->r_lock, ps->flags);
403 spin_lock(&qp->s_lock);
404 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
405 spin_unlock(&qp->s_lock);
406 spin_unlock_irqrestore(&qp->r_lock, ps->flags);
407 spin_lock_irqsave(&qp->s_lock, ps->flags);
408bail:
409 qp->s_ack_state = OP(ACKNOWLEDGE);
410 /*
411 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
412 * RVT_S_RESP_PENDING
413 */
414 smp_wmb();
415 qp->s_flags &= ~(RVT_S_RESP_PENDING
416 | RVT_S_ACK_PENDING
417 | HFI1_S_AHG_VALID);
418 return 0;
419}
420
421/**
422 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
423 * @qp: a pointer to the QP
424 *
425 * Assumes s_lock is held.
426 *
427 * Return 1 if constructed; otherwise, return 0.
428 */
429int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
430{
431 struct hfi1_qp_priv *priv = qp->priv;
432 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
433 struct ib_other_headers *ohdr;
434 struct rvt_sge_state *ss = NULL;
435 struct rvt_swqe *wqe;
436 struct hfi1_swqe_priv *wpriv;
437 struct tid_rdma_request *req = NULL;
438 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
439 u32 hwords = 5;
440 u32 len = 0;
441 u32 bth0 = 0, bth2 = 0;
442 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
443 u32 pmtu = qp->pmtu;
444 char newreq;
445 int middle = 0;
446 int delta;
447 struct tid_rdma_flow *flow = NULL;
448 struct tid_rdma_params *remote;
449
450 trace_hfi1_sender_make_rc_req(qp);
451 lockdep_assert_held(&qp->s_lock);
452 ps->s_txreq = get_txreq(ps->dev, qp);
453 if (!ps->s_txreq)
454 goto bail_no_tx;
455
456 if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
457 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
458 hwords = 5;
459 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
460 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
461 else
462 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
463 } else {
464 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
465 hwords = 7;
466 if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
467 (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
468 ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
469 else
470 ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
471 }
472
473 /* Sending responses has higher priority over sending requests. */
474 if ((qp->s_flags & RVT_S_RESP_PENDING) &&
475 make_rc_ack(dev, qp, ohdr, ps))
476 return 1;
477
478 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
479 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
480 goto bail;
481 /* We are in the error state, flush the work request. */
482 if (qp->s_last == READ_ONCE(qp->s_head))
483 goto bail;
484 /* If DMAs are in progress, we can't flush immediately. */
485 if (iowait_sdma_pending(&priv->s_iowait)) {
486 qp->s_flags |= RVT_S_WAIT_DMA;
487 goto bail;
488 }
489 clear_ahg(qp);
490 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
491 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
492 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
493 /* will get called again */
494 goto done_free_tx;
495 }
496
497 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
498 goto bail;
499
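/*
 * A restart has moved s_psn back into the range of packets that may still be
 * in the send engine's pipeline.  If any are still outstanding, wait for them
 * to clear before resending; otherwise reset the sending window to start at
 * s_psn.
 */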
500 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
501 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
502 qp->s_flags |= RVT_S_WAIT_PSN;
503 goto bail;
504 }
505 qp->s_sending_psn = qp->s_psn;
506 qp->s_sending_hpsn = qp->s_psn - 1;
507 }
508
509 /* Send a request. */
510 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
511check_s_state:
512 switch (qp->s_state) {
513 default:
514 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
515 goto bail;
516 /*
517 * Resend an old request or start a new one.
518 *
519 * We keep track of the current SWQE so that
520 * we don't reset the "furthest progress" state
521 * if we need to back up.
522 */
523 newreq = 0;
524 if (qp->s_cur == qp->s_tail) {
525 /* Check if send work queue is empty. */
526 if (qp->s_tail == READ_ONCE(qp->s_head)) {
527 clear_ahg(qp);
528 goto bail;
529 }
530 /*
531 * If a fence is requested, wait for previous
532 * RDMA read and atomic operations to finish.
533 * However, there is no need to guard against
534 * TID RDMA READ after TID RDMA READ.
535 */
536 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
537 qp->s_num_rd_atomic &&
538 (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
539 priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
540 qp->s_flags |= RVT_S_WAIT_FENCE;
541 goto bail;
542 }
543 /*
544 * Local operations are processed immediately
545 * after all prior requests have completed.
546 */
547 if (wqe->wr.opcode == IB_WR_REG_MR ||
548 wqe->wr.opcode == IB_WR_LOCAL_INV) {
549 int local_ops = 0;
550 int err = 0;
551
552 if (qp->s_last != qp->s_cur)
553 goto bail;
554 if (++qp->s_cur == qp->s_size)
555 qp->s_cur = 0;
556 if (++qp->s_tail == qp->s_size)
557 qp->s_tail = 0;
558 if (!(wqe->wr.send_flags &
559 RVT_SEND_COMPLETION_ONLY)) {
560 err = rvt_invalidate_rkey(
561 qp,
562 wqe->wr.ex.invalidate_rkey);
563 local_ops = 1;
564 }
565 rvt_send_complete(qp, wqe,
566 err ? IB_WC_LOC_PROT_ERR
567 : IB_WC_SUCCESS);
568 if (local_ops)
569 atomic_dec(&qp->local_ops_pending);
570 goto done_free_tx;
571 }
572
573 newreq = 1;
574 qp->s_psn = wqe->psn;
575 }
576 /*
577 * Note that we have to be careful not to modify the
578 * original work request since we may need to resend
579 * it.
580 */
581 len = wqe->length;
582 ss = &qp->s_sge;
583 bth2 = mask_psn(qp->s_psn);
584
585 /*
586 * Interlock between various IB requests and TID RDMA
587 * if necessary.
588 */
589 if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
590 hfi1_tid_rdma_wqe_interlock(qp, wqe))
591 goto bail;
592
593 switch (wqe->wr.opcode) {
594 case IB_WR_SEND:
595 case IB_WR_SEND_WITH_IMM:
596 case IB_WR_SEND_WITH_INV:
597 /* If no credit, return. */
598 if (!rvt_rc_credit_avail(qp, wqe))
599 goto bail;
600 if (len > pmtu) {
601 qp->s_state = OP(SEND_FIRST);
602 len = pmtu;
603 break;
604 }
605 if (wqe->wr.opcode == IB_WR_SEND) {
606 qp->s_state = OP(SEND_ONLY);
607 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
608 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
609 /* Immediate data comes after the BTH */
610 ohdr->u.imm_data = wqe->wr.ex.imm_data;
611 hwords += 1;
612 } else {
613 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
614 /* Invalidate rkey comes after the BTH */
615 ohdr->u.ieth = cpu_to_be32(
616 wqe->wr.ex.invalidate_rkey);
617 hwords += 1;
618 }
619 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
620 bth0 |= IB_BTH_SOLICITED;
621 bth2 |= IB_BTH_REQ_ACK;
622 if (++qp->s_cur == qp->s_size)
623 qp->s_cur = 0;
624 break;
625
626 case IB_WR_RDMA_WRITE:
627 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
628 qp->s_lsn++;
629 goto no_flow_control;
630 case IB_WR_RDMA_WRITE_WITH_IMM:
631 /* If no credit, return. */
632 if (!rvt_rc_credit_avail(qp, wqe))
633 goto bail;
634no_flow_control:
635 put_ib_reth_vaddr(
636 wqe->rdma_wr.remote_addr,
637 &ohdr->u.rc.reth);
638 ohdr->u.rc.reth.rkey =
639 cpu_to_be32(wqe->rdma_wr.rkey);
640 ohdr->u.rc.reth.length = cpu_to_be32(len);
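/* The RETH (64-bit vaddr, rkey, and length) adds 4 dwords to the header. */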
641 hwords += sizeof(struct ib_reth) / sizeof(u32);
642 if (len > pmtu) {
643 qp->s_state = OP(RDMA_WRITE_FIRST);
644 len = pmtu;
645 break;
646 }
647 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
648 qp->s_state = OP(RDMA_WRITE_ONLY);
649 } else {
650 qp->s_state =
651 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
652 /* Immediate data comes after RETH */
653 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
654 hwords += 1;
655 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
656 bth0 |= IB_BTH_SOLICITED;
657 }
658 bth2 |= IB_BTH_REQ_ACK;
659 if (++qp->s_cur == qp->s_size)
660 qp->s_cur = 0;
661 break;
662
663 case IB_WR_TID_RDMA_WRITE:
664 if (newreq) {
665 /*
666 * Limit the number of TID RDMA WRITE requests.
667 */
668 if (atomic_read(&priv->n_tid_requests) >=
669 HFI1_TID_RDMA_WRITE_CNT)
670 goto bail;
671
672 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
673 qp->s_lsn++;
674 }
675
676 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
677 &bth1, &bth2,
678 &len);
679 ss = NULL;
680 if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
681 priv->s_tid_cur = qp->s_cur;
682 if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
683 priv->s_tid_tail = qp->s_cur;
684 priv->s_state = TID_OP(WRITE_RESP);
685 }
686 } else if (priv->s_tid_cur == priv->s_tid_head) {
687 struct rvt_swqe *__w;
688 struct tid_rdma_request *__r;
689
690 __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
691 __r = wqe_to_tid_req(__w);
692
693 /*
694 * The s_tid_cur pointer is advanced to s_cur if
695 * any of the following conditions about the WQE
696 * to which s_tid_cur currently points are
697 * satisfied:
698 * 1. The request is not a TID RDMA WRITE
699 * request,
700 * 2. The request is in the INACTIVE or
701 * COMPLETE states (TID RDMA READ requests
702 * stay at INACTIVE and TID RDMA WRITE
703 * transition to COMPLETE when done),
704 * 3. The request is in the ACTIVE or SYNC
705 * state and the number of completed
706 * segments is equal to the total segment
707 * count.
708 * (If ACTIVE, the request is waiting for
709 * ACKs. If SYNC, the request has not
710 * received any responses because it's
711 * waiting on a sync point.)
712 */
713 if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
714 __r->state == TID_REQUEST_INACTIVE ||
715 __r->state == TID_REQUEST_COMPLETE ||
716 ((__r->state == TID_REQUEST_ACTIVE ||
717 __r->state == TID_REQUEST_SYNC) &&
718 __r->comp_seg == __r->total_segs)) {
719 if (priv->s_tid_tail ==
720 priv->s_tid_cur &&
721 priv->s_state ==
722 TID_OP(WRITE_DATA_LAST)) {
723 priv->s_tid_tail = qp->s_cur;
724 priv->s_state =
725 TID_OP(WRITE_RESP);
726 }
727 priv->s_tid_cur = qp->s_cur;
728 }
729 /*
730 * A corner case: when the last TID RDMA WRITE
731 * request was completed, s_tid_head,
732 * s_tid_cur, and s_tid_tail all point to the
733 * same location. Other requests are posted and
734 * s_cur wraps around to the same location,
735 * where a new TID RDMA WRITE is posted. In
736 * this case, none of the indices need to be
737 * updated. However, the priv->s_state should be.
738 */
739 if (priv->s_tid_tail == qp->s_cur &&
740 priv->s_state == TID_OP(WRITE_DATA_LAST))
741 priv->s_state = TID_OP(WRITE_RESP);
742 }
743 req = wqe_to_tid_req(wqe);
744 if (newreq) {
745 priv->s_tid_head = qp->s_cur;
746 priv->pending_tid_w_resp += req->total_segs;
747 atomic_inc(&priv->n_tid_requests);
748 atomic_dec(&priv->n_requests);
749 } else {
750 req->state = TID_REQUEST_RESEND;
751 req->comp_seg = delta_psn(bth2, wqe->psn);
752 /*
753 * Pull back any segments since we are going
754 * to re-receive them.
755 */
756 req->setup_head = req->clear_tail;
757 priv->pending_tid_w_resp +=
758 delta_psn(wqe->lpsn, bth2) + 1;
759 }
760
761 trace_hfi1_tid_write_sender_make_req(qp, newreq);
762 trace_hfi1_tid_req_make_req_write(qp, newreq,
763 wqe->wr.opcode,
764 wqe->psn, wqe->lpsn,
765 req);
766 if (++qp->s_cur == qp->s_size)
767 qp->s_cur = 0;
768 break;
769
770 case IB_WR_RDMA_READ:
771 /*
772 * Don't allow more operations to be started
773 * than the QP limits allow.
774 */
775 if (qp->s_num_rd_atomic >=
776 qp->s_max_rd_atomic) {
777 qp->s_flags |= RVT_S_WAIT_RDMAR;
778 goto bail;
779 }
780 qp->s_num_rd_atomic++;
781 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
782 qp->s_lsn++;
783 put_ib_reth_vaddr(
784 wqe->rdma_wr.remote_addr,
785 &ohdr->u.rc.reth);
786 ohdr->u.rc.reth.rkey =
787 cpu_to_be32(wqe->rdma_wr.rkey);
788 ohdr->u.rc.reth.length = cpu_to_be32(len);
789 qp->s_state = OP(RDMA_READ_REQUEST);
790 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
791 ss = NULL;
792 len = 0;
793 bth2 |= IB_BTH_REQ_ACK;
794 if (++qp->s_cur == qp->s_size)
795 qp->s_cur = 0;
796 break;
797
798 case IB_WR_TID_RDMA_READ:
799 trace_hfi1_tid_read_sender_make_req(qp, newreq);
800 wpriv = wqe->priv;
801 req = wqe_to_tid_req(wqe);
802 trace_hfi1_tid_req_make_req_read(qp, newreq,
803 wqe->wr.opcode,
804 wqe->psn, wqe->lpsn,
805 req);
806 delta = cmp_psn(qp->s_psn, wqe->psn);
807
808 /*
809 * Don't allow more operations to be started
810 * than the QP limits allow. We could get here under
811 * three conditions: (1) It's a new request; (2) We are
812 * sending the second or later segment of a request,
813 * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
814 * when the last segment of a previous request is
815 * received just before this; (3) We are re-sending a
816 * request.
817 */
818 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
819 qp->s_flags |= RVT_S_WAIT_RDMAR;
820 goto bail;
821 }
822 if (newreq) {
823 struct tid_rdma_flow *flow =
824 &req->flows[req->setup_head];
825
826 /*
827 * Set up s_sge as it is needed for TID
828 * allocation. However, if the pages have been
829 * walked and mapped, skip it. An earlier try
830 * has failed to allocate the TID entries.
831 */
832 if (!flow->npagesets) {
833 qp->s_sge.sge = wqe->sg_list[0];
834 qp->s_sge.sg_list = wqe->sg_list + 1;
835 qp->s_sge.num_sge = wqe->wr.num_sge;
836 qp->s_sge.total_len = wqe->length;
837 qp->s_len = wqe->length;
838 req->isge = 0;
839 req->clear_tail = req->setup_head;
840 req->flow_idx = req->setup_head;
841 req->state = TID_REQUEST_ACTIVE;
842 }
843 } else if (delta == 0) {
844 /* Re-send a request */
845 req->cur_seg = 0;
846 req->comp_seg = 0;
847 req->ack_pending = 0;
848 req->flow_idx = req->clear_tail;
849 req->state = TID_REQUEST_RESEND;
850 }
851 req->s_next_psn = qp->s_psn;
852 /* Read one segment at a time */
853 len = min_t(u32, req->seg_len,
854 wqe->length - req->seg_len * req->cur_seg);
855 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
856 &bth1, &bth2,
857 &len);
858 if (delta <= 0) {
859 /* Wait for TID space */
860 goto bail;
861 }
862 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
863 qp->s_lsn++;
864 hwords += delta;
865 ss = &wpriv->ss;
866 /* Check if this is the last segment */
867 if (req->cur_seg >= req->total_segs &&
868 ++qp->s_cur == qp->s_size)
869 qp->s_cur = 0;
870 break;
871
872 case IB_WR_ATOMIC_CMP_AND_SWP:
873 case IB_WR_ATOMIC_FETCH_AND_ADD:
874 /*
875 * Don't allow more operations to be started
876 * than the QP limits allow.
877 */
878 if (qp->s_num_rd_atomic >=
879 qp->s_max_rd_atomic) {
880 qp->s_flags |= RVT_S_WAIT_RDMAR;
881 goto bail;
882 }
883 qp->s_num_rd_atomic++;
884 fallthrough;
885 case IB_WR_OPFN:
886 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
887 qp->s_lsn++;
888 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
889 wqe->wr.opcode == IB_WR_OPFN) {
890 qp->s_state = OP(COMPARE_SWAP);
891 put_ib_ateth_swap(wqe->atomic_wr.swap,
892 &ohdr->u.atomic_eth);
893 put_ib_ateth_compare(wqe->atomic_wr.compare_add,
894 &ohdr->u.atomic_eth);
895 } else {
896 qp->s_state = OP(FETCH_ADD);
897 put_ib_ateth_swap(wqe->atomic_wr.compare_add,
898 &ohdr->u.atomic_eth);
899 put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
900 }
901 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
902 &ohdr->u.atomic_eth);
903 ohdr->u.atomic_eth.rkey = cpu_to_be32(
904 wqe->atomic_wr.rkey);
905 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
906 ss = NULL;
907 len = 0;
908 bth2 |= IB_BTH_REQ_ACK;
909 if (++qp->s_cur == qp->s_size)
910 qp->s_cur = 0;
911 break;
912
913 default:
914 goto bail;
915 }
916 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
917 qp->s_sge.sge = wqe->sg_list[0];
918 qp->s_sge.sg_list = wqe->sg_list + 1;
919 qp->s_sge.num_sge = wqe->wr.num_sge;
920 qp->s_sge.total_len = wqe->length;
921 qp->s_len = wqe->length;
922 }
923 if (newreq) {
924 qp->s_tail++;
925 if (qp->s_tail >= qp->s_size)
926 qp->s_tail = 0;
927 }
928 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
929 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
930 qp->s_psn = wqe->lpsn + 1;
931 else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
932 qp->s_psn = req->s_next_psn;
933 else
934 qp->s_psn++;
935 break;
936
937 case OP(RDMA_READ_RESPONSE_FIRST):
938 /*
939 * qp->s_state is normally set to the opcode of the
940 * last packet constructed for new requests and therefore
941 * is never set to RDMA read response.
942 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
943 * thread to indicate a SEND needs to be restarted from an
944 * earlier PSN without interfering with the sending thread.
945 * See restart_rc().
946 */
947 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
948 fallthrough;
949 case OP(SEND_FIRST):
950 qp->s_state = OP(SEND_MIDDLE);
951 fallthrough;
952 case OP(SEND_MIDDLE):
953 bth2 = mask_psn(qp->s_psn++);
954 ss = &qp->s_sge;
955 len = qp->s_len;
956 if (len > pmtu) {
957 len = pmtu;
958 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
959 break;
960 }
961 if (wqe->wr.opcode == IB_WR_SEND) {
962 qp->s_state = OP(SEND_LAST);
963 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
964 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
965 /* Immediate data comes after the BTH */
966 ohdr->u.imm_data = wqe->wr.ex.imm_data;
967 hwords += 1;
968 } else {
969 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
970 /* invalidate data comes after the BTH */
971 ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
972 hwords += 1;
973 }
974 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
975 bth0 |= IB_BTH_SOLICITED;
976 bth2 |= IB_BTH_REQ_ACK;
977 qp->s_cur++;
978 if (qp->s_cur >= qp->s_size)
979 qp->s_cur = 0;
980 break;
981
982 case OP(RDMA_READ_RESPONSE_LAST):
983 /*
984 * qp->s_state is normally set to the opcode of the
985 * last packet constructed for new requests and therefore
986 * is never set to RDMA read response.
987 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
988 * thread to indicate a RDMA write needs to be restarted from
989 * an earlier PSN without interfering with the sending thread.
990 * See restart_rc().
991 */
992 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
993 fallthrough;
994 case OP(RDMA_WRITE_FIRST):
995 qp->s_state = OP(RDMA_WRITE_MIDDLE);
996 fallthrough;
997 case OP(RDMA_WRITE_MIDDLE):
998 bth2 = mask_psn(qp->s_psn++);
999 ss = &qp->s_sge;
1000 len = qp->s_len;
1001 if (len > pmtu) {
1002 len = pmtu;
1003 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
1004 break;
1005 }
1006 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
1007 qp->s_state = OP(RDMA_WRITE_LAST);
1008 } else {
1009 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
1010 /* Immediate data comes after the BTH */
1011 ohdr->u.imm_data = wqe->wr.ex.imm_data;
1012 hwords += 1;
1013 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
1014 bth0 |= IB_BTH_SOLICITED;
1015 }
1016 bth2 |= IB_BTH_REQ_ACK;
1017 qp->s_cur++;
1018 if (qp->s_cur >= qp->s_size)
1019 qp->s_cur = 0;
1020 break;
1021
1022 case OP(RDMA_READ_RESPONSE_MIDDLE):
1023 /*
1024 * qp->s_state is normally set to the opcode of the
1025 * last packet constructed for new requests and therefore
1026 * is never set to RDMA read response.
1027 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
1028 * thread to indicate a RDMA read needs to be restarted from
1029 * an earlier PSN without interfering with the sending thread.
1030 * See restart_rc().
1031 */
1032 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
1033 put_ib_reth_vaddr(
1034 wqe->rdma_wr.remote_addr + len,
1035 &ohdr->u.rc.reth);
1036 ohdr->u.rc.reth.rkey =
1037 cpu_to_be32(wqe->rdma_wr.rkey);
1038 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
1039 qp->s_state = OP(RDMA_READ_REQUEST);
1040 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
1041 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
1042 qp->s_psn = wqe->lpsn + 1;
1043 ss = NULL;
1044 len = 0;
1045 qp->s_cur++;
1046 if (qp->s_cur == qp->s_size)
1047 qp->s_cur = 0;
1048 break;
1049
1050 case TID_OP(WRITE_RESP):
1051 /*
1052 * This value for s_state is used for restarting a TID RDMA
1053 * WRITE request. See the comment in OP(RDMA_READ_RESPONSE_MIDDLE)
1054 * for more.
1055 */
1056 req = wqe_to_tid_req(wqe);
1057 req->state = TID_REQUEST_RESEND;
1058 rcu_read_lock();
1059 remote = rcu_dereference(priv->tid_rdma.remote);
1060 req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
1061 len = wqe->length - (req->comp_seg * remote->max_len);
1062 rcu_read_unlock();
1063
1064 bth2 = mask_psn(qp->s_psn);
1065 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
1066 &bth2, &len);
1067 qp->s_psn = wqe->lpsn + 1;
1068 ss = NULL;
1069 qp->s_state = TID_OP(WRITE_REQ);
1070 priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
1071 priv->s_tid_cur = qp->s_cur;
1072 if (++qp->s_cur == qp->s_size)
1073 qp->s_cur = 0;
1074 trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
1075 wqe->psn, wqe->lpsn, req);
1076 break;
1077
1078 case TID_OP(READ_RESP):
1079 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
1080 goto bail;
1081 /* This is used to restart a TID read request */
1082 req = wqe_to_tid_req(wqe);
1083 wpriv = wqe->priv;
1084 /*
1085 * Back down. The field qp->s_psn has been set to the psn with
1086 * which the request should be restarted. It's OK to use division
1087 * as this is on the retry path.
1088 */
1089 req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
1090
1091 /*
1092 * The following function needs to be redefined to return the
1093 * status to make sure that we find the flow. At the same
1094 * time, we can use the req->state change to check if the
1095 * call succeeds or not.
1096 */
1097 req->state = TID_REQUEST_RESEND;
1098 hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
1099 if (req->state != TID_REQUEST_ACTIVE) {
1100 /*
1101 * Failed to find the flow. Release all allocated tid
1102 * resources.
1103 */
1104 hfi1_kern_exp_rcv_clear_all(req);
1105 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1106
1107 hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
1108 goto bail;
1109 }
1110 req->state = TID_REQUEST_RESEND;
1111 len = min_t(u32, req->seg_len,
1112 wqe->length - req->seg_len * req->cur_seg);
1113 flow = &req->flows[req->flow_idx];
1114 len -= flow->sent;
1115 req->s_next_psn = flow->flow_state.ib_lpsn + 1;
1116 delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1,
1117 &bth2, &len);
1118 if (delta <= 0) {
1119 /* Wait for TID space */
1120 goto bail;
1121 }
1122 hwords += delta;
1123 ss = &wpriv->ss;
1124 /* Check if this is the last segment */
1125 if (req->cur_seg >= req->total_segs &&
1126 ++qp->s_cur == qp->s_size)
1127 qp->s_cur = 0;
1128 qp->s_psn = req->s_next_psn;
1129 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1130 wqe->psn, wqe->lpsn, req);
1131 break;
1132 case TID_OP(READ_REQ):
1133 req = wqe_to_tid_req(wqe);
1134 delta = cmp_psn(qp->s_psn, wqe->psn);
1135 /*
1136 * If the current WR is not TID RDMA READ, or this is the start
1137 * of a new request, we need to change the qp->s_state so that
1138 * the request can be set up properly.
1139 */
1140 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
1141 qp->s_cur == qp->s_tail) {
1142 qp->s_state = OP(RDMA_READ_REQUEST);
1143 if (delta == 0 || qp->s_cur == qp->s_tail)
1144 goto check_s_state;
1145 else
1146 goto bail;
1147 }
1148
1149 /* Rate limiting */
1150 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
1151 qp->s_flags |= RVT_S_WAIT_RDMAR;
1152 goto bail;
1153 }
1154
1155 wpriv = wqe->priv;
1156 /* Read one segment at a time */
1157 len = min_t(u32, req->seg_len,
1158 wqe->length - req->seg_len * req->cur_seg);
1159 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
1160 &bth2, &len);
1161 if (delta <= 0) {
1162 /* Wait for TID space */
1163 goto bail;
1164 }
1165 hwords += delta;
1166 ss = &wpriv->ss;
1167 /* Check if this is the last segment */
1168 if (req->cur_seg >= req->total_segs &&
1169 ++qp->s_cur == qp->s_size)
1170 qp->s_cur = 0;
1171 qp->s_psn = req->s_next_psn;
1172 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1173 wqe->psn, wqe->lpsn, req);
1174 break;
1175 }
1176 qp->s_sending_hpsn = bth2;
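/*
 * Request an explicit ACK every HFI1_PSN_CREDIT packets within a request so
 * that long requests still elicit periodic acknowledgements; TID RDMA WRITE
 * manages its own acknowledgement scheme.
 */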
1177 delta = delta_psn(bth2, wqe->psn);
1178 if (delta && delta % HFI1_PSN_CREDIT == 0 &&
1179 wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1180 bth2 |= IB_BTH_REQ_ACK;
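/*
 * Send-one mode (set when a send was aborted or a restart asked us to wait):
 * request an ACK for this packet and stall until it arrives.
 */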
1181 if (qp->s_flags & RVT_S_SEND_ONE) {
1182 qp->s_flags &= ~RVT_S_SEND_ONE;
1183 qp->s_flags |= RVT_S_WAIT_ACK;
1184 bth2 |= IB_BTH_REQ_ACK;
1185 }
1186 qp->s_len -= len;
1187 ps->s_txreq->hdr_dwords = hwords;
1188 ps->s_txreq->sde = priv->s_sde;
1189 ps->s_txreq->ss = ss;
1190 ps->s_txreq->s_cur_size = len;
1191 hfi1_make_ruc_header(
1192 qp,
1193 ohdr,
1194 bth0 | (qp->s_state << 24),
1195 bth1,
1196 bth2,
1197 middle,
1198 ps);
1199 return 1;
1200
1201done_free_tx:
1202 hfi1_put_txreq(ps->s_txreq);
1203 ps->s_txreq = NULL;
1204 return 1;
1205
1206bail:
1207 hfi1_put_txreq(ps->s_txreq);
1208
1209bail_no_tx:
1210 ps->s_txreq = NULL;
1211 qp->s_flags &= ~RVT_S_BUSY;
1212 /*
1213 * If we didn't get a txreq, the QP will be woken up later to try
1214 * again. Set the flags to indicate which work item to wake
1215 * up.
1216 */
1217 iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
1218 return 0;
1219}
1220
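/*
 * Fill in the AETH (NAK code or credit syndrome plus MSN) and the three BTH
 * dwords (opcode/flags, remote QPN, and the masked r_ack_psn) of a responder
 * ACK.
 */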
1221static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
1222 struct ib_other_headers *ohdr,
1223 u32 bth0, u32 bth1)
1224{
1225 if (qp->r_nak_state)
1226 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
1227 (qp->r_nak_state <<
1228 IB_AETH_CREDIT_SHIFT));
1229 else
1230 ohdr->u.aeth = rvt_compute_aeth(qp);
1231
1232 ohdr->bth[0] = cpu_to_be32(bth0);
1233 ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
1234 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
1235}
1236
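/*
 * Hand the ACK off to the send engine: record the NAK state and ACK PSN on
 * the send side, mark a response as pending, and schedule the QP so that
 * make_rc_ack() emits the packet later.
 */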
1237static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
1238{
1239 struct rvt_qp *qp = packet->qp;
1240 struct hfi1_ibport *ibp;
1241 unsigned long flags;
1242
1243 spin_lock_irqsave(&qp->s_lock, flags);
1244 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1245 goto unlock;
1246 ibp = rcd_to_iport(packet->rcd);
1247 this_cpu_inc(*ibp->rvp.rc_qacks);
1248 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
1249 qp->s_nak_state = qp->r_nak_state;
1250 qp->s_ack_psn = qp->r_ack_psn;
1251 if (is_fecn)
1252 qp->s_flags |= RVT_S_ECN;
1253
1254 /* Schedule the send tasklet. */
1255 hfi1_schedule_send(qp);
1256unlock:
1257 spin_unlock_irqrestore(&qp->s_lock, flags);
1258}
1259
1260static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
1261 struct hfi1_opa_header *opa_hdr,
1262 u8 sc5, bool is_fecn,
1263 u64 *pbc_flags, u32 *hwords,
1264 u32 *nwords)
1265{
1266 struct rvt_qp *qp = packet->qp;
1267 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1268 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1269 struct ib_header *hdr = &opa_hdr->ibh;
1270 struct ib_other_headers *ohdr;
1271 u16 lrh0 = HFI1_LRH_BTH;
1272 u16 pkey;
1273 u32 bth0, bth1;
1274
1275 opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
1276 ohdr = &hdr->u.oth;
1277 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
1278 *hwords = 6;
1279
1280 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
1281 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
1282 rdma_ah_read_grh(&qp->remote_ah_attr),
1283 *hwords - 2, SIZE_OF_CRC);
1284 ohdr = &hdr->u.l.oth;
1285 lrh0 = HFI1_LRH_GRH;
1286 }
1287 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
1288 *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
1289
1290 /* read pkey_index w/o lock (it's atomic) */
1291 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1292
1293 lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
1294 (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
1295 IB_SL_SHIFT;
1296
1297 hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
1298 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
1299 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
1300
1301 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
1302 if (qp->s_mig_state == IB_MIG_MIGRATED)
1303 bth0 |= IB_BTH_MIG_REQ;
1304 bth1 = (!!is_fecn) << IB_BECN_SHIFT;
1305 /*
1306 * Inline ACKs go out without the use of the Verbs send engine, so
1307 * we need to set the STL Verbs Extended bit here
1308 */
1309 bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT;
1310 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1311}
1312
1313static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
1314 struct hfi1_opa_header *opa_hdr,
1315 u8 sc5, bool is_fecn,
1316 u64 *pbc_flags, u32 *hwords,
1317 u32 *nwords)
1318{
1319 struct rvt_qp *qp = packet->qp;
1320 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1321 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1322 struct hfi1_16b_header *hdr = &opa_hdr->opah;
1323 struct ib_other_headers *ohdr;
1324 u32 bth0, bth1 = 0;
1325 u16 len, pkey;
1326 bool becn = is_fecn;
1327 u8 l4 = OPA_16B_L4_IB_LOCAL;
1328 u8 extra_bytes;
1329
1330 opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
1331 ohdr = &hdr->u.oth;
1332 /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
1333 *hwords = 8;
1334 extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
1335 *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);
1336
1337 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
1338 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
1339 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
1340 rdma_ah_read_grh(&qp->remote_ah_attr),
1341 *hwords - 4, *nwords);
1342 ohdr = &hdr->u.l.oth;
1343 l4 = OPA_16B_L4_IB_GLOBAL;
1344 }
1345 *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
1346
1347 /* read pkey_index w/o lock (it's atomic) */
1348 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1349
1350 /* Convert dwords to flits */
1351 len = (*hwords + *nwords) >> 1;
1352
1353 hfi1_make_16b_hdr(hdr, ppd->lid |
1354 (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
1355 ((1 << ppd->lmc) - 1)),
1356 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
1357 16B), len, pkey, becn, 0, l4, sc5);
1358
1359 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
1360 bth0 |= extra_bytes << 20;
1361 if (qp->s_mig_state == IB_MIG_MIGRATED)
1362 bth1 = OPA_BTH_MIG_REQ;
1363 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1364}
1365
1366typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
1367 struct hfi1_opa_header *opa_hdr,
1368 u8 sc5, bool is_fecn,
1369 u64 *pbc_flags, u32 *hwords,
1370 u32 *nwords);
1371
1372/* We support only two types - 9B and 16B for now */
1373static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
1374 [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
1375 [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
1376};
1377
1378/**
1379 * hfi1_send_rc_ack - Construct an ACK packet and send it
1380 * @packet: the received packet that prompted this ACK
1381 * @is_fecn: whether the received packet had the FECN bit set
1382 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
1383 * Note that RDMA reads and atomics are handled in the
1384 * send side QP state and send engine.
1385 */
1386void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
1387{
1388 struct hfi1_ctxtdata *rcd = packet->rcd;
1389 struct rvt_qp *qp = packet->qp;
1390 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
1391 struct hfi1_qp_priv *priv = qp->priv;
1392 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1393 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
1394 u64 pbc, pbc_flags = 0;
1395 u32 hwords = 0;
1396 u32 nwords = 0;
1397 u32 plen;
1398 struct pio_buf *pbuf;
1399 struct hfi1_opa_header opa_hdr;
1400
1401 /* clear the defer count */
1402 qp->r_adefered = 0;
1403
1404 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
1405 if (qp->s_flags & RVT_S_RESP_PENDING) {
1406 hfi1_queue_rc_ack(packet, is_fecn);
1407 return;
1408 }
1409
1410 /* Ensure s_rdma_ack_cnt changes are committed */
1411 if (qp->s_rdma_ack_cnt) {
1412 hfi1_queue_rc_ack(packet, is_fecn);
1413 return;
1414 }
1415
1416 /* Don't try to send ACKs if the link isn't ACTIVE */
1417 if (driver_lstate(ppd) != IB_PORT_ACTIVE)
1418 return;
1419
1420 /* Make the appropriate header */
1421 hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
1422 &pbc_flags, &hwords, &nwords);
1423
1424 plen = 2 /* PBC */ + hwords + nwords;
1425 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
1426 sc_to_vlt(ppd->dd, sc5), plen);
1427 pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
1428 if (IS_ERR_OR_NULL(pbuf)) {
1429 /*
1430 * We have no room to send at the moment. Pass
1431 * responsibility for sending the ACK to the send engine
1432 * so that when enough buffer space becomes available,
1433 * the ACK is sent ahead of other outgoing packets.
1434 */
1435 hfi1_queue_rc_ack(packet, is_fecn);
1436 return;
1437 }
1438 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
1439 &opa_hdr, ib_is_sc5(sc5));
1440
1441 /* write the pbc and data */
1442 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
1443 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
1444 (void *)&opa_hdr.ibh :
1445 (void *)&opa_hdr.opah), hwords);
1446 return;
1447}
1448
1449/**
1450 * update_num_rd_atomic - update the qp->s_num_rd_atomic
1451 * @qp: the QP
1452 * @psn: the packet sequence number to restart at
1453 * @wqe: the wqe
1454 *
1455 * This is called from reset_psn() to update qp->s_num_rd_atomic
1456 * for the current wqe.
1457 * Called at interrupt level with the QP s_lock held.
1458 */
1459static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
1460 struct rvt_swqe *wqe)
1461{
1462 u32 opcode = wqe->wr.opcode;
1463
1464 if (opcode == IB_WR_RDMA_READ ||
1465 opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1466 opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1467 qp->s_num_rd_atomic++;
1468 } else if (opcode == IB_WR_TID_RDMA_READ) {
1469 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1470 struct hfi1_qp_priv *priv = qp->priv;
1471
1472 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1473 u32 cur_seg;
1474
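/*
 * With pkts_ps as the number of packets per TID RDMA READ segment, the PSN
 * offset into the request divided by it gives the segment index; e.g. a
 * restart 20 packets into the request with pkts_ps == 8 lands in segment 2.
 */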
1475 cur_seg = (psn - wqe->psn) / priv->pkts_ps;
1476 req->ack_pending = cur_seg - req->comp_seg;
1477 priv->pending_tid_r_segs += req->ack_pending;
1478 qp->s_num_rd_atomic += req->ack_pending;
1479 trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
1480 wqe->wr.opcode,
1481 wqe->psn,
1482 wqe->lpsn,
1483 req);
1484 } else {
1485 priv->pending_tid_r_segs += req->total_segs;
1486 qp->s_num_rd_atomic += req->total_segs;
1487 }
1488 }
1489}
1490
1491/**
1492 * reset_psn - reset the QP state to send starting from PSN
1493 * @qp: the QP
1494 * @psn: the packet sequence number to restart at
1495 *
1496 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
1497 * for the given QP.
1498 * Called at interrupt level with the QP s_lock held.
1499 */
1500static void reset_psn(struct rvt_qp *qp, u32 psn)
1501{
1502 u32 n = qp->s_acked;
1503 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
1504 u32 opcode;
1505 struct hfi1_qp_priv *priv = qp->priv;
1506
1507 lockdep_assert_held(&qp->s_lock);
1508 qp->s_cur = n;
1509 priv->pending_tid_r_segs = 0;
1510 priv->pending_tid_w_resp = 0;
1511 qp->s_num_rd_atomic = 0;
1512
1513 /*
1514 * If we are starting the request from the beginning,
1515 * let the normal send code handle initialization.
1516 */
1517 if (cmp_psn(psn, wqe->psn) <= 0) {
1518 qp->s_state = OP(SEND_LAST);
1519 goto done;
1520 }
1521 update_num_rd_atomic(qp, psn, wqe);
1522
1523 /* Find the work request opcode corresponding to the given PSN. */
1524 for (;;) {
1525 int diff;
1526
1527 if (++n == qp->s_size)
1528 n = 0;
1529 if (n == qp->s_tail)
1530 break;
1531 wqe = rvt_get_swqe_ptr(qp, n);
1532 diff = cmp_psn(psn, wqe->psn);
1533 if (diff < 0) {
1534 /* Point wqe back to the previous one */
1535 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1536 break;
1537 }
1538 qp->s_cur = n;
1539 /*
1540 * If we are starting the request from the beginning,
1541 * let the normal send code handle initialization.
1542 */
1543 if (diff == 0) {
1544 qp->s_state = OP(SEND_LAST);
1545 goto done;
1546 }
1547
1548 update_num_rd_atomic(qp, psn, wqe);
1549 }
1550 opcode = wqe->wr.opcode;
1551
1552 /*
1553 * Set the state to restart in the middle of a request.
1554 * Don't change the s_sge, s_cur_sge, or s_cur_size.
1555 * See hfi1_make_rc_req().
1556 */
1557 switch (opcode) {
1558 case IB_WR_SEND:
1559 case IB_WR_SEND_WITH_IMM:
1560 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
1561 break;
1562
1563 case IB_WR_RDMA_WRITE:
1564 case IB_WR_RDMA_WRITE_WITH_IMM:
1565 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
1566 break;
1567
1568 case IB_WR_TID_RDMA_WRITE:
1569 qp->s_state = TID_OP(WRITE_RESP);
1570 break;
1571
1572 case IB_WR_RDMA_READ:
1573 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
1574 break;
1575
1576 case IB_WR_TID_RDMA_READ:
1577 qp->s_state = TID_OP(READ_RESP);
1578 break;
1579
1580 default:
1581 /*
1582 * This case shouldn't happen since there is only
1583 * one PSN per req.
1584 */
1585 qp->s_state = OP(SEND_LAST);
1586 }
1587done:
1588 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
1589 qp->s_psn = psn;
1590 /*
1591 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
1592 * asynchronously before the send engine can get scheduled.
1593 * Doing it in hfi1_make_rc_req() is too late.
1594 */
1595 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
1596 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
1597 qp->s_flags |= RVT_S_WAIT_PSN;
1598 qp->s_flags &= ~HFI1_S_AHG_VALID;
1599 trace_hfi1_sender_reset_psn(qp);
1600}
1601
1602/*
1603 * Back up requester to resend the last un-ACKed request.
1604 * The QP r_lock and s_lock should be held and interrupts disabled.
1605 */
1606void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
1607{
1608 struct hfi1_qp_priv *priv = qp->priv;
1609 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1610 struct hfi1_ibport *ibp;
1611
1612 lockdep_assert_held(&qp->r_lock);
1613 lockdep_assert_held(&qp->s_lock);
1614 trace_hfi1_sender_restart_rc(qp);
1615 if (qp->s_retry == 0) {
1616 if (qp->s_mig_state == IB_MIG_ARMED) {
1617 hfi1_migrate_qp(qp);
1618 qp->s_retry = qp->s_retry_cnt;
1619 } else if (qp->s_last == qp->s_acked) {
1620 /*
1621 * We need special handling for the OPFN request WQEs as
1622 * they are not allowed to generate real user errors
1623 */
1624 if (wqe->wr.opcode == IB_WR_OPFN) {
1625 struct hfi1_ibport *ibp =
1626 to_iport(qp->ibqp.device, qp->port_num);
1627 /*
1628 * Call opfn_conn_reply() with capcode and
1629 * remaining data as 0 to close out the
1630 * current request
1631 */
1632 opfn_conn_reply(qp, priv->opfn.curr);
1633 wqe = do_rc_completion(qp, wqe, ibp);
1634 qp->s_flags &= ~RVT_S_WAIT_ACK;
1635 } else {
1636 trace_hfi1_tid_write_sender_restart_rc(qp, 0);
1637 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
1638 struct tid_rdma_request *req;
1639
1640 req = wqe_to_tid_req(wqe);
1641 hfi1_kern_exp_rcv_clear_all(req);
1642 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1643 }
1644
1645 hfi1_trdma_send_complete(qp, wqe,
1646 IB_WC_RETRY_EXC_ERR);
1647 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1648 }
1649 return;
1650 } else { /* need to handle delayed completion */
1651 return;
1652 }
1653 } else {
1654 qp->s_retry--;
1655 }
1656
1657 ibp = to_iport(qp->ibqp.device, qp->port_num);
1658 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1659 wqe->wr.opcode == IB_WR_TID_RDMA_READ)
1660 ibp->rvp.n_rc_resends++;
1661 else
1662 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
1663
1664 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
1665 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
1666 RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
1667 if (wait)
1668 qp->s_flags |= RVT_S_SEND_ONE;
1669 reset_psn(qp, psn);
1670}
1671
1672/*
1673 * Set qp->s_sending_psn to the next PSN after the given one.
1674 * This would be psn+1 except when RDMA reads or TID RDMA ops
1675 * are present.
1676 */
1677static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
1678{
1679 struct rvt_swqe *wqe;
1680 u32 n = qp->s_last;
1681
1682 lockdep_assert_held(&qp->s_lock);
1683 /* Find the work request corresponding to the given PSN. */
1684 for (;;) {
1685 wqe = rvt_get_swqe_ptr(qp, n);
1686 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1687 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1688 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
1689 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
1690 qp->s_sending_psn = wqe->lpsn + 1;
1691 else
1692 qp->s_sending_psn = psn + 1;
1693 break;
1694 }
1695 if (++n == qp->s_size)
1696 n = 0;
1697 if (n == qp->s_tail)
1698 break;
1699 }
1700}
1701
1702/**
1703 * hfi1_rc_verbs_aborted - handle abort status
1704 * @qp: the QP
1705 * @opah: the opa header
1706 *
1707 * This code modifies both the ACK bit in BTH[2]
1708 * and the s_flags to go into send one mode.
1709 *
1710 * This serves to throttle the send engine to only
1711 * send a single packet in the likely case that
1712 * a link has gone down.
1713 */
1714void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1715{
1716 struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
1717 u8 opcode = ib_bth_get_opcode(ohdr);
1718 u32 psn;
1719
1720 /* ignore responses */
1721 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1722 opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
1723 opcode == TID_OP(READ_RESP) ||
1724 opcode == TID_OP(WRITE_RESP))
1725 return;
1726
1727 psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
1728 ohdr->bth[2] = cpu_to_be32(psn);
1729 qp->s_flags |= RVT_S_SEND_ONE;
1730}
1731
1732/*
1733 * This should be called with the QP s_lock held and interrupts disabled.
1734 */
1735void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1736{
1737 struct ib_other_headers *ohdr;
1738 struct hfi1_qp_priv *priv = qp->priv;
1739 struct rvt_swqe *wqe;
1740 u32 opcode, head, tail;
1741 u32 psn;
1742 struct tid_rdma_request *req;
1743
1744 lockdep_assert_held(&qp->s_lock);
1745 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
1746 return;
1747
1748 ohdr = hfi1_get_rc_ohdr(opah);
1749 opcode = ib_bth_get_opcode(ohdr);
1750 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1751 opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
1752 opcode == TID_OP(READ_RESP) ||
1753 opcode == TID_OP(WRITE_RESP)) {
1754 WARN_ON(!qp->s_rdma_ack_cnt);
1755 qp->s_rdma_ack_cnt--;
1756 return;
1757 }
1758
1759 psn = ib_bth_get_psn(ohdr);
1760 /*
1761 * Don't attempt to reset the sending PSN for packets in the
1762 * KDETH PSN space since the PSN does not match anything.
1763 */
1764 if (opcode != TID_OP(WRITE_DATA) &&
1765 opcode != TID_OP(WRITE_DATA_LAST) &&
1766 opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC))
1767 reset_sending_psn(qp, psn);
1768
1769 /* Handle TID RDMA WRITE packets differently */
1770 if (opcode >= TID_OP(WRITE_REQ) &&
1771 opcode <= TID_OP(WRITE_DATA_LAST)) {
1772 head = priv->s_tid_head;
1773 tail = priv->s_tid_cur;
1774 /*
1775 * s_tid_cur is set to s_tid_head in the case where
1776 * a new TID RDMA request is being started and all
1777 * previous ones have been completed.
1778 * Therefore, we need to do a secondary check in order
1779 * to properly determine whether we should start the
1780 * RC timer.
1781 */
1782 wqe = rvt_get_swqe_ptr(qp, tail);
1783 req = wqe_to_tid_req(wqe);
1784 if (head == tail && req->comp_seg < req->total_segs) {
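/*
 * The request at the tail still has unacked segments, so step tail back one
 * slot (with wrap) so the tail != head test below treats it as outstanding.
 */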
1785 if (tail == 0)
1786 tail = qp->s_size - 1;
1787 else
1788 tail -= 1;
1789 }
1790 } else {
1791 head = qp->s_tail;
1792 tail = qp->s_acked;
1793 }
1794
1795 /*
1796 * Start timer after a packet requesting an ACK has been sent and
1797 * there are still requests that haven't been acked.
1798 */
1799 if ((psn & IB_BTH_REQ_ACK) && tail != head &&
1800 opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) &&
1801 opcode != TID_OP(RESYNC) &&
1802 !(qp->s_flags &
1803 (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
1804 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1805 if (opcode == TID_OP(READ_REQ))
1806 rvt_add_retry_timer_ext(qp, priv->timeout_shift);
1807 else
1808 rvt_add_retry_timer(qp);
1809 }
1810
1811 /* Start TID RDMA ACK timer */
1812 if ((opcode == TID_OP(WRITE_DATA) ||
1813 opcode == TID_OP(WRITE_DATA_LAST) ||
1814 opcode == TID_OP(RESYNC)) &&
1815 (psn & IB_BTH_REQ_ACK) &&
1816 !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
1817 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1818 /*
1819 * The TID RDMA ACK packet could be received before this
1820 * function is called. Therefore, add the timer only if TID
1821 * RDMA ACK packets are actually pending.
1822 */
1823 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1824 req = wqe_to_tid_req(wqe);
1825 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
1826 req->ack_seg < req->cur_seg)
1827 hfi1_add_tid_retry_timer(qp);
1828 }
1829
1830 while (qp->s_last != qp->s_acked) {
1831 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
1832 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1833 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1834 break;
1835 trdma_clean_swqe(qp, wqe);
1836 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1837 rvt_qp_complete_swqe(qp,
1838 wqe,
1839 ib_hfi1_wc_opcode[wqe->wr.opcode],
1840 IB_WC_SUCCESS);
1841 }
1842 /*
1843 * If we were waiting for sends to complete before re-sending,
1844 * and they are now complete, restart sending.
1845 */
1846 trace_hfi1_sendcomplete(qp, psn);
1847 if (qp->s_flags & RVT_S_WAIT_PSN &&
1848 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1849 qp->s_flags &= ~RVT_S_WAIT_PSN;
1850 qp->s_sending_psn = qp->s_psn;
1851 qp->s_sending_hpsn = qp->s_psn - 1;
1852 hfi1_schedule_send(qp);
1853 }
1854}
1855
1856static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
1857{
1858 qp->s_last_psn = psn;
1859}
1860
1861/*
1862 * Generate a SWQE completion.
1863 * This is similar to hfi1_send_complete but has to check to be sure
1864 * that the SGEs are not being referenced if the SWQE is being resent.
1865 */
1866struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
1867 struct rvt_swqe *wqe,
1868 struct hfi1_ibport *ibp)
1869{
1870 struct hfi1_qp_priv *priv = qp->priv;
1871
1872 lockdep_assert_held(&qp->s_lock);
1873 /*
1874 * Don't decrement refcount and don't generate a
1875 * completion if the SWQE is being resent until the send
1876 * is finished.
1877 */
1878 trace_hfi1_rc_completion(qp, wqe->lpsn);
1879 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1880 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1881 trdma_clean_swqe(qp, wqe);
1882 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1883 rvt_qp_complete_swqe(qp,
1884 wqe,
1885 ib_hfi1_wc_opcode[wqe->wr.opcode],
1886 IB_WC_SUCCESS);
1887 } else {
1888 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1889
1890 this_cpu_inc(*ibp->rvp.rc_delayed_comp);
1891 /*
1892 * If send progress not running attempt to progress
1893 * SDMA queue.
1894 */
1895 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
1896 struct sdma_engine *engine;
1897 u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
1898 u8 sc5;
1899
1900 /* For now use sc to find engine */
1901 sc5 = ibp->sl_to_sc[sl];
1902 engine = qp_to_sdma_engine(qp, sc5);
1903 sdma_engine_progress_schedule(engine);
1904 }
1905 }
1906
1907 qp->s_retry = qp->s_retry_cnt;
1908 /*
1909 * Don't update the last PSN if the request being completed is
1910 * a TID RDMA WRITE request.
1911 * Completion of the TID RDMA WRITE requests is done by the
1912 * TID RDMA ACKs and as such could be for a request that has
1913 * already been ACKed as far as the IB state machine is
1914 * concerned.
1915 */
1916 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1917 update_last_psn(qp, wqe->lpsn);
1918
1919 /*
1920 * If we are completing a request which is in the process of
1921 * being resent, we can stop re-sending it since we know the
1922 * responder has already seen it.
1923 */
1924 if (qp->s_acked == qp->s_cur) {
1925 if (++qp->s_cur >= qp->s_size)
1926 qp->s_cur = 0;
1927 qp->s_acked = qp->s_cur;
1928 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1929 if (qp->s_acked != qp->s_tail) {
1930 qp->s_state = OP(SEND_LAST);
1931 qp->s_psn = wqe->psn;
1932 }
1933 } else {
1934 if (++qp->s_acked >= qp->s_size)
1935 qp->s_acked = 0;
1936 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1937 qp->s_draining = 0;
1938 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1939 }
1940 if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
1941 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
1942 hfi1_schedule_send(qp);
1943 }
1944 return wqe;
1945}
1946
1947static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
1948{
1949 /* Retry this request. */
1950 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1951 qp->r_flags |= RVT_R_RDMAR_SEQ;
1952 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1953 if (list_empty(&qp->rspwait)) {
1954 qp->r_flags |= RVT_R_RSP_SEND;
1955 rvt_get_qp(qp);
1956 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1957 }
1958 }
1959}
1960
1961/**
1962 * update_qp_retry_state - Update qp retry state.
1963 * @qp: the QP
1964 * @psn: the packet sequence number of the TID RDMA WRITE RESP.
1965 * @spsn: The start psn for the given TID RDMA WRITE swqe.
1966 * @lpsn: The last psn for the given TID RDMA WRITE swqe.
1967 *
1968 * This function is called to update the qp retry state upon
1969 * receiving a TID WRITE RESP after the qp is scheduled to retry
1970 * a request.
1971 */
1972static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
1973 u32 lpsn)
1974{
1975 struct hfi1_qp_priv *qpriv = qp->priv;
1976
1977 qp->s_psn = psn + 1;
1978 /*
1979 * If this is the first TID RDMA WRITE RESP packet for the current
1980 * request, change the s_state so that the retry will be processed
1981 * correctly. Similarly, if this is the last TID RDMA WRITE RESP
1982 * packet, change the s_state and advance the s_cur.
1983 */
1984 if (cmp_psn(psn, lpsn) >= 0) {
1985 qp->s_cur = qpriv->s_tid_cur + 1;
1986 if (qp->s_cur >= qp->s_size)
1987 qp->s_cur = 0;
1988 qp->s_state = TID_OP(WRITE_REQ);
1989 } else if (!cmp_psn(psn, spsn)) {
1990 qp->s_cur = qpriv->s_tid_cur;
1991 qp->s_state = TID_OP(WRITE_RESP);
1992 }
1993}
1994
1995/**
1996 * do_rc_ack - process an incoming RC ACK
1997 * @qp: the QP the ACK came in on
1998 * @psn: the packet sequence number of the ACK
1999 * @opcode: the opcode of the request that resulted in the ACK
2000 *
2001 * This is called from rc_rcv_resp() to process an incoming RC ACK
2002 * for the given QP.
2003 * May be called at interrupt level, with the QP s_lock held.
2004 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
2005 */
2006int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
2007 u64 val, struct hfi1_ctxtdata *rcd)
2008{
2009 struct hfi1_ibport *ibp;
2010 enum ib_wc_status status;
2011 struct hfi1_qp_priv *qpriv = qp->priv;
2012 struct rvt_swqe *wqe;
2013 int ret = 0;
2014 u32 ack_psn;
2015 int diff;
2016 struct rvt_dev_info *rdi;
2017
2018 lockdep_assert_held(&qp->s_lock);
2019 /*
2020 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2021 * requests and implicitly NAK RDMA read and atomic requests issued
2022 * before the NAK'ed request. The MSN won't include the NAK'ed
2023 * request but will include the ACK'ed request(s).
2024 */
2025 ack_psn = psn;
2026 if (aeth >> IB_AETH_NAK_SHIFT)
2027 ack_psn--;
2028 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2029 ibp = rcd_to_iport(rcd);
2030
2031 /*
2032 * The MSN might be for a later WQE than the PSN indicates so
2033 * only complete WQEs that the PSN finishes.
2034 */
2035 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
2036 /*
2037 * RDMA_READ_RESPONSE_ONLY is a special case since
2038 * we want to generate completion events for everything
2039 * before the RDMA read, copy the data, then generate
2040 * the completion for the read.
2041 */
2042 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
2043 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
2044 diff == 0) {
2045 ret = 1;
2046 goto bail_stop;
2047 }
2048 /*
2049 * If this request is a RDMA read or atomic, and the ACK is
2050 * for a later operation, this ACK NAKs the RDMA read or
2051 * atomic. In other words, only a RDMA_READ_LAST or ONLY
2052 * can ACK a RDMA read and likewise for atomic ops. Note
2053 * that the NAK case can only happen if relaxed ordering is
2054 * used and requests are sent after an RDMA read or atomic
2055 * is sent but before the response is received.
2056 */
2057 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
2058 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
2059 (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
2060 (opcode != TID_OP(READ_RESP) || diff != 0)) ||
2061 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2062 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
2063 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
2064 (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2065 (delta_psn(psn, qp->s_last_psn) != 1))) {
2066 set_restart_qp(qp, rcd);
2067 /*
2068 * No need to process the ACK/NAK since we are
2069 * restarting an earlier request.
2070 */
2071 goto bail_stop;
2072 }
2073 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2074 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2075 u64 *vaddr = wqe->sg_list[0].vaddr;
2076 *vaddr = val;
2077 }
2078 if (wqe->wr.opcode == IB_WR_OPFN)
2079 opfn_conn_reply(qp, val);
2080
2081 if (qp->s_num_rd_atomic &&
2082 (wqe->wr.opcode == IB_WR_RDMA_READ ||
2083 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2084 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
2085 qp->s_num_rd_atomic--;
2086 /* Restart sending task if fence is complete */
2087 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2088 !qp->s_num_rd_atomic) {
2089 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2090 RVT_S_WAIT_ACK);
2091 hfi1_schedule_send(qp);
2092 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2093 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
2094 RVT_S_WAIT_ACK);
2095 hfi1_schedule_send(qp);
2096 }
2097 }
2098
2099 /*
2100 * TID RDMA WRITE requests will be completed by the TID RDMA
2101 * ACK packet handler (see tid_rdma.c).
2102 */
2103 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
2104 break;
2105
2106 wqe = do_rc_completion(qp, wqe, ibp);
2107 if (qp->s_acked == qp->s_tail)
2108 break;
2109 }
2110
2111 trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
2112 trace_hfi1_sender_do_rc_ack(qp);
2113 switch (aeth >> IB_AETH_NAK_SHIFT) {
2114 case 0: /* ACK */
2115 this_cpu_inc(*ibp->rvp.rc_acks);
2116 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2117 if (wqe_to_tid_req(wqe)->ack_pending)
2118 rvt_mod_retry_timer_ext(qp,
2119 qpriv->timeout_shift);
2120 else
2121 rvt_stop_rc_timers(qp);
2122 } else if (qp->s_acked != qp->s_tail) {
2123 struct rvt_swqe *__w = NULL;
2124
2125 if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
2126 __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
2127
2128 /*
2129 * Stop timers if we've received all of the TID RDMA
2130 * WRITE responses.
2131 */
2132 if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2133 opcode == TID_OP(WRITE_RESP)) {
2134 /*
2135 * Normally, the loop above would correctly
2136 * process all WQEs from s_acked onward and
2137 * either complete them or check for correct
2138 * PSN sequencing.
2139 * However, for TID RDMA, due to pipelining,
2140 * the response may not be for the request at
2141 * s_acked so the above loop would just be
2142 * skipped. This does not allow for checking
2143 * the PSN sequencing. It has to be done
2144 * separately.
2145 */
2146 if (cmp_psn(psn, qp->s_last_psn + 1)) {
2147 set_restart_qp(qp, rcd);
2148 goto bail_stop;
2149 }
2150 /*
2151 * If the psn is being resent, stop the
2152 * resending.
2153 */
2154 if (qp->s_cur != qp->s_tail &&
2155 cmp_psn(qp->s_psn, psn) <= 0)
2156 update_qp_retry_state(qp, psn,
2157 __w->psn,
2158 __w->lpsn);
2159 else if (--qpriv->pending_tid_w_resp)
2160 rvt_mod_retry_timer(qp);
2161 else
2162 rvt_stop_rc_timers(qp);
2163 } else {
2164 /*
2165 * We are expecting more ACKs so
2166 * mod the retry timer.
2167 */
2168 rvt_mod_retry_timer(qp);
2169 /*
2170 * We can stop re-sending the earlier packets
2171 * and continue with the next packet the
2172 * receiver wants.
2173 */
2174 if (cmp_psn(qp->s_psn, psn) <= 0)
2175 reset_psn(qp, psn + 1);
2176 }
2177 } else {
2178 /* No more acks - kill all timers */
2179 rvt_stop_rc_timers(qp);
2180 if (cmp_psn(qp->s_psn, psn) <= 0) {
2181 qp->s_state = OP(SEND_LAST);
2182 qp->s_psn = psn + 1;
2183 }
2184 }
2185 if (qp->s_flags & RVT_S_WAIT_ACK) {
2186 qp->s_flags &= ~RVT_S_WAIT_ACK;
2187 hfi1_schedule_send(qp);
2188 }
2189 rvt_get_credit(qp, aeth);
2190 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2191 qp->s_retry = qp->s_retry_cnt;
2192 /*
2193 * If the current request is a TID RDMA WRITE request and the
2194 * response is not a TID RDMA WRITE RESP packet, s_last_psn
2195 * can't be advanced.
2196 */
2197 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2198 opcode != TID_OP(WRITE_RESP) &&
2199 cmp_psn(psn, wqe->psn) >= 0)
2200 return 1;
2201 update_last_psn(qp, psn);
2202 return 1;
2203
2204 case 1: /* RNR NAK */
2205 ibp->rvp.n_rnr_naks++;
2206 if (qp->s_acked == qp->s_tail)
2207 goto bail_stop;
2208 if (qp->s_flags & RVT_S_WAIT_RNR)
2209 goto bail_stop;
2210 rdi = ib_to_rvt(qp->ibqp.device);
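/*
 * Operations whose post_parms are flagged with RVT_OPERATION_IGN_RNR_CNT
 * bypass the RNR retry counting below and are simply retried.
 */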
2211 if (!(rdi->post_parms[wqe->wr.opcode].flags &
2212 RVT_OPERATION_IGN_RNR_CNT)) {
2213 if (qp->s_rnr_retry == 0) {
2214 status = IB_WC_RNR_RETRY_EXC_ERR;
2215 goto class_b;
2216 }
2217 if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
2218 qp->s_rnr_retry--;
2219 }
2220
2221 /*
2222 * The last valid PSN is the previous PSN. For TID RDMA WRITE
2223 * request, s_last_psn should be incremented only when a TID
2224 * RDMA WRITE RESP is received to avoid skipping lost TID RDMA
2225 * WRITE RESP packets.
2226 */
2227 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
2228 reset_psn(qp, qp->s_last_psn + 1);
2229 } else {
2230 update_last_psn(qp, psn - 1);
2231 reset_psn(qp, psn);
2232 }
2233
2234 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
2235 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
2236 rvt_stop_rc_timers(qp);
2237 rvt_add_rnr_timer(qp, aeth);
2238 return 0;
2239
2240 case 3: /* NAK */
2241 if (qp->s_acked == qp->s_tail)
2242 goto bail_stop;
2243 /* The last valid PSN is the previous PSN. */
2244 update_last_psn(qp, psn - 1);
2245 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
2246 IB_AETH_CREDIT_MASK) {
2247 case 0: /* PSN sequence error */
2248 ibp->rvp.n_seq_naks++;
2249 /*
2250 * Back up to the responder's expected PSN.
2251 * Note that we might get a NAK in the middle of an
2252 * RDMA READ response which terminates the RDMA
2253 * READ.
2254 */
2255 hfi1_restart_rc(qp, psn, 0);
2256 hfi1_schedule_send(qp);
2257 break;
2258
2259 case 1: /* Invalid Request */
2260 status = IB_WC_REM_INV_REQ_ERR;
2261 ibp->rvp.n_other_naks++;
2262 goto class_b;
2263
2264 case 2: /* Remote Access Error */
2265 status = IB_WC_REM_ACCESS_ERR;
2266 ibp->rvp.n_other_naks++;
2267 goto class_b;
2268
2269 case 3: /* Remote Operation Error */
2270 status = IB_WC_REM_OP_ERR;
2271 ibp->rvp.n_other_naks++;
2272class_b:
2273 if (qp->s_last == qp->s_acked) {
2274 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
2275 hfi1_kern_read_tid_flow_free(qp);
2276
2277 hfi1_trdma_send_complete(qp, wqe, status);
2278 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2279 }
2280 break;
2281
2282 default:
2283 /* Ignore other reserved NAK error codes */
2284 goto reserved;
2285 }
2286 qp->s_retry = qp->s_retry_cnt;
2287 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2288 goto bail_stop;
2289
2290 default: /* 2: reserved */
2291reserved:
2292 /* Ignore reserved NAK codes. */
2293 goto bail_stop;
2294 }
2295 /* cannot be reached */
2296bail_stop:
2297 rvt_stop_rc_timers(qp);
2298 return ret;
2299}
2300
2301/*
2302 * We have seen an out of sequence RDMA read middle or last packet.
2303 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
2304 */
2305static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
2306 struct hfi1_ctxtdata *rcd)
2307{
2308 struct rvt_swqe *wqe;
2309
2310 lockdep_assert_held(&qp->s_lock);
2311 /* Remove QP from retry timer */
2312 rvt_stop_rc_timers(qp);
2313
2314 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2315
2316 while (cmp_psn(psn, wqe->lpsn) > 0) {
2317 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
2318 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
2319 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
2320 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2321 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
2322 break;
2323 wqe = do_rc_completion(qp, wqe, ibp);
2324 }
2325
2326 ibp->rvp.n_rdma_seq++;
2327 qp->r_flags |= RVT_R_RDMAR_SEQ;
2328 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
2329 if (list_empty(&qp->rspwait)) {
2330 qp->r_flags |= RVT_R_RSP_SEND;
2331 rvt_get_qp(qp);
2332 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2333 }
2334}
2335
2336/**
2337 * rc_rcv_resp - process an incoming RC response packet
2338 * @packet: data packet information
2339 *
2340 * This is called from hfi1_rc_rcv() to process an incoming RC response
2341 * packet for the given QP.
2342 * Called at interrupt level.
2343 */
2344static void rc_rcv_resp(struct hfi1_packet *packet)
2345{
2346 struct hfi1_ctxtdata *rcd = packet->rcd;
2347 void *data = packet->payload;
2348 u32 tlen = packet->tlen;
2349 struct rvt_qp *qp = packet->qp;
2350 struct hfi1_ibport *ibp;
2351 struct ib_other_headers *ohdr = packet->ohdr;
2352 struct rvt_swqe *wqe;
2353 enum ib_wc_status status;
2354 unsigned long flags;
2355 int diff;
2356 u64 val;
2357 u32 aeth;
2358 u32 psn = ib_bth_get_psn(packet->ohdr);
2359 u32 pmtu = qp->pmtu;
2360 u16 hdrsize = packet->hlen;
2361 u8 opcode = packet->opcode;
2362 u8 pad = packet->pad;
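/*
 * Trailing bytes not counted as payload: pad bytes, any 16B tail
 * byte(s) reported in packet->extra_byte, and the 4-byte ICRC
 * (SIZE_OF_CRC is in 32-bit words, hence the << 2).
 */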
2363 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
2364
2365 spin_lock_irqsave(&qp->s_lock, flags);
2366 trace_hfi1_ack(qp, psn);
2367
2368 /* Ignore invalid responses. */
2369 if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
2370 goto ack_done;
2371
2372 /* Ignore duplicate responses. */
2373 diff = cmp_psn(psn, qp->s_last_psn);
2374 if (unlikely(diff <= 0)) {
2375 /* Update credits for "ghost" ACKs */
2376 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
2377 aeth = be32_to_cpu(ohdr->u.aeth);
2378 if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
2379 rvt_get_credit(qp, aeth);
2380 }
2381 goto ack_done;
2382 }
2383
2384 /*
2385 * Skip everything other than the PSN we expect, if we are waiting
2386 * for a reply to a restarted RDMA read or atomic op.
2387 */
2388 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
2389 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
2390 goto ack_done;
2391 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
2392 }
2393
2394 if (unlikely(qp->s_acked == qp->s_tail))
2395 goto ack_done;
2396 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2397 status = IB_WC_SUCCESS;
2398
2399 switch (opcode) {
2400 case OP(ACKNOWLEDGE):
2401 case OP(ATOMIC_ACKNOWLEDGE):
2402 case OP(RDMA_READ_RESPONSE_FIRST):
2403 aeth = be32_to_cpu(ohdr->u.aeth);
2404 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
2405 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
2406 else
2407 val = 0;
2408 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
2409 opcode != OP(RDMA_READ_RESPONSE_FIRST))
2410 goto ack_done;
2411 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2412 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2413 goto ack_op_err;
2414 /*
2415 * If this is a response to a resent RDMA read, we
2416 * have to be careful to copy the data to the right
2417 * location.
2418 */
2419 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
2420 wqe, psn, pmtu);
2421 goto read_middle;
2422
2423 case OP(RDMA_READ_RESPONSE_MIDDLE):
2424 /* no AETH, no ACK */
2425 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
2426 goto ack_seq_err;
2427 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2428 goto ack_op_err;
2429read_middle:
2430 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2431 goto ack_len_err;
2432 if (unlikely(pmtu >= qp->s_rdma_read_len))
2433 goto ack_len_err;
2434
2435 /*
2436 * We got a response so update the timeout.
2437 * 4.096 usec. * (1 << qp->timeout)
2438 */
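/*
 * For example, a qp->timeout of 14 gives 4.096 usec * 2^14,
 * i.e. roughly 67 ms between retries.
 */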
2439 rvt_mod_retry_timer(qp);
2440 if (qp->s_flags & RVT_S_WAIT_ACK) {
2441 qp->s_flags &= ~RVT_S_WAIT_ACK;
2442 hfi1_schedule_send(qp);
2443 }
2444
2445 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
2446 qp->s_retry = qp->s_retry_cnt;
2447
2448 /*
2449 * Update the RDMA receive state but do the copy w/o
2450 * holding the locks and blocking interrupts.
2451 */
2452 qp->s_rdma_read_len -= pmtu;
2453 update_last_psn(qp, psn);
2454 spin_unlock_irqrestore(&qp->s_lock, flags);
2455 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
2456 data, pmtu, false, false);
2457 goto bail;
2458
2459 case OP(RDMA_READ_RESPONSE_ONLY):
2460 aeth = be32_to_cpu(ohdr->u.aeth);
2461 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
2462 goto ack_done;
2463 /*
2464 * Check that the data size is >= 0 && <= pmtu.
2465 * Remember to account for ICRC (4).
2466 */
2467 if (unlikely(tlen < (hdrsize + extra_bytes)))
2468 goto ack_len_err;
2469 /*
2470 * If this is a response to a resent RDMA read, we
2471 * have to be careful to copy the data to the right
2472 * location.
2473 */
2474 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2475 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
2476 wqe, psn, pmtu);
2477 goto read_last;
2478
2479 case OP(RDMA_READ_RESPONSE_LAST):
2480 /* ACKs READ req. */
2481 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
2482 goto ack_seq_err;
2483 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
2484 goto ack_op_err;
2485 /*
2486 * Check that the data size is >= 1 && <= pmtu.
2487 * Remember to account for ICRC (4).
2488 */
2489 if (unlikely(tlen <= (hdrsize + extra_bytes)))
2490 goto ack_len_err;
2491read_last:
2492 tlen -= hdrsize + extra_bytes;
2493 if (unlikely(tlen != qp->s_rdma_read_len))
2494 goto ack_len_err;
2495 aeth = be32_to_cpu(ohdr->u.aeth);
2496 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
2497 data, tlen, false, false);
2498 WARN_ON(qp->s_rdma_read_sge.num_sge);
2499 (void)do_rc_ack(qp, aeth, psn,
2500 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
2501 goto ack_done;
2502 }
2503
2504ack_op_err:
2505 status = IB_WC_LOC_QP_OP_ERR;
2506 goto ack_err;
2507
2508ack_seq_err:
2509 ibp = rcd_to_iport(rcd);
2510 rdma_seq_err(qp, ibp, psn, rcd);
2511 goto ack_done;
2512
2513ack_len_err:
2514 status = IB_WC_LOC_LEN_ERR;
2515ack_err:
2516 if (qp->s_last == qp->s_acked) {
2517 rvt_send_complete(qp, wqe, status);
2518 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2519 }
2520ack_done:
2521 spin_unlock_irqrestore(&qp->s_lock, flags);
2522bail:
2523 return;
2524}
2525
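/*
 * Cancel a deferred ACK/NAK: reset the deferred-ack count and, if the
 * QP is still queued on the receive context's wait list, dequeue it,
 * clear the pending-NAK flag and drop the reference taken when the QP
 * was queued.
 */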
2526static inline void rc_cancel_ack(struct rvt_qp *qp)
2527{
2528 qp->r_adefered = 0;
2529 if (list_empty(&qp->rspwait))
2530 return;
2531 list_del_init(&qp->rspwait);
2532 qp->r_flags &= ~RVT_R_RSP_NAK;
2533 rvt_put_qp(qp);
2534}
2535
2536/**
2537 * rc_rcv_error - process an incoming duplicate or error RC packet
2538 * @ohdr: the other headers for this packet
2539 * @data: the packet data
2540 * @qp: the QP for this packet
2541 * @opcode: the opcode for this packet
2542 * @psn: the packet sequence number for this packet
2543 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context
2544 *
2545 * This is called from hfi1_rc_rcv() to process an unexpected
2546 * incoming RC packet for the given QP.
2547 * Called at interrupt level.
2548 * Return 1 if no more processing is needed; otherwise return 0 to
2549 * schedule a response to be sent.
2550 */
2551static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
2552 struct rvt_qp *qp, u32 opcode, u32 psn,
2553 int diff, struct hfi1_ctxtdata *rcd)
2554{
2555 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2556 struct rvt_ack_entry *e;
2557 unsigned long flags;
2558 u8 prev;
2559 u8 mra; /* most recent ACK */
2560 bool old_req;
2561
2562 trace_hfi1_rcv_error(qp, psn);
2563 if (diff > 0) {
2564 /*
2565 * Packet sequence error.
2566 * A NAK will ACK earlier sends and RDMA writes.
2567 * Don't queue the NAK if we already sent one.
2568 */
2569 if (!qp->r_nak_state) {
2570 ibp->rvp.n_rc_seqnak++;
2571 qp->r_nak_state = IB_NAK_PSN_ERROR;
2572 /* Use the expected PSN. */
2573 qp->r_ack_psn = qp->r_psn;
2574 /*
2575 * Wait to send the sequence NAK until all packets
2576 * in the receive queue have been processed.
2577 * Otherwise, we end up propagating congestion.
2578 */
2579 rc_defered_ack(rcd, qp);
2580 }
2581 goto done;
2582 }
2583
2584 /*
2585 * Handle a duplicate request. Don't re-execute SEND, RDMA
2586 * write or atomic op. Don't NAK errors, just silently drop
2587 * the duplicate request. Note that r_sge, r_len, and
2588 * r_rcv_len may be in use so don't modify them.
2589 *
2590 * We are supposed to ACK the earliest duplicate PSN but we
2591 * can coalesce an outstanding duplicate ACK. We have to
2592 * send the earliest so that RDMA reads can be restarted at
2593 * the requester's expected PSN.
2594 *
2595 * First, find where this duplicate PSN falls within the
2596 * ACKs previously sent.
2597 * old_req is true if there is an older response that is scheduled
2598 * to be sent before sending this one.
2599 */
2600 e = NULL;
2601 old_req = true;
2602 ibp->rvp.n_rc_dupreq++;
2603
2604 spin_lock_irqsave(&qp->s_lock, flags);
2605
2606 e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
2607
2608 switch (opcode) {
2609 case OP(RDMA_READ_REQUEST): {
2610 struct ib_reth *reth;
2611 u32 offset;
2612 u32 len;
2613
2614 /*
2615 * If we didn't find the RDMA read request in the ack queue,
2616 * we can ignore this request.
2617 */
2618 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
2619 goto unlock_done;
2620 /* RETH comes after BTH */
2621 reth = &ohdr->u.rc.reth;
2622 /*
2623 * Address range must be a subset of the original
2624 * request and start on pmtu boundaries.
2625 * We reuse the old ack_queue slot since the requester
2626 * should not back up and request an earlier PSN for the
2627 * same request.
2628 */
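/*
 * For example (illustrative numbers): if the original READ started at
 * e->psn == 100 and the duplicate asks for psn == 103 with a 4096-byte
 * pmtu, the restart offset below is 3 * 4096 = 12288 bytes into the
 * original range.
 */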
2629 offset = delta_psn(psn, e->psn) * qp->pmtu;
2630 len = be32_to_cpu(reth->length);
2631 if (unlikely(offset + len != e->rdma_sge.sge_length))
2632 goto unlock_done;
2633 release_rdma_sge_mr(e);
2634 if (len != 0) {
2635 u32 rkey = be32_to_cpu(reth->rkey);
2636 u64 vaddr = get_ib_reth_vaddr(reth);
2637 int ok;
2638
2639 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
2640 IB_ACCESS_REMOTE_READ);
2641 if (unlikely(!ok))
2642 goto unlock_done;
2643 } else {
2644 e->rdma_sge.vaddr = NULL;
2645 e->rdma_sge.length = 0;
2646 e->rdma_sge.sge_length = 0;
2647 }
2648 e->psn = psn;
2649 if (old_req)
2650 goto unlock_done;
2651 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
2652 qp->s_acked_ack_queue = prev;
2653 qp->s_tail_ack_queue = prev;
2654 break;
2655 }
2656
2657 case OP(COMPARE_SWAP):
2658 case OP(FETCH_ADD): {
2659 /*
2660 * If we didn't find the atomic request in the ack queue
2661 * or the send engine is already backed up to send an
2662 * earlier entry, we can ignore this request.
2663 */
2664 if (!e || e->opcode != (u8)opcode || old_req)
2665 goto unlock_done;
2666 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
2667 qp->s_acked_ack_queue = prev;
2668 qp->s_tail_ack_queue = prev;
2669 break;
2670 }
2671
2672 default:
2673 /*
2674 * Ignore this operation if it doesn't request an ACK
2675 * or an earlier RDMA read or atomic is going to be resent.
2676 */
2677 if (!(psn & IB_BTH_REQ_ACK) || old_req)
2678 goto unlock_done;
2679 /*
2680 * Resend the most recent ACK if this request is
2681 * after all the previous RDMA reads and atomics.
2682 */
2683 if (mra == qp->r_head_ack_queue) {
2684 spin_unlock_irqrestore(&qp->s_lock, flags);
2685 qp->r_nak_state = 0;
2686 qp->r_ack_psn = qp->r_psn - 1;
2687 goto send_ack;
2688 }
2689
2690 /*
2691 * Resend the RDMA read or atomic op which
2692 * ACKs this duplicate request.
2693 */
2694 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
2695 qp->s_acked_ack_queue = mra;
2696 qp->s_tail_ack_queue = mra;
2697 break;
2698 }
2699 qp->s_ack_state = OP(ACKNOWLEDGE);
2700 qp->s_flags |= RVT_S_RESP_PENDING;
2701 qp->r_nak_state = 0;
2702 hfi1_schedule_send(qp);
2703
2704unlock_done:
2705 spin_unlock_irqrestore(&qp->s_lock, flags);
2706done:
2707 return 1;
2708
2709send_ack:
2710 return 0;
2711}
2712
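/*
 * Record a congestion event for this SL in the per-port congestion
 * log: mark the SL in the threshold event map, bump the event counter
 * and store the QPNs, SL, service type, remote LID and a timestamp in
 * 1.024 usec units.
 */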
2713static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
2714 u32 lqpn, u32 rqpn, u8 svc_type)
2715{
2716 struct opa_hfi1_cong_log_event_internal *cc_event;
2717 unsigned long flags;
2718
2719 if (sl >= OPA_MAX_SLS)
2720 return;
2721
2722 spin_lock_irqsave(&ppd->cc_log_lock, flags);
2723
2724 ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
2725 ppd->threshold_event_counter++;
2726
2727 cc_event = &ppd->cc_events[ppd->cc_log_idx++];
2728 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
2729 ppd->cc_log_idx = 0;
2730 cc_event->lqpn = lqpn & RVT_QPN_MASK;
2731 cc_event->rqpn = rqpn & RVT_QPN_MASK;
2732 cc_event->sl = sl;
2733 cc_event->svc_type = svc_type;
2734 cc_event->rlid = rlid;
2735 /* keep timestamp in units of 1.024 usec */
2736 cc_event->timestamp = ktime_get_ns() / 1024;
2737
2738 spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
2739}
2740
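/*
 * Handle a BECN for the given SL: raise the CCA timer's CCTI by the
 * configured increase (capped at ccti_limit), reprogram the link
 * inter-packet gap, arm the CCA hrtimer if it is not already running,
 * and log a congestion event once the CCTI reaches the trigger
 * threshold.
 */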
2741void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
2742 u32 rqpn, u8 svc_type)
2743{
2744 struct cca_timer *cca_timer;
2745 u16 ccti, ccti_incr, ccti_timer, ccti_limit;
2746 u8 trigger_threshold;
2747 struct cc_state *cc_state;
2748 unsigned long flags;
2749
2750 if (sl >= OPA_MAX_SLS)
2751 return;
2752
2753 cc_state = get_cc_state(ppd);
2754
2755 if (!cc_state)
2756 return;
2757
2758 /*
2759 * 1) increase CCTI (for this SL)
2760 * 2) select IPG (i.e., call set_link_ipg())
2761 * 3) start timer
2762 */
2763 ccti_limit = cc_state->cct.ccti_limit;
2764 ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
2765 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
2766 trigger_threshold =
2767 cc_state->cong_setting.entries[sl].trigger_threshold;
2768
2769 spin_lock_irqsave(&ppd->cca_timer_lock, flags);
2770
2771 cca_timer = &ppd->cca_timer[sl];
2772 if (cca_timer->ccti < ccti_limit) {
2773 if (cca_timer->ccti + ccti_incr <= ccti_limit)
2774 cca_timer->ccti += ccti_incr;
2775 else
2776 cca_timer->ccti = ccti_limit;
2777 set_link_ipg(ppd);
2778 }
2779
2780 ccti = cca_timer->ccti;
2781
2782 if (!hrtimer_active(&cca_timer->hrtimer)) {
2783 /* ccti_timer is in units of 1.024 usec */
2784 unsigned long nsec = 1024 * ccti_timer;
2785
2786 hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
2787 HRTIMER_MODE_REL_PINNED);
2788 }
2789
2790 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
2791
2792 if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
2793 log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
2794}
2795
2796/**
2797 * hfi1_rc_rcv - process an incoming RC packet
2798 * @packet: data packet information
2799 *
2800 * This is called from qp_rcv() to process an incoming RC packet
2801 * for the given QP.
2802 * May be called at interrupt level.
2803 */
2804void hfi1_rc_rcv(struct hfi1_packet *packet)
2805{
2806 struct hfi1_ctxtdata *rcd = packet->rcd;
2807 void *data = packet->payload;
2808 u32 tlen = packet->tlen;
2809 struct rvt_qp *qp = packet->qp;
2810 struct hfi1_qp_priv *qpriv = qp->priv;
2811 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2812 struct ib_other_headers *ohdr = packet->ohdr;
2813 u32 opcode = packet->opcode;
2814 u32 hdrsize = packet->hlen;
2815 u32 psn = ib_bth_get_psn(packet->ohdr);
2816 u32 pad = packet->pad;
2817 struct ib_wc wc;
2818 u32 pmtu = qp->pmtu;
2819 int diff;
2820 struct ib_reth *reth;
2821 unsigned long flags;
2822 int ret;
2823 bool copy_last = false, fecn;
2824 u32 rkey;
2825 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
2826
2827 lockdep_assert_held(&qp->r_lock);
2828
2829 if (hfi1_ruc_check_hdr(ibp, packet))
2830 return;
2831
2832 fecn = process_ecn(qp, packet);
2833 opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));
2834
2835 /*
2836 * Process responses (ACKs) before anything else. Note that the
2837 * packet sequence number will be for something in the send work
2838 * queue rather than the expected receive packet sequence number.
2839 * In other words, this QP is the requester.
2840 */
2841 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
2842 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
2843 rc_rcv_resp(packet);
2844 return;
2845 }
2846
2847 /* Compute 24 bits worth of difference. */
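/*
 * delta_psn() returns a sign-extended 24-bit difference, so PSN wrap
 * is handled; e.g. delta_psn(0x000001, 0xFFFFFF) evaluates to 2 rather
 * than -16777214.
 */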
2848 diff = delta_psn(psn, qp->r_psn);
2849 if (unlikely(diff)) {
2850 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
2851 return;
2852 goto send_ack;
2853 }
2854
2855 /* Check for opcode sequence errors. */
2856 switch (qp->r_state) {
2857 case OP(SEND_FIRST):
2858 case OP(SEND_MIDDLE):
2859 if (opcode == OP(SEND_MIDDLE) ||
2860 opcode == OP(SEND_LAST) ||
2861 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2862 opcode == OP(SEND_LAST_WITH_INVALIDATE))
2863 break;
2864 goto nack_inv;
2865
2866 case OP(RDMA_WRITE_FIRST):
2867 case OP(RDMA_WRITE_MIDDLE):
2868 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
2869 opcode == OP(RDMA_WRITE_LAST) ||
2870 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2871 break;
2872 goto nack_inv;
2873
2874 default:
2875 if (opcode == OP(SEND_MIDDLE) ||
2876 opcode == OP(SEND_LAST) ||
2877 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2878 opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
2879 opcode == OP(RDMA_WRITE_MIDDLE) ||
2880 opcode == OP(RDMA_WRITE_LAST) ||
2881 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2882 goto nack_inv;
2883 /*
2884 * Note that it is up to the requester to not send a new
2885 * RDMA read or atomic operation before receiving an ACK
2886 * for the previous operation.
2887 */
2888 break;
2889 }
2890
2891 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2892 rvt_comm_est(qp);
2893
2894 /* OK, process the packet. */
2895 switch (opcode) {
2896 case OP(SEND_FIRST):
2897 ret = rvt_get_rwqe(qp, false);
2898 if (ret < 0)
2899 goto nack_op_err;
2900 if (!ret)
2901 goto rnr_nak;
2902 qp->r_rcv_len = 0;
2903 fallthrough;
2904 case OP(SEND_MIDDLE):
2905 case OP(RDMA_WRITE_MIDDLE):
2906send_middle:
2907 /* Check for invalid length: PMTU mismatch or posted rwqe len exceeded. */
2908 /*
2909 * There is no padding for 9B packets, but 16B packets
2910 * come in with some padding since we always add the
2911 * CRC and LT bytes, which must be flit aligned.
2912 */
2913 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2914 goto nack_inv;
2915 qp->r_rcv_len += pmtu;
2916 if (unlikely(qp->r_rcv_len > qp->r_len))
2917 goto nack_inv;
2918 rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
2919 break;
2920
2921 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
2922 /* consume RWQE */
2923 ret = rvt_get_rwqe(qp, true);
2924 if (ret < 0)
2925 goto nack_op_err;
2926 if (!ret)
2927 goto rnr_nak;
2928 goto send_last_imm;
2929
2930 case OP(SEND_ONLY):
2931 case OP(SEND_ONLY_WITH_IMMEDIATE):
2932 case OP(SEND_ONLY_WITH_INVALIDATE):
2933 ret = rvt_get_rwqe(qp, false);
2934 if (ret < 0)
2935 goto nack_op_err;
2936 if (!ret)
2937 goto rnr_nak;
2938 qp->r_rcv_len = 0;
2939 if (opcode == OP(SEND_ONLY))
2940 goto no_immediate_data;
2941 if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
2942 goto send_last_inv;
2943 fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
2944 case OP(SEND_LAST_WITH_IMMEDIATE):
2945send_last_imm:
2946 wc.ex.imm_data = ohdr->u.imm_data;
2947 wc.wc_flags = IB_WC_WITH_IMM;
2948 goto send_last;
2949 case OP(SEND_LAST_WITH_INVALIDATE):
2950send_last_inv:
2951 rkey = be32_to_cpu(ohdr->u.ieth);
2952 if (rvt_invalidate_rkey(qp, rkey))
2953 goto no_immediate_data;
2954 wc.ex.invalidate_rkey = rkey;
2955 wc.wc_flags = IB_WC_WITH_INVALIDATE;
2956 goto send_last;
2957 case OP(RDMA_WRITE_LAST):
2958 copy_last = rvt_is_user_qp(qp);
2959 fallthrough;
2960 case OP(SEND_LAST):
2961no_immediate_data:
2962 wc.wc_flags = 0;
2963 wc.ex.imm_data = 0;
2964send_last:
2965 /* Check for invalid length. */
2966 /* LAST len should be >= 1 */
2967 if (unlikely(tlen < (hdrsize + extra_bytes)))
2968 goto nack_inv;
2969 /* Don't count the CRC (and padding and LT byte for 16B). */
2970 tlen -= (hdrsize + extra_bytes);
2971 wc.byte_len = tlen + qp->r_rcv_len;
2972 if (unlikely(wc.byte_len > qp->r_len))
2973 goto nack_inv;
2974 rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
2975 rvt_put_ss(&qp->r_sge);
2976 qp->r_msn++;
2977 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
2978 break;
2979 wc.wr_id = qp->r_wr_id;
2980 wc.status = IB_WC_SUCCESS;
2981 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2982 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2983 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2984 else
2985 wc.opcode = IB_WC_RECV;
2986 wc.qp = &qp->ibqp;
2987 wc.src_qp = qp->remote_qpn;
2988 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
2989 /*
2990 * It seems that IB mandates the presence of an SL in a
2991 * work completion only for the UD transport (see section
2992 * 11.4.2 of IBTA Vol. 1).
2993 *
2994 * However, the way the SL is chosen below is consistent
2995 * with the way that IB/qib works and is trying to avoid
2996 * introducing incompatibilities.
2997 *
2998 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
2999 */
3000 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3001 /* zero fields that are N/A */
3002 wc.vendor_err = 0;
3003 wc.pkey_index = 0;
3004 wc.dlid_path_bits = 0;
3005 wc.port_num = 0;
3006 /* Signal completion event if the solicited bit is set. */
3007 rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
3008 break;
3009
3010 case OP(RDMA_WRITE_ONLY):
3011 copy_last = rvt_is_user_qp(qp);
3012 fallthrough;
3013 case OP(RDMA_WRITE_FIRST):
3014 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
3015 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3016 goto nack_inv;
3017 /* consume RWQE */
3018 reth = &ohdr->u.rc.reth;
3019 qp->r_len = be32_to_cpu(reth->length);
3020 qp->r_rcv_len = 0;
3021 qp->r_sge.sg_list = NULL;
3022 if (qp->r_len != 0) {
3023 u32 rkey = be32_to_cpu(reth->rkey);
3024 u64 vaddr = get_ib_reth_vaddr(reth);
3025 int ok;
3026
3027 /* Check rkey & NAK */
3028 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
3029 rkey, IB_ACCESS_REMOTE_WRITE);
3030 if (unlikely(!ok))
3031 goto nack_acc;
3032 qp->r_sge.num_sge = 1;
3033 } else {
3034 qp->r_sge.num_sge = 0;
3035 qp->r_sge.sge.mr = NULL;
3036 qp->r_sge.sge.vaddr = NULL;
3037 qp->r_sge.sge.length = 0;
3038 qp->r_sge.sge.sge_length = 0;
3039 }
3040 if (opcode == OP(RDMA_WRITE_FIRST))
3041 goto send_middle;
3042 else if (opcode == OP(RDMA_WRITE_ONLY))
3043 goto no_immediate_data;
3044 ret = rvt_get_rwqe(qp, true);
3045 if (ret < 0)
3046 goto nack_op_err;
3047 if (!ret) {
3048 /* peer will send again */
3049 rvt_put_ss(&qp->r_sge);
3050 goto rnr_nak;
3051 }
3052 wc.ex.imm_data = ohdr->u.rc.imm_data;
3053 wc.wc_flags = IB_WC_WITH_IMM;
3054 goto send_last;
3055
3056 case OP(RDMA_READ_REQUEST): {
3057 struct rvt_ack_entry *e;
3058 u32 len;
3059 u8 next;
3060
3061 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3062 goto nack_inv;
3063 next = qp->r_head_ack_queue + 1;
3064 /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
3065 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3066 next = 0;
3067 spin_lock_irqsave(&qp->s_lock, flags);
3068 if (unlikely(next == qp->s_acked_ack_queue)) {
3069 if (!qp->s_ack_queue[next].sent)
3070 goto nack_inv_unlck;
3071 update_ack_queue(qp, next);
3072 }
3073 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3074 release_rdma_sge_mr(e);
3075 reth = &ohdr->u.rc.reth;
3076 len = be32_to_cpu(reth->length);
3077 if (len) {
3078 u32 rkey = be32_to_cpu(reth->rkey);
3079 u64 vaddr = get_ib_reth_vaddr(reth);
3080 int ok;
3081
3082 /* Check rkey & NAK */
3083 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
3084 rkey, IB_ACCESS_REMOTE_READ);
3085 if (unlikely(!ok))
3086 goto nack_acc_unlck;
3087 /*
3088 * Update the next expected PSN. We add 1 later
3089 * below, so only add the remainder here.
3090 */
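/*
 * For example (illustrative numbers): a 10000-byte READ with a
 * 4096-byte pmtu takes three response packets; rvt_div_mtu(qp, 9999)
 * adds 2 here and the qp->r_psn++ below accounts for the third.
 */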
3091 qp->r_psn += rvt_div_mtu(qp, len - 1);
3092 } else {
3093 e->rdma_sge.mr = NULL;
3094 e->rdma_sge.vaddr = NULL;
3095 e->rdma_sge.length = 0;
3096 e->rdma_sge.sge_length = 0;
3097 }
3098 e->opcode = opcode;
3099 e->sent = 0;
3100 e->psn = psn;
3101 e->lpsn = qp->r_psn;
3102 /*
3103 * We need to increment the MSN here instead of when we
3104 * finish sending the result since a duplicate request would
3105 * increment it more than once.
3106 */
3107 qp->r_msn++;
3108 qp->r_psn++;
3109 qp->r_state = opcode;
3110 qp->r_nak_state = 0;
3111 qp->r_head_ack_queue = next;
3112 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3113
3114 /* Schedule the send engine. */
3115 qp->s_flags |= RVT_S_RESP_PENDING;
3116 if (fecn)
3117 qp->s_flags |= RVT_S_ECN;
3118 hfi1_schedule_send(qp);
3119
3120 spin_unlock_irqrestore(&qp->s_lock, flags);
3121 return;
3122 }
3123
3124 case OP(COMPARE_SWAP):
3125 case OP(FETCH_ADD): {
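/*
 * A COMPARE_SWAP aimed at the special HFI1_VERBS_E_ATOMIC_VADDR is
 * not a real atomic; it carries an OPFN negotiation exchange and is
 * handed to opfn_conn_response() below.
 */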
3126 struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
3127 u64 vaddr = get_ib_ateth_vaddr(ateth);
3128 bool opfn = opcode == OP(COMPARE_SWAP) &&
3129 vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
3130 struct rvt_ack_entry *e;
3131 atomic64_t *maddr;
3132 u64 sdata;
3133 u32 rkey;
3134 u8 next;
3135
3136 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
3137 !opfn))
3138 goto nack_inv;
3139 next = qp->r_head_ack_queue + 1;
3140 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3141 next = 0;
3142 spin_lock_irqsave(&qp->s_lock, flags);
3143 if (unlikely(next == qp->s_acked_ack_queue)) {
3144 if (!qp->s_ack_queue[next].sent)
3145 goto nack_inv_unlck;
3146 update_ack_queue(qp, next);
3147 }
3148 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3149 release_rdma_sge_mr(e);
3150 /* Process OPFN special virtual address */
3151 if (opfn) {
3152 opfn_conn_response(qp, e, ateth);
3153 goto ack;
3154 }
3155 if (unlikely(vaddr & (sizeof(u64) - 1)))
3156 goto nack_inv_unlck;
3157 rkey = be32_to_cpu(ateth->rkey);
3158 /* Check rkey & NAK */
3159 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3160 vaddr, rkey,
3161 IB_ACCESS_REMOTE_ATOMIC)))
3162 goto nack_acc_unlck;
3163 /* Perform atomic OP and save result. */
3164 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3165 sdata = get_ib_ateth_swap(ateth);
3166 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
3167 (u64)atomic64_add_return(sdata, maddr) - sdata :
3168 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3169 get_ib_ateth_compare(ateth),
3170 sdata);
3171 rvt_put_mr(qp->r_sge.sge.mr);
3172 qp->r_sge.num_sge = 0;
3173ack:
3174 e->opcode = opcode;
3175 e->sent = 0;
3176 e->psn = psn;
3177 e->lpsn = psn;
3178 qp->r_msn++;
3179 qp->r_psn++;
3180 qp->r_state = opcode;
3181 qp->r_nak_state = 0;
3182 qp->r_head_ack_queue = next;
3183 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3184
3185 /* Schedule the send engine. */
3186 qp->s_flags |= RVT_S_RESP_PENDING;
3187 if (fecn)
3188 qp->s_flags |= RVT_S_ECN;
3189 hfi1_schedule_send(qp);
3190
3191 spin_unlock_irqrestore(&qp->s_lock, flags);
3192 return;
3193 }
3194
3195 default:
3196 /* NAK unknown opcodes. */
3197 goto nack_inv;
3198 }
3199 qp->r_psn++;
3200 qp->r_state = opcode;
3201 qp->r_ack_psn = psn;
3202 qp->r_nak_state = 0;
3203 /* Send an ACK if requested or required. */
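/*
 * The ACK goes out immediately when the packet is FECN marked, when
 * numpkt is zero, or once HFI1_PSN_CREDIT ACKs have been deferred;
 * otherwise it is deferred and coalesced via rc_defered_ack().
 */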
3204 if (psn & IB_BTH_REQ_ACK || fecn) {
3205 if (packet->numpkt == 0 || fecn ||
3206 qp->r_adefered >= HFI1_PSN_CREDIT) {
3207 rc_cancel_ack(qp);
3208 goto send_ack;
3209 }
3210 qp->r_adefered++;
3211 rc_defered_ack(rcd, qp);
3212 }
3213 return;
3214
3215rnr_nak:
3216 qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
3217 qp->r_ack_psn = qp->r_psn;
3218 /* Queue RNR NAK for later */
3219 rc_defered_ack(rcd, qp);
3220 return;
3221
3222nack_op_err:
3223 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3224 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
3225 qp->r_ack_psn = qp->r_psn;
3226 /* Queue NAK for later */
3227 rc_defered_ack(rcd, qp);
3228 return;
3229
3230nack_inv_unlck:
3231 spin_unlock_irqrestore(&qp->s_lock, flags);
3232nack_inv:
3233 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3234 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3235 qp->r_ack_psn = qp->r_psn;
3236 /* Queue NAK for later */
3237 rc_defered_ack(rcd, qp);
3238 return;
3239
3240nack_acc_unlck:
3241 spin_unlock_irqrestore(&qp->s_lock, flags);
3242nack_acc:
3243 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3244 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
3245 qp->r_ack_psn = qp->r_psn;
3246send_ack:
3247 hfi1_send_rc_ack(packet, fecn);
3248}
3249
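/**
 * hfi1_rc_hdrerr - handle an RC packet received with a header error
 * @rcd: the receive context
 * @packet: information about the offending packet
 * @qp: the RC QP the packet was addressed to
 *
 * For request packets at or beyond the expected PSN, queue a deferred
 * PSN-sequence NAK (unless one is already outstanding) so the requester
 * backs up and resends.
 */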
3250void hfi1_rc_hdrerr(
3251 struct hfi1_ctxtdata *rcd,
3252 struct hfi1_packet *packet,
3253 struct rvt_qp *qp)
3254{
3255 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
3256 int diff;
3257 u32 opcode;
3258 u32 psn;
3259
3260 if (hfi1_ruc_check_hdr(ibp, packet))
3261 return;
3262
3263 psn = ib_bth_get_psn(packet->ohdr);
3264 opcode = ib_bth_get_opcode(packet->ohdr);
3265
3266 /* Only deal with RDMA Writes for now */
3267 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
3268 diff = delta_psn(psn, qp->r_psn);
3269 if (!qp->r_nak_state && diff >= 0) {
3270 ibp->rvp.n_rc_seqnak++;
3271 qp->r_nak_state = IB_NAK_PSN_ERROR;
3272 /* Use the expected PSN. */
3273 qp->r_ack_psn = qp->r_psn;
3274 /*
3275 * Wait to send the sequence
3276 * NAK until all packets
3277 * in the receive queue have
3278 * been processed.
3279 * Otherwise, we end up
3280 * propagating congestion.
3281 */
3282 rc_defered_ack(rcd, qp);
3283 } /* Out of sequence NAK */
3284 } /* QP Request NAKs */
3285}