/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

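/* Free a backchannel rpc_rqst: unlink its rpcrdma_req from the
 * transport's list of requests, release the req's buffers, then
 * free the rpc_rqst itself.
 */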
static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
                                 struct rpc_rqst *rqst)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

        spin_lock(&buf->rb_reqslock);
        list_del(&req->rl_all);
        spin_unlock(&buf->rb_reqslock);

        rpcrdma_destroy_req(&r_xprt->rx_ia, req);

        kfree(rqst);
}

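/* Allocate an rpcrdma_req for a backchannel rqst, along with the
 * DMA-mapped buffers it needs: one regbuf for the RPC/RDMA transport
 * header and one for the RPC reply message, sized by the connection's
 * inline thresholds.
 */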
static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
                                 struct rpc_rqst *rqst)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_regbuf *rb;
        struct rpcrdma_req *req;
        struct xdr_buf *buf;
        size_t size;

        req = rpcrdma_create_req(r_xprt);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->rl_backchannel = true;

        size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
        rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
        if (IS_ERR(rb))
                goto out_fail;
        req->rl_rdmabuf = rb;

        size += RPCRDMA_INLINE_READ_THRESHOLD(rqst);
        rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
        if (IS_ERR(rb))
                goto out_fail;
        rb->rg_owner = req;
        req->rl_sendbuf = rb;
        /* so that rpcr_to_rdmar works when receiving a request */
        rqst->rq_buffer = (void *)req->rl_sendbuf->rg_base;

        buf = &rqst->rq_snd_buf;
        buf->head[0].iov_base = rqst->rq_buffer;
        buf->head[0].iov_len = 0;
        buf->tail[0].iov_base = NULL;
        buf->tail[0].iov_len = 0;
        buf->page_len = 0;
        buf->len = 0;
        buf->buflen = size;

        return 0;

out_fail:
        rpcrdma_bc_free_rqst(r_xprt, rqst);
        return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of rep's. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
                                 unsigned int count)
{
        struct rpcrdma_rep *rep;
        int rc = 0;

        while (count--) {
                rep = rpcrdma_create_rep(r_xprt);
                if (IS_ERR(rep)) {
                        pr_err("RPC: %s: reply buffer alloc failed\n",
                               __func__);
                        rc = PTR_ERR(rep);
                        break;
                }

                rpcrdma_recv_buffer_put(rep);
        }

        return rc;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
        struct rpc_rqst *rqst;
        unsigned int i;
        int rc;

        /* The backchannel reply path returns each rpc_rqst to the
         * bc_pa_list _after_ the reply is sent. If the server is
         * faster than the client, it can send another backward
         * direction request before the rpc_rqst is returned to the
         * list. The client rejects the request in this case.
         *
         * Twice as many rpc_rqsts are prepared to ensure there is
         * always an rpc_rqst available as soon as a reply is sent.
         */
        if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
                goto out_err;

        for (i = 0; i < (reqs << 1); i++) {
                rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
                if (!rqst) {
                        pr_err("RPC: %s: Failed to create bc rpc_rqst\n",
                               __func__);
                        goto out_free;
                }
                dprintk("RPC: %s: new rqst %p\n", __func__, rqst);

                rqst->rq_xprt = &r_xprt->rx_xprt;
                INIT_LIST_HEAD(&rqst->rq_list);
                INIT_LIST_HEAD(&rqst->rq_bc_list);

                if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
                        goto out_free;

                spin_lock_bh(&xprt->bc_pa_lock);
                list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
                spin_unlock_bh(&xprt->bc_pa_lock);
        }

        rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
        if (rc)
                goto out_free;

        rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
        if (rc)
                goto out_free;

        buffer->rb_bc_srv_max_requests = reqs;
        request_module("svcrdma");

        return 0;

out_free:
        xprt_rdma_bc_destroy(xprt, reqs);

out_err:
        pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
        return -ENOMEM;
}

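/* Note on sizing: each expected backchannel request consumes one
 * posted receive buffer for the incoming call and one send buffer
 * for the reply, so "reqs" extra receives are posted while twice
 * that many rpc_rqsts are provisioned, as explained above.
 */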
/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
        int ret;

        ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
        if (ret < 0)
                return ret;
        return 0;
}
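/* The "rdma-bc" class is expected to be provided by the svcrdma
 * module, which xprt_rdma_bc_setup() loads via request_module().
 */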

/**
 * rpcrdma_bc_marshal_reply - Marshal a backward direction RPC reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success.
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_msg *headerp;
        size_t rpclen;

        headerp = rdmab_to_msg(req->rl_rdmabuf);
        headerp->rm_xid = rqst->rq_xid;
        headerp->rm_vers = rpcrdma_version;
        headerp->rm_credit =
                cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
        headerp->rm_type = rdma_msg;
        headerp->rm_body.rm_chunks[0] = xdr_zero;
        headerp->rm_body.rm_chunks[1] = xdr_zero;
        headerp->rm_body.rm_chunks[2] = xdr_zero;

        rpclen = rqst->rq_svec[0].iov_len;

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
        pr_info("RPC: %s: rpclen %zd headerp 0x%p lkey 0x%x\n",
                __func__, rpclen, headerp, rdmab_lkey(req->rl_rdmabuf));
        pr_info("RPC: %s: RPC/RDMA: %*ph\n",
                __func__, (int)RPCRDMA_HDRLEN_MIN, headerp);
        pr_info("RPC: %s: RPC: %*ph\n",
                __func__, (int)rpclen, rqst->rq_svec[0].iov_base);
#endif

        req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
        req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN;
        req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

        req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
        req->rl_send_iov[1].length = rpclen;
        req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

        req->rl_niovs = 2;
        return 0;
}

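/* The marshaled reply is sent as two SGEs: the fixed-size RPC/RDMA
 * header built above in rl_rdmabuf, followed by the RPC reply message
 * in rl_sendbuf. All three chunk lists are empty (xdr_zero), so the
 * reply must fit inline.
 */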
/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpc_rqst *rqst, *tmp;

        spin_lock_bh(&xprt->bc_pa_lock);
        list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                list_del(&rqst->rq_bc_pa_list);
                spin_unlock_bh(&xprt->bc_pa_lock);

                rpcrdma_bc_free_rqst(r_xprt, rqst);

                spin_lock_bh(&xprt->bc_pa_lock);
        }
        spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;

        dprintk("RPC: %s: freeing rqst %p (req %p)\n",
                __func__, rqst, rpcr_to_rdmar(rqst));

        smp_mb__before_atomic();
        WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
        clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
        smp_mb__after_atomic();

        spin_lock_bh(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Called in the RPC reply handler, which runs in a tasklet.
 * Be quick about it.
 *
 * Operational assumptions:
 *      o Backchannel credits are ignored, just as the NFS server
 *        forechannel currently does
 *      o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *        No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
                             struct rpcrdma_rep *rep)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_msg *headerp;
        struct svc_serv *bc_serv;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        struct xdr_buf *buf;
        size_t size;
        __be32 *p;

        headerp = rdmab_to_msg(rep->rr_rdmabuf);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
        pr_info("RPC: %s: callback XID %08x, length=%u\n",
                __func__, be32_to_cpu(headerp->rm_xid), rep->rr_len);
        pr_info("RPC: %s: %*ph\n", __func__, rep->rr_len, headerp);
#endif

        /* Sanity check:
         * Need at least enough bytes for RPC/RDMA header, as code
         * here references the header fields by array offset. Also,
         * backward calls are always inline, so ensure there
         * are some bytes beyond the RPC/RDMA header.
         */
        if (rep->rr_len < RPCRDMA_HDRLEN_MIN + 24)
                goto out_short;
        p = (__be32 *)((unsigned char *)headerp + RPCRDMA_HDRLEN_MIN);
        size = rep->rr_len - RPCRDMA_HDRLEN_MIN;

        /* Grab a free bc rqst */
        spin_lock(&xprt->bc_pa_lock);
        if (list_empty(&xprt->bc_pa_list)) {
                spin_unlock(&xprt->bc_pa_lock);
                goto out_overflow;
        }
        rqst = list_first_entry(&xprt->bc_pa_list,
                                struct rpc_rqst, rq_bc_pa_list);
        list_del(&rqst->rq_bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
        dprintk("RPC: %s: using rqst %p\n", __func__, rqst);

        /* Prepare rqst */
        rqst->rq_reply_bytes_recvd = 0;
        rqst->rq_bytes_sent = 0;
        rqst->rq_xid = headerp->rm_xid;

        rqst->rq_private_buf.len = size;
        set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

        buf = &rqst->rq_rcv_buf;
        memset(buf, 0, sizeof(*buf));
        buf->head[0].iov_base = p;
        buf->head[0].iov_len = size;
        buf->len = size;

        /* The receive buffer has to be hooked to the rpcrdma_req
         * so that it can be reposted after the server is done
         * parsing it but just before sending the backward
         * direction reply.
         */
        req = rpcr_to_rdmar(rqst);
        dprintk("RPC: %s: attaching rep %p to req %p\n",
                __func__, rep, req);
        req->rl_reply = rep;

        /* Defeat the retransmit detection logic in send_request */
        req->rl_connect_cookie = 0;

        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);

        wake_up(&bc_serv->sv_cb_waitq);

        r_xprt->rx_stats.bcall_count++;
        return;

out_overflow:
        pr_warn("RPC/RDMA backchannel overflow\n");
        xprt_disconnect_done(xprt);
        /* This receive buffer gets reposted automatically
         * when the connection is re-established.
         */
        return;

out_short:
        pr_warn("RPC/RDMA short backward direction call\n");

        if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
                xprt_disconnect_done(xprt);
        else
                pr_warn("RPC: %s: reposting rep %p\n",
                        __func__, rep);
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2020, Oracle and/or its affiliates.
 *
 * Support for reverse-direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
        trace_xprtrdma_cb_setup(r_xprt, reqs);
        return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        size_t maxmsg;

        maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv);
        maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
        return maxmsg - RPCRDMA_HDRLEN_MIN;
}

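/* Return the number of backchannel request slots the generic RPC
 * layer may use; this matches the cap recorded by xprt_rdma_bc_setup().
 */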
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
        return RPCRDMA_BACKWARD_WRS >> 1;
}

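/* Build the RPC/RDMA transport header for a backchannel reply in
 * rl_rdmabuf using an xdr_stream, then map the header and the reply
 * in rq_snd_buf as Send SGEs. All chunk lists are empty, so the
 * reply goes entirely inline.
 */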
static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        __be32 *p;

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
                        rdmab_data(req->rl_rdmabuf), rqst);

        p = xdr_reserve_space(&req->rl_stream, 28);
        if (unlikely(!p))
                return -EIO;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
        *p++ = rdma_msg;
        *p++ = xdr_zero;
        *p++ = xdr_zero;
        *p = xdr_zero;

        if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
                                      &rqst->rq_snd_buf, rpcrdma_noch_pullup))
                return -EIO;

        trace_xprtrdma_cb_reply(r_xprt, rqst);
        return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *      %0 if the RPC message has been sent
 *      %-ENOTCONN if the caller should reconnect and call again
 *      %-EIO if a permanent error occurred and the request was not
 *              sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        int rc;

        if (!xprt_connected(xprt))
                return -ENOTCONN;

        if (!xprt_request_get_cong(xprt, rqst))
                return -EBADSLT;

        rc = rpcrdma_bc_marshal_reply(rqst);
        if (rc < 0)
                goto failed_marshal;

        if (frwr_send(r_xprt, req))
                goto drop_connection;
        return 0;

failed_marshal:
        if (rc != -ENOTCONN)
                return rc;
drop_connection:
        xprt_rdma_close(xprt);
        return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpc_rqst *rqst, *tmp;

        spin_lock(&xprt->bc_pa_lock);
        list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                list_del(&rqst->rq_bc_pa_list);
                spin_unlock(&xprt->bc_pa_lock);

                rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

                spin_lock(&xprt->bc_pa_lock);
        }
        spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_rep *rep = req->rl_reply;
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        rpcrdma_rep_put(&r_xprt->rx_buf, rep);
        req->rl_reply = NULL;

        spin_lock(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
        xprt_put(xprt);
}

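/* Return a free backchannel rpc_rqst from the transport's bc_pa_list,
 * or create one on demand, up to a total of RPCRDMA_BACKWARD_WRS
 * requests, to keep a remote sender from exhausting client resources.
 */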
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        size_t size;

        spin_lock(&xprt->bc_pa_lock);
        rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
                                        rq_bc_pa_list);
        if (!rqst)
                goto create_req;
        list_del(&rqst->rq_bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
        return rqst;

create_req:
        spin_unlock(&xprt->bc_pa_lock);

        /* Set a limit to prevent a remote from overrunning our resources.
         */
        if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
                return NULL;

        size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
        req = rpcrdma_req_create(r_xprt, size);
        if (!req)
                return NULL;
        if (rpcrdma_req_setup(r_xprt, req)) {
                rpcrdma_req_destroy(req);
                return NULL;
        }

        xprt->bc_alloc_count++;
        rqst = &req->rl_slot;
        rqst->rq_xprt = xprt;
        __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
        xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
        return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a reverse-direction Call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *      o Backchannel credits are ignored, just as the NFS server
 *        forechannel currently does
 *      o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *        No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
                             struct rpcrdma_rep *rep)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct svc_serv *bc_serv;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        struct xdr_buf *buf;
        size_t size;
        __be32 *p;

        p = xdr_inline_decode(&rep->rr_stream, 0);
        size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
        pr_info("RPC: %s: callback XID %08x, length=%u\n",
                __func__, be32_to_cpup(p), size);
        pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

        rqst = rpcrdma_bc_rqst_get(r_xprt);
        if (!rqst)
                goto out_overflow;

        rqst->rq_reply_bytes_recvd = 0;
        rqst->rq_xid = *p;

        rqst->rq_private_buf.len = size;

        buf = &rqst->rq_rcv_buf;
        memset(buf, 0, sizeof(*buf));
        buf->head[0].iov_base = p;
        buf->head[0].iov_len = size;
        buf->len = size;

        /* The receive buffer has to be hooked to the rpcrdma_req
         * so that it is not released while the req is pointing
         * to its buffer, and so that it can be reposted after
         * the Upper Layer is done decoding it.
         */
        req = rpcr_to_rdmar(rqst);
        req->rl_reply = rep;
        trace_xprtrdma_cb_call(r_xprt, rqst);

        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
        xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);

        wake_up(&bc_serv->sv_cb_waitq);

        r_xprt->rx_stats.bcall_count++;
        return;

out_overflow:
        pr_warn("RPC/RDMA backchannel overflow\n");
        xprt_force_disconnect(xprt);
        /* This receive buffer gets reposted automatically
         * when the connection is re-established.
         */
        return;
}