// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2020, Oracle and/or its affiliates.
 *
 * Support for reverse-direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

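	/* The advertised slot count is fixed at half of
	 * RPCRDMA_BACKWARD_WRS; the @reqs hint from the upper layer
	 * is recorded only by the trace point below.
	 */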
	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	size_t maxmsg;

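	/* Backchannel messages are sent and received inline, so a
	 * message must fit within the connection's inline thresholds
	 * and leave room for the fixed RPC-over-RDMA transport header.
	 */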
	maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

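/* Cap the number of concurrent backchannel requests at half of the
 * Work Requests reserved for the backward direction.
 */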
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
	return RPCRDMA_BACKWARD_WRS >> 1;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

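	/* Reserve 7 XDR words (28 bytes) for the fixed transport
	 * header: XID, RPC-over-RDMA version, credit value, the
	 * rdma_msg procedure, and three empty chunk lists.
	 */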
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch_pullup))
		return -EIO;

	trace_xprtrdma_cb_reply(r_xprt, rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

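	/* frwr_send() posts the Send WR carrying the Reply. A posting
	 * failure means the connection is no longer usable, so close it.
	 */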
	if (frwr_send(r_xprt, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

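	/* bc_pa_lock is dropped around each rpcrdma_req_destroy() call
	 * so the request's buffers are not freed while the lock is held.
	 */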
	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_rep *rep = req->rl_reply;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

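	/* Return the Receive buffer to the buffer pool, park the rqst
	 * back on the transport's pre-allocation list, and drop the
	 * transport reference taken when the Call was received.
	 */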
	rpcrdma_rep_put(&r_xprt->rx_buf, rep);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	xprt_put(xprt);
}

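/* Take a free backchannel rqst from the pre-allocation list, or
 * allocate a fresh one. Allocation is capped at RPCRDMA_BACKWARD_WRS
 * rqsts to keep a remote from exhausting our resources.
 */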
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

	size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size);
	if (!req)
		return NULL;
	if (rpcrdma_req_setup(r_xprt, req)) {
		rpcrdma_req_destroy(req);
		return NULL;
	}

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a reverse-direction Call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *	o Backchannel credits are ignored, just as the NFS server
 *	  forechannel currently does
 *	o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *	  No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

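	/* Peek at the start of the incoming Call; a zero-length inline
	 * decode returns the current position without consuming bytes.
	 */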
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, (int)size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

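	/* Point rq_rcv_buf directly at the Call message in the Receive
	 * buffer; the message is not copied.
	 */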
	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(r_xprt, rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	xprt_get(xprt);
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}