/******************************************************************************

(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
        return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
        atomic_add(n, &xprt->bc_free_slots);
        xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
        atomic_sub(n, &xprt->bc_free_slots);
        return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
        struct xdr_buf *xbufp;

        dprintk("RPC: free allocations for req= %p\n", req);
        WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        xbufp = &req->rq_rcv_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        xbufp = &req->rq_snd_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        kfree(req);
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
        struct page *page;
        /* Preallocate one XDR receive buffer */
        page = alloc_page(gfp_flags);
        if (page == NULL)
                return -ENOMEM;
        buf->head[0].iov_base = page_address(page);
        buf->head[0].iov_len = PAGE_SIZE;
        buf->tail[0].iov_base = NULL;
        buf->tail[0].iov_len = 0;
        buf->page_len = 0;
        buf->len = 0;
        buf->buflen = PAGE_SIZE;
        return 0;
}
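
/*
 * Note (illustrative, added for clarity): on success the xdr_buf
 * describes exactly one page, held entirely in the head iovec:
 *
 *      buf->head[0].iov_len == PAGE_SIZE
 *      buf->page_len == 0 and buf->tail[0].iov_len == 0
 *      buf->buflen == PAGE_SIZE, buf->len == 0 until data arrives
 */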

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
        struct rpc_rqst *req;

        /* Pre-allocate one backchannel rpc_rqst */
        req = kzalloc(sizeof(*req), gfp_flags);
        if (req == NULL)
                return NULL;

        req->rq_xprt = xprt;
        INIT_LIST_HEAD(&req->rq_list);
        INIT_LIST_HEAD(&req->rq_bc_list);

        /* Preallocate one XDR receive buffer */
        if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
                printk(KERN_ERR "Failed to create bc receive xbuf\n");
                goto out_free;
        }
        req->rq_rcv_buf.len = PAGE_SIZE;

        /* Preallocate one XDR send buffer */
        if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
                printk(KERN_ERR "Failed to create bc snd xbuf\n");
                goto out_free;
        }
        return req;
out_free:
        xprt_free_allocation(req);
        return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel. This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt. The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt. Any one of these resources may be used by an
 * incoming callback request. It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large. For example, a pNFS server
 * using multiple deviceids. The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests. Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs. The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        if (!xprt->ops->bc_setup)
                return 0;
        return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
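
/*
 * A minimal usage sketch (not part of the original file): a session
 * owner reserves backchannel slots at setup time and releases the
 * same count on teardown. The function name and slot count below
 * are illustrative assumptions.
 */
#if 0   /* example only */
static int example_create_session(struct rpc_xprt *xprt)
{
        int rc;

        /* Reserve one preallocated rpc_rqst per expected callback slot */
        rc = xprt_setup_backchannel(xprt, 1);
        if (rc != 0)
                return rc;      /* e.g. -ENOMEM from the transport's bc_setup */

        /* ... run the session; on teardown, release what was reserved ... */
        xprt_destroy_backchannel(xprt, 1);
        return 0;
}
#endif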

int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        struct rpc_rqst *req;
        struct list_head tmp_list;
        int i;

        dprintk("RPC: setup backchannel transport\n");

        /*
         * We use a temporary list to keep track of the preallocated
         * buffers. Once we're done building the list we splice it
         * into the backchannel preallocation list off of the rpc_xprt
         * struct. This helps minimize the amount of time the list
         * lock is held on the rpc_xprt struct. It also makes cleanup
         * easier in case of memory allocation errors.
         */
        INIT_LIST_HEAD(&tmp_list);
        for (i = 0; i < min_reqs; i++) {
                /* Pre-allocate one backchannel rpc_rqst */
                req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
                if (req == NULL) {
                        printk(KERN_ERR "Failed to create bc rpc_rqst\n");
                        goto out_free;
                }

                /* Add the allocated buffer to the tmp list */
                dprintk("RPC: adding req= %p\n", req);
                list_add(&req->rq_bc_pa_list, &tmp_list);
        }

        /*
         * Add the temporary list to the backchannel preallocation list
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        list_splice(&tmp_list, &xprt->bc_pa_list);
        xprt_inc_alloc_count(xprt, min_reqs);
        spin_unlock_bh(&xprt->bc_pa_lock);

        dprintk("RPC: setup backchannel transport done\n");
        return 0;

out_free:
        /*
         * Memory allocation failed, free the temporary list
         */
        while (!list_empty(&tmp_list)) {
                req = list_first_entry(&tmp_list,
                                struct rpc_rqst,
                                rq_bc_pa_list);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
        }

        dprintk("RPC: setup backchannel transport failed\n");
        return -ENOMEM;
}

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        if (xprt->ops->bc_destroy)
                xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        struct rpc_rqst *req = NULL, *tmp = NULL;

        dprintk("RPC: destroy backchannel transport\n");

        if (max_reqs == 0)
                goto out;

        spin_lock_bh(&xprt->bc_pa_lock);
        xprt_dec_alloc_count(xprt, max_reqs);
        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                dprintk("RPC: req=%p\n", req);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
                if (--max_reqs == 0)
                        break;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);

out:
        dprintk("RPC: backchannel list empty= %s\n",
                list_empty(&xprt->bc_pa_list) ? "true" : "false");
}

static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *req = NULL;

        dprintk("RPC: allocate a backchannel request\n");
        if (atomic_read(&xprt->bc_free_slots) <= 0)
                goto not_found;
        if (list_empty(&xprt->bc_pa_list)) {
                req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
                if (!req)
                        goto not_found;
                list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
                xprt->bc_alloc_count++;
        }
        req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                                rq_bc_pa_list);
        req->rq_reply_bytes_recvd = 0;
        req->rq_bytes_sent = 0;
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                        sizeof(req->rq_private_buf));
        req->rq_xid = xid;
        req->rq_connect_cookie = xprt->connect_cookie;
not_found:
        dprintk("RPC: backchannel req=%p\n", req);
        return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        xprt->ops->bc_free_rqst(req);
}

void xprt_free_bc_rqst(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: free backchannel req=%p\n", req);

        req->rq_connect_cookie = xprt->connect_cookie - 1;
        smp_mb__before_atomic();
        clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
        smp_mb__after_atomic();

        /*
         * Return it to the list of preallocations so that it
         * may be reused by a new callback request.
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        if (xprt_need_to_requeue(xprt)) {
                list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
                xprt->bc_alloc_count++;
                req = NULL;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);
        if (req != NULL) {
                /*
                 * The last remaining session was destroyed while this
                 * entry was in use. Free the entry and don't attempt
                 * to add back to the list because there is no need to
                 * have any more preallocated entries.
                 */
                dprintk("RPC: Last session removed req=%p\n", req);
                xprt_free_allocation(req);
                return;
        }
}
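
/*
 * Sketch (an assumption, modeled on how the TCP transport wires up
 * the generic backchannel): a transport opts in by pointing the
 * bc_* methods of its rpc_xprt_ops at the helpers in this file.
 * The ops-table name below is illustrative.
 */
#if 0   /* example only */
static const struct rpc_xprt_ops example_stream_ops = {
        /* ... forechannel methods elided ... */
        .bc_setup       = xprt_setup_bc,
        .bc_free_rqst   = xprt_free_bc_rqst,
        .bc_destroy     = xprt_destroy_bc,
};
#endif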

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup. Buffer space for the send and private XDR buffers
 * has been preallocated as well. Use xprt_alloc_bc_request to allocate
 * one of these structures, and use xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context, so take the
 * plain spin_lock since there is no need to take the bottom-half
 * variant.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *req;

        spin_lock(&xprt->bc_pa_lock);
        list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
                if (req->rq_connect_cookie != xprt->connect_cookie)
                        continue;
                if (req->rq_xid == xid)
                        goto found;
        }
        req = xprt_alloc_bc_request(xprt, xid);
found:
        spin_unlock(&xprt->bc_pa_lock);
        return req;
}

/*
 * Add a callback request to the callback list. The callback
 * service sleeps on the sv_cb_waitq waiting for new requests,
 * so wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;

        spin_lock(&xprt->bc_pa_lock);
        list_del(&req->rq_bc_pa_list);
        xprt_dec_alloc_count(xprt, 1);
        spin_unlock(&xprt->bc_pa_lock);

        req->rq_private_buf.len = copied;
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

        dprintk("RPC: add callback request to list\n");
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
        spin_unlock(&bc_serv->sv_cb_lock);
}
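
/*
 * Sketch (an assumption, modeled on a stream transport's receive
 * path): when data arriving on a client connection turns out to be
 * an RPC CALL rather than a REPLY, the transport matches or
 * allocates a backchannel slot by XID, copies the call into
 * rq_rcv_buf, and completes the request so the callback service
 * can process it. The function name and the copy step below are
 * illustrative.
 */
#if 0   /* example only */
static void example_recv_bc_call(struct rpc_xprt *xprt, __be32 xid,
                                 size_t copied)
{
        struct rpc_rqst *req;

        req = xprt_lookup_bc_request(xprt, xid);
        if (req == NULL)
                return; /* no free slot; the call is dropped */

        /* ... the transport copies 'copied' bytes into req->rq_rcv_buf ... */

        xprt_complete_bc_request(req, copied);
}
#endif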