/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <asm/unaligned.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

/*
 * Decodes a read chunk list. The expected format is as follows:
 *  discrim  : xdr_one
 *  position : __be32 offset into XDR stream
 *  handle   : __be32 RKEY
 *  length   : __be32 <len of segment>
 *  offset   : __be64 <remote VA>
 *   . . .
 *  end-of-list: xdr_zero
 */
static __be32 *decode_read_list(__be32 *va, __be32 *vaend)
{
        struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;

        while (ch->rc_discrim != xdr_zero) {
                if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
                    (unsigned long)vaend) {
                        dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
                        return NULL;
                }
                ch++;
        }
        return &ch->rc_position;
}

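/*
 * A minimal sketch, not used by this file, of how a caller might walk
 * a read list once decode_read_list() has validated it: totalling the
 * chunk count and the bytes the sender advertises. This mirrors the
 * svc_rdma_rcl_chunk_counts() helper carried by older revisions of
 * this file; the name used here is illustrative only.
 */
static inline void example_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
                                            int *ch_count, int *byte_count)
{
        *byte_count = 0;
        *ch_count = 0;
        for (; ch->rc_discrim != xdr_zero; ch++) {
                /* rs_length is XDR-encoded (big-endian) on the wire */
                *byte_count += be32_to_cpu(ch->rc_target.rs_length);
                (*ch_count)++;
        }
}
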
/*
 * Decodes a write chunk list. The expected format is as follows:
 *  discrim : xdr_one
 *  nchunks : <count>
 *   handle : __be32 RKEY              ---+
 *   length : __be32 <len of segment>     |  <count> times
 *   offset : __be64 <remote VA>       ---+
 */
static __be32 *decode_write_list(__be32 *va, __be32 *vaend)
{
        unsigned long start, end;
        int nchunks;

        struct rpcrdma_write_array *ary =
                (struct rpcrdma_write_array *)va;

        /* Check for no write-array */
        if (ary->wc_discrim == xdr_zero)
                return &ary->wc_nchunks;

        if ((unsigned long)ary + sizeof(struct rpcrdma_write_array) >
            (unsigned long)vaend) {
                dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
                return NULL;
        }
        nchunks = be32_to_cpu(ary->wc_nchunks);

        start = (unsigned long)&ary->wc_array[0];
        end = (unsigned long)vaend;
        if (nchunks < 0 ||
            nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
            (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
                dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
                        ary, nchunks, vaend);
                return NULL;
        }
        /*
         * rs_length is the 2nd 4B field in wc_target and taking its
         * address skips the list terminator
         */
        return &ary->wc_array[nchunks].wc_target.rs_length;
}
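
/*
 * For reference, a sketch (illustrative, not used in this file) of the
 * header bytes a validated single-array write list consumes, matching
 * what decode_write_list() steps over: the discriminator word, the
 * chunk count, one rpcrdma_write_chunk (handle, length, 64-bit offset)
 * per chunk, and the terminating xdr_zero word.
 */
static inline size_t example_write_list_len(u32 nchunks)
{
        return sizeof(__be32) +         /* wc_discrim (xdr_one) */
               sizeof(__be32) +         /* wc_nchunks */
               nchunks * sizeof(struct rpcrdma_write_chunk) +
               sizeof(__be32);          /* list terminator (xdr_zero) */
}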

static __be32 *decode_reply_array(__be32 *va, __be32 *vaend)
{
        unsigned long start, end;
        int nchunks;
        struct rpcrdma_write_array *ary =
                (struct rpcrdma_write_array *)va;

        /* Check for no reply-array */
        if (ary->wc_discrim == xdr_zero)
                return &ary->wc_nchunks;

        if ((unsigned long)ary + sizeof(struct rpcrdma_write_array) >
            (unsigned long)vaend) {
                dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
                return NULL;
        }
        nchunks = be32_to_cpu(ary->wc_nchunks);

        start = (unsigned long)&ary->wc_array[0];
        end = (unsigned long)vaend;
        if (nchunks < 0 ||
            nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
            (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
                dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
                        ary, nchunks, vaend);
                return NULL;
        }
        return (__be32 *)&ary->wc_array[nchunks];
}

int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
{
        __be32 *va, *vaend;
        unsigned int len;
        u32 hdr_len;

        /* Verify that there are enough bytes for header + something */
        if (rqstp->rq_arg.len <= RPCRDMA_HDRLEN_ERR) {
                dprintk("svcrdma: header too short = %d\n",
                        rqstp->rq_arg.len);
                return -EINVAL;
        }

        if (rmsgp->rm_vers != rpcrdma_version) {
                dprintk("%s: bad version %u\n", __func__,
                        be32_to_cpu(rmsgp->rm_vers));
                return -EPROTONOSUPPORT;
        }

        switch (be32_to_cpu(rmsgp->rm_type)) {
        case RDMA_MSG:
        case RDMA_NOMSG:
                break;

        case RDMA_DONE:
                /* Just drop it */
                dprintk("svcrdma: dropping RDMA_DONE message\n");
                return 0;

        case RDMA_ERROR:
                /* Possible if this is a backchannel reply.
                 * XXX: We should cancel this XID, though.
                 */
                dprintk("svcrdma: dropping RDMA_ERROR message\n");
                return 0;

        case RDMA_MSGP:
                /* Pull in the extra for the padded case and bump our pointer */
                rmsgp->rm_body.rm_padded.rm_align =
                        be32_to_cpu(rmsgp->rm_body.rm_padded.rm_align);
                rmsgp->rm_body.rm_padded.rm_thresh =
                        be32_to_cpu(rmsgp->rm_body.rm_padded.rm_thresh);

                va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
                rqstp->rq_arg.head[0].iov_base = va;
                len = (u32)((unsigned long)va - (unsigned long)rmsgp);
                rqstp->rq_arg.head[0].iov_len -= len;
                if (len > rqstp->rq_arg.len)
                        return -EINVAL;
                return len;
        default:
                dprintk("svcrdma: bad rdma procedure (%u)\n",
                        be32_to_cpu(rmsgp->rm_type));
                return -EINVAL;
        }

        /* The chunk lists may include a read chunk list, a write chunk
         * list, and a reply array; each of them may be empty.
         */
        va = &rmsgp->rm_body.rm_chunks[0];
        vaend = (__be32 *)((unsigned long)rmsgp + rqstp->rq_arg.len);
        va = decode_read_list(va, vaend);
        if (!va) {
                dprintk("svcrdma: failed to decode read list\n");
                return -EINVAL;
        }
        va = decode_write_list(va, vaend);
        if (!va) {
                dprintk("svcrdma: failed to decode write list\n");
                return -EINVAL;
        }
        va = decode_reply_array(va, vaend);
        if (!va) {
                dprintk("svcrdma: failed to decode reply chunk\n");
                return -EINVAL;
        }

        rqstp->rq_arg.head[0].iov_base = va;
        hdr_len = (unsigned long)va - (unsigned long)rmsgp;
        rqstp->rq_arg.head[0].iov_len -= hdr_len;

        return hdr_len;
}
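
/*
 * Call-site sketch, for illustration only (the in-tree caller lives in
 * the svcrdma receive path), assuming the transport header sits at the
 * start of the first kvec:
 *
 *      struct rpcrdma_msg *rmsgp = rqstp->rq_arg.head[0].iov_base;
 *      int hdr_len = svc_rdma_xdr_decode_req(rmsgp, rqstp);
 *
 *      if (hdr_len < 0)
 *              <reject or drop the request>;
 *      <the RPC call message now begins at rq_arg.head[0].iov_base>
 */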

int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt,
                              struct rpcrdma_msg *rmsgp,
                              enum rpcrdma_errcode err, __be32 *va)
{
        __be32 *startp = va;

        *va++ = rmsgp->rm_xid;
        *va++ = rmsgp->rm_vers;
        *va++ = cpu_to_be32(xprt->sc_max_requests);
        *va++ = rdma_error;
        *va++ = cpu_to_be32(err);
        if (err == ERR_VERS) {
                *va++ = rpcrdma_version;
                *va++ = rpcrdma_version;
        }

        return (int)((unsigned long)va - (unsigned long)startp);
}
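
/*
 * Worked example: for ERR_VERS, svc_rdma_xdr_encode_error() emits
 * seven XDR words and returns 28:
 *
 *      xid | vers | credits | rdma_error | ERR_VERS | low | high
 *
 * where low and high both carry rpcrdma_version, advertising the one
 * protocol version this server implements. Any other error code emits
 * five words (20 bytes).
 */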

int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *rmsgp)
{
        struct rpcrdma_write_array *wr_ary;

        /* There is no read-list in a reply */

        /* skip write list */
        wr_ary = (struct rpcrdma_write_array *)
                &rmsgp->rm_body.rm_chunks[1];
        if (wr_ary->wc_discrim)
                wr_ary = (struct rpcrdma_write_array *)
                        &wr_ary->wc_array[be32_to_cpu(wr_ary->wc_nchunks)].
                        wc_target.rs_length;
        else
                wr_ary = (struct rpcrdma_write_array *)
                        &wr_ary->wc_nchunks;

        /* skip reply array */
        if (wr_ary->wc_discrim)
                wr_ary = (struct rpcrdma_write_array *)
                        &wr_ary->wc_array[be32_to_cpu(wr_ary->wc_nchunks)];
        else
                wr_ary = (struct rpcrdma_write_array *)
                        &wr_ary->wc_nchunks;

        return (unsigned long) wr_ary - (unsigned long) rmsgp;
}
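
/*
 * Worked example: a reply carrying no write list and no reply array
 * has xdr_zero in all three rm_chunks words, so both branches above
 * step to the word following each discriminator and the function
 * returns the fixed header size: four header words plus three
 * chunk-list words, 28 bytes in all.
 */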

void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *rmsgp, int chunks)
{
        struct rpcrdma_write_array *ary;

        /* no read-list */
        rmsgp->rm_body.rm_chunks[0] = xdr_zero;

        /* write-array discrim */
        ary = (struct rpcrdma_write_array *)
                &rmsgp->rm_body.rm_chunks[1];
        ary->wc_discrim = xdr_one;
        ary->wc_nchunks = cpu_to_be32(chunks);

        /* write-list terminator */
        ary->wc_array[chunks].wc_target.rs_handle = xdr_zero;

        /* reply-array discriminator */
        ary->wc_array[chunks].wc_target.rs_length = xdr_zero;
}

void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
                                     int chunks)
{
        ary->wc_discrim = xdr_one;
        ary->wc_nchunks = cpu_to_be32(chunks);
}

void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
                                     int chunk_no,
                                     __be32 rs_handle,
                                     __be64 rs_offset,
                                     u32 write_len)
{
        struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;

        seg->rs_handle = rs_handle;
        seg->rs_offset = rs_offset;
        seg->rs_length = cpu_to_be32(write_len);
}
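
/*
 * Sketch of how these encoders combine, for illustration only (the
 * in-tree sequence lives in the svcrdma send path): with @ary pointing
 * at the write array inside the reply header, a sender providing
 * @chunks write chunks might do
 *
 *      svc_rdma_xdr_encode_write_list(rdma_resp, chunks);
 *      for (i = 0; i < chunks; i++)
 *              svc_rdma_xdr_encode_array_chunk(ary, i, rs_handle,
 *                                              rs_offset, write_len);
 *
 * filling in one segment per chunk actually pushed with RDMA Write.
 */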

void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
                                      struct rpcrdma_msg *rdma_argp,
                                      struct rpcrdma_msg *rdma_resp,
                                      enum rpcrdma_proc rdma_type)
{
        rdma_resp->rm_xid = rdma_argp->rm_xid;
        rdma_resp->rm_vers = rdma_argp->rm_vers;
        rdma_resp->rm_credit = cpu_to_be32(xprt->sc_max_requests);
        rdma_resp->rm_type = cpu_to_be32(rdma_type);

        /* Encode <nul> chunk lists */
        rdma_resp->rm_body.rm_chunks[0] = xdr_zero;
        rdma_resp->rm_body.rm_chunks[1] = xdr_zero;
        rdma_resp->rm_body.rm_chunks[2] = xdr_zero;
}
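
/*
 * Worked example: a chunk-less reply header encoded by the routine
 * above is the minimal RPC-over-RDMA header, seven XDR words (28
 * bytes) in all:
 *
 *      xid | vers | credit | rm_type | 0 | 0 | 0
 */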