/*
 * Copyright (c) 2006, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/errqueue.h>

#include "rds.h"

static unsigned int	rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]	= 0,
[RDS_EXTHDR_VERSION]	= sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]	= sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]	= sizeof(struct rds_ext_header_rdma_dest),
[RDS_EXTHDR_NPATHS]	= sizeof(u16),
[RDS_EXTHDR_GEN_NUM]	= sizeof(u32),
};
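
/*
 * Layout note (derived from rds_message_add_extension() and
 * rds_message_next_extension() below): h_exthdr[] holds a byte stream of
 * { type, payload } records terminated by RDS_EXTHDR_NONE, with the
 * payload length implied by the type via the table above.  An NPATHS
 * extension, for instance, is a type byte, a two-byte payload, and the
 * RDS_EXTHDR_NONE terminator.
 */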

void rds_message_addref(struct rds_message *rm)
{
	rdsdebug("addref rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
	refcount_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

static inline bool rds_zcookie_add(struct rds_msg_zcopy_info *info, u32 cookie)
{
	struct rds_zcopy_cookies *ck = &info->zcookies;
	int ncookies = ck->num;

	if (ncookies == RDS_MAX_ZCOOKIES)
		return false;
	ck->cookies[ncookies] = cookie;
	ck->num = ++ncookies;
	return true;
}

static struct rds_msg_zcopy_info *rds_info_from_znotifier(struct rds_znotifier *znotif)
{
	return container_of(znotif, struct rds_msg_zcopy_info, znotif);
}

void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
{
	unsigned long flags;
	LIST_HEAD(copy);
	struct rds_msg_zcopy_info *info, *tmp;

	spin_lock_irqsave(&q->lock, flags);
	list_splice(&q->zcookie_head, &copy);
	INIT_LIST_HEAD(&q->zcookie_head);
	spin_unlock_irqrestore(&q->lock, flags);

	list_for_each_entry_safe(info, tmp, &copy, rs_zcookie_next) {
		list_del(&info->rs_zcookie_next);
		kfree(info);
	}
}

static void rds_rm_zerocopy_callback(struct rds_sock *rs,
				     struct rds_znotifier *znotif)
{
	struct rds_msg_zcopy_info *info;
	struct rds_msg_zcopy_queue *q;
	u32 cookie = znotif->z_cookie;
	struct rds_zcopy_cookies *ck;
	struct list_head *head;
	unsigned long flags;

	mm_unaccount_pinned_pages(&znotif->z_mmp);
	q = &rs->rs_zcookie_queue;
	spin_lock_irqsave(&q->lock, flags);
	head = &q->zcookie_head;
	if (!list_empty(head)) {
		info = list_entry(head, struct rds_msg_zcopy_info,
				  rs_zcookie_next);
		if (info && rds_zcookie_add(info, cookie)) {
			spin_unlock_irqrestore(&q->lock, flags);
			kfree(rds_info_from_znotifier(znotif));
			/* caller invokes rds_wake_sk_sleep() */
			return;
		}
	}

	info = rds_info_from_znotifier(znotif);
	ck = &info->zcookies;
	memset(ck, 0, sizeof(*ck));
	WARN_ON(!rds_zcookie_add(info, cookie));
	list_add_tail(&q->zcookie_head, &info->rs_zcookie_next);

	spin_unlock_irqrestore(&q->lock, flags);
	/* caller invokes rds_wake_sk_sleep() */
}
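
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the cookies batched above are delivered to the application on a later
 * recvmsg() as an RDS_CMSG_ZCOPY_COMPLETION control message carrying a
 * struct rds_zcopy_cookies, roughly:
 *
 *	struct cmsghdr *cmsg;
 *	struct rds_zcopy_cookies *ck;
 *
 *	recvmsg(fd, &msg, MSG_DONTWAIT);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_RDS &&
 *		    cmsg->cmsg_type == RDS_CMSG_ZCOPY_COMPLETION)
 *			ck = (struct rds_zcopy_cookies *)CMSG_DATA(cmsg);
 *
 * ck->num then says how many entries of ck->cookies[] are valid.
 */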

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
	unsigned long i, flags;
	bool zcopy = false;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	spin_lock_irqsave(&rm->m_rs_lock, flags);
	if (rm->m_rs) {
		struct rds_sock *rs = rm->m_rs;

		if (rm->data.op_mmp_znotifier) {
			zcopy = true;
			rds_rm_zerocopy_callback(rs, rm->data.op_mmp_znotifier);
			rds_wake_sk_sleep(rs);
			rm->data.op_mmp_znotifier = NULL;
		}
		sock_put(rds_rs_to_sk(rs));
		rm->m_rs = NULL;
	}
	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	for (i = 0; i < rm->data.op_nents; i++) {
		/* XXX will have to put_page for page refs */
		if (!zcopy)
			__free_page(sg_page(&rm->data.op_sg[i]));
		else
			put_page(sg_page(&rm->data.op_sg[i]));
	}
	rm->data.op_nents = 0;

	if (rm->rdma.op_active)
		rds_rdma_free_op(&rm->rdma);
	if (rm->rdma.op_rdma_mr)
		kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final);

	if (rm->atomic.op_active)
		rds_atomic_free_op(&rm->atomic);
	if (rm->atomic.op_rdma_mr)
		kref_put(&rm->atomic.op_rdma_mr->r_kref, __rds_put_mr_final);
}

void rds_message_put(struct rds_message *rm)
{
	rdsdebug("put rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
	WARN(!refcount_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
	if (refcount_dec_and_test(&rm->m_refcount)) {
		BUG_ON(!list_empty(&rm->m_sock_item));
		BUG_ON(!list_empty(&rm->m_conn_item));
		rds_message_purge(rm);

		kfree(rm);
	}
}
EXPORT_SYMBOL_GPL(rds_message_put);

void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq)
{
	hdr->h_flags = 0;
	hdr->h_sport = sport;
	hdr->h_dport = dport;
	hdr->h_sequence = cpu_to_be64(seq);
	hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);

int rds_message_add_extension(struct rds_header *hdr, unsigned int type,
			      const void *data, unsigned int len)
{
	unsigned int ext_len = sizeof(u8) + len;
	unsigned char *dst;

	/* For now, refuse to add more than one extension header */
	if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
		return 0;

	if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
		return 0;

	if (ext_len >= RDS_HEADER_EXT_SPACE)
		return 0;
	dst = hdr->h_exthdr;

	*dst++ = type;
	memcpy(dst, data, len);

	dst[len] = RDS_EXTHDR_NONE;
	return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);
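
/*
 * Illustrative sketch: attaching a multipath extension to an outgoing
 * header, in the style of the probe path in send.c (an assumption about
 * the caller; the real callers live outside this file):
 *
 *	u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
 *
 *	rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_NPATHS,
 *				  &npaths, sizeof(npaths));
 *
 * The return value is 1 on success and 0 if the extension does not fit
 * or one has already been added.
 */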

/*
 * If a message has extension headers, retrieve them here.
 * Call like this:
 *
 * unsigned int pos = 0;
 *
 * while (1) {
 *	buflen = sizeof(buffer);
 *	type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *	if (type == RDS_EXTHDR_NONE)
 *		break;
 *	...
 * }
 */
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen)
{
	unsigned int offset, ext_type, ext_len;
	u8 *src = hdr->h_exthdr;

	offset = *pos;
	if (offset >= RDS_HEADER_EXT_SPACE)
		goto none;

	/* Get the extension type and length. For now, the
	 * length is implied by the extension type. */
	ext_type = src[offset++];

	if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
		goto none;
	ext_len = rds_exthdr_size[ext_type];
	if (offset + ext_len > RDS_HEADER_EXT_SPACE)
		goto none;

	*pos = offset + ext_len;
	if (ext_len < *buflen)
		*buflen = ext_len;
	memcpy(buf, src + offset, *buflen);
	return ext_type;

none:
	*pos = RDS_HEADER_EXT_SPACE;
	*buflen = 0;
	return RDS_EXTHDR_NONE;
}

int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
	struct rds_ext_header_rdma_dest ext_hdr;

	ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
	ext_hdr.h_rdma_offset = cpu_to_be32(offset);
	return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);

/*
 * Each rds_message is allocated with extra space for the scatterlist entries
 * rds ops will need. This is to minimize memory allocation count. Then, each rds op
 * can grab SGs when initializing its part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
	struct rds_message *rm;

	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
		return NULL;

	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
	if (!rm)
		goto out;

	rm->m_used_sgs = 0;
	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

	refcount_set(&rm->m_refcount, 1);
	INIT_LIST_HEAD(&rm->m_sock_item);
	INIT_LIST_HEAD(&rm->m_conn_item);
	spin_lock_init(&rm->m_rs_lock);
	init_waitqueue_head(&rm->m_flush_wait);

out:
	return rm;
}
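
/*
 * Sizing sketch (illustrative, not a caller in this file): a sender that
 * needs three data SGs and two RDMA SGs would allocate
 *
 *	rm = rds_message_alloc(5 * sizeof(struct scatterlist), GFP_KERNEL);
 *
 * and then carve entries out of the pool with rds_message_alloc_sgs(),
 * as rds_message_map_pages() below demonstrates.
 */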

/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
	struct scatterlist *sg_ret;

	if (nents <= 0) {
		pr_warn("rds: alloc sgs failed! nents <= 0\n");
		return ERR_PTR(-EINVAL);
	}

	if (rm->m_used_sgs + nents > rm->m_total_sgs) {
		pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
			rm->m_total_sgs, rm->m_used_sgs, nents);
		return ERR_PTR(-ENOMEM);
	}

	sg_ret = &sg_first[rm->m_used_sgs];
	sg_init_table(sg_ret, nents);
	rm->m_used_sgs += nents;

	return sg_ret;
}

struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
	struct rds_message *rm;
	unsigned int i;
	int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
	int extra_bytes = num_sgs * sizeof(struct scatterlist);

	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
	if (!rm)
		return ERR_PTR(-ENOMEM);

	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
	rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (IS_ERR(rm->data.op_sg)) {
		void *err = ERR_CAST(rm->data.op_sg);

		rds_message_put(rm);
		return err;
	}

	for (i = 0; i < rm->data.op_nents; ++i) {
		sg_set_page(&rm->data.op_sg[i],
			    virt_to_page(page_addrs[i]),
			    PAGE_SIZE, 0);
	}

	return rm;
}

static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *from)
{
	struct scatterlist *sg;
	int ret = 0;
	int length = iov_iter_count(from);
	int total_copied = 0;
	struct rds_msg_zcopy_info *info;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

	/*
	 * now allocate and copy in the data payload.
	 */
	sg = rm->data.op_sg;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	INIT_LIST_HEAD(&info->rs_zcookie_next);
	rm->data.op_mmp_znotifier = &info->znotif;
	if (mm_account_pinned_pages(&rm->data.op_mmp_znotifier->z_mmp,
				    length)) {
		ret = -ENOMEM;
		goto err;
	}
	while (iov_iter_count(from)) {
		struct page *pages;
		size_t start;
		ssize_t copied;

		copied = iov_iter_get_pages(from, &pages, PAGE_SIZE,
					    1, &start);
		if (copied < 0) {
			struct mmpin *mmp;
			int i;

			for (i = 0; i < rm->data.op_nents; i++)
				put_page(sg_page(&rm->data.op_sg[i]));
			mmp = &rm->data.op_mmp_znotifier->z_mmp;
			mm_unaccount_pinned_pages(mmp);
			ret = -EFAULT;
			goto err;
		}
		total_copied += copied;
		iov_iter_advance(from, copied);
		length -= copied;
		sg_set_page(sg, pages, copied, start);
		rm->data.op_nents++;
		sg++;
	}
	WARN_ON_ONCE(length != 0);
	return ret;
err:
	kfree(info);
	rm->data.op_mmp_znotifier = NULL;
	return ret;
}
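
/*
 * Illustrative sender-side sketch (an assumption, not part of this file):
 * the zerocopy path above is taken when the application has enabled
 * SO_ZEROCOPY on the PF_RDS socket and tags the send with a cookie:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 *	...
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type = RDS_CMSG_ZCOPY_COOKIE;
 *	memcpy(CMSG_DATA(cmsg), &cookie, sizeof(__u32));
 *	sendmsg(fd, &msg, MSG_ZEROCOPY);
 *
 * Completion is reported back through RDS_CMSG_ZCOPY_COMPLETION (see
 * rds_rm_zerocopy_callback() above).
 */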

int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy)
{
	unsigned long to_copy, nbytes;
	unsigned long sg_off;
	struct scatterlist *sg;
	int ret = 0;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

	/* now allocate and copy in the data payload. */
	sg = rm->data.op_sg;
	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

	if (zcopy)
		return rds_message_zcopy_from_user(rm, from);

	while (iov_iter_count(from)) {
		if (!sg_page(sg)) {
			ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
						       GFP_HIGHUSER);
			if (ret)
				return ret;
			rm->data.op_nents++;
			sg_off = 0;
		}

		to_copy = min_t(unsigned long, iov_iter_count(from),
				sg->length - sg_off);

		rds_stats_add(s_copy_from_user, to_copy);
		nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
					     to_copy, from);
		if (nbytes != to_copy)
			return -EFAULT;

		sg_off += to_copy;

		if (sg_off == sg->length)
			sg++;
	}

	return ret;
}

int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_message *rm;
	struct scatterlist *sg;
	unsigned long to_copy;
	unsigned long vec_off;
	int copied;
	int ret;
	u32 len;

	rm = container_of(inc, struct rds_message, m_inc);
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	sg = rm->data.op_sg;
	vec_off = 0;
	copied = 0;

	while (iov_iter_count(to) && copied < len) {
		to_copy = min_t(unsigned long, iov_iter_count(to),
				sg->length - vec_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
					to_copy, to);
		if (ret != to_copy)
			return -EFAULT;

		vec_off += to_copy;
		copied += to_copy;

		if (vec_off == sg->length) {
			vec_off = 0;
			sg++;
		}
	}

	return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
	wait_event_interruptible(rm->m_flush_wait,
				 !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}

void rds_message_unmapped(struct rds_message *rm)
{
	clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
	wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);