/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;

static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF	0
        {
                .procname       = "rds_tcp_sndbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_skbuf_handler,
                .extra1         = &rds_tcp_min_sndbuf,
        },
#define RDS_TCP_RCVBUF	1
        {
                .procname       = "rds_tcp_rcvbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_skbuf_handler,
                .extra1         = &rds_tcp_min_rcvbuf,
        },
        { }
};
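
/* The table above is registered per netns under "net/rds/tcp" by
 * rds_tcp_init_net(), so the knobs can be set with, for example
 * (the value here is purely illustrative):
 *
 *	sysctl -w net.rds.tcp.rds_tcp_sndbuf=1048576
 *
 * Writes below SOCK_MIN_SNDBUF/SOCK_MIN_RCVBUF are rejected by
 * rds_tcp_skbuf_handler() via proc_dointvec_minmax().
 */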

/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
        mm_segment_t oldfs = get_fs();
        int val = 1;

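        /* setsockopt() expects a user-space pointer; widen the
         * addressing limit so a kernel buffer is accepted for the
         * duration of the call (the pattern kernel_setsockopt() wraps).
         */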
        set_fs(KERNEL_DS);
        sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
                              sizeof(val));
        set_fs(oldfs);
}

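/* Snapshots of the underlying TCP socket's sequence state.  snd_nxt is
 * the next byte TCP will send; snd_una is the oldest byte not yet acked
 * by the peer.  RDS/TCP compares these to decide which queued RDS
 * messages have been fully delivered.
 */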
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_nxt;
}

u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_una;
}

void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc)
{
        rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_del_init(&tc->t_list_item);
        rds_tcp_tc_count--;
        spin_unlock(&rds_tcp_tc_list_lock);

        tc->t_sock = NULL;

        sock->sk->sk_write_space = tc->t_orig_write_space;
        sock->sk->sk_data_ready = tc->t_orig_data_ready;
        sock->sk->sk_state_change = tc->t_orig_state_change;
        sock->sk->sk_user_data = NULL;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * This is the only path that sets tc->t_sock. Send and receive trust that
 * it is set. The RDS_CONN_UP bit protects those paths from being
 * called while it isn't set.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
        struct rds_tcp_connection *tc = conn->c_transport_data;

        rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
        rds_tcp_tc_count++;
        spin_unlock(&rds_tcp_tc_list_lock);

        /* accepted sockets need our listen data ready undone */
        if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
                sock->sk->sk_data_ready = sock->sk->sk_user_data;

        tc->t_sock = sock;
        tc->conn = conn;
        tc->t_orig_data_ready = sock->sk->sk_data_ready;
        tc->t_orig_write_space = sock->sk->sk_write_space;
        tc->t_orig_state_change = sock->sk->sk_state_change;

        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens)
{
        struct rds_info_tcp_socket tsinfo;
        struct rds_tcp_connection *tc;
        unsigned long flags;
        struct sockaddr_in sin;
        int sinlen;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo) < rds_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
                /* report each connection's own TCP socket, not the
                 * PF_RDS socket the info request arrived on
                 */
                struct socket *tsock = tc->t_sock;

                tsock->ops->getname(tsock, (struct sockaddr *)&sin,
                                    &sinlen, 0);
                tsinfo.local_addr = sin.sin_addr.s_addr;
                tsinfo.local_port = sin.sin_port;
                tsock->ops->getname(tsock, (struct sockaddr *)&sin,
                                    &sinlen, 1);
                tsinfo.peer_addr = sin.sin_addr.s_addr;
                tsinfo.peer_port = sin.sin_port;

                tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo.data_rem = tc->t_tinc_data_rem;
                tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo.last_expected_una = tc->t_last_expected_una;
                tsinfo.last_seen_una = tc->t_last_seen_una;

                rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
        }

out:
        lens->nr = rds_tcp_tc_count;
        lens->each = sizeof(tsinfo);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
203
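/* An address qualifies as an RDS/TCP local address only if it is
 * assigned to this network namespace (routes as RTN_LOCAL).
 */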
static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
        if (inet_addr_type(net, addr) == RTN_LOCAL)
                return 0;
        return -EADDRNOTAVAIL;
}
210
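/* Allocate the per-connection transport data and link it on
 * rds_tcp_conn_list so module unload can find and destroy it.
 */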
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_tcp_connection *tc;

        tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
        if (!tc)
                return -ENOMEM;

        mutex_init(&tc->t_conn_lock);
        tc->t_sock = NULL;
        tc->t_tinc = NULL;
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
        tc->t_tinc_data_rem = 0;

        conn->c_transport_data = tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
        spin_unlock_irq(&rds_tcp_conn_lock);

        rdsdebug("alloced tc %p\n", conn->c_transport_data);
        return 0;
}

static void rds_tcp_conn_free(void *arg)
{
        struct rds_tcp_connection *tc = arg;
        unsigned long flags;

        rdsdebug("freeing tc %p\n", tc);

        spin_lock_irqsave(&rds_tcp_conn_lock, flags);
        list_del(&tc->t_tcp_node);
        spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

        kmem_cache_free(rds_tcp_conn_slab, tc);
}

static void rds_tcp_destroy_conns(void)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&rds_tcp_conn_lock);
        list_splice(&rds_tcp_conn_list, &tmp_list);
        INIT_LIST_HEAD(&rds_tcp_conn_list);
        spin_unlock_irq(&rds_tcp_conn_lock);

        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
                if (tc->conn->c_passive)
                        rds_conn_destroy(tc->conn->c_passive);
                rds_conn_destroy(tc->conn);
        }
}

static void rds_tcp_exit(void);

struct rds_transport rds_tcp_transport = {
        .laddr_check = rds_tcp_laddr_check,
        .xmit_prepare = rds_tcp_xmit_prepare,
        .xmit_complete = rds_tcp_xmit_complete,
        .xmit = rds_tcp_xmit,
        .recv = rds_tcp_recv,
        .conn_alloc = rds_tcp_conn_alloc,
        .conn_free = rds_tcp_conn_free,
        .conn_connect = rds_tcp_conn_connect,
        .conn_shutdown = rds_tcp_conn_shutdown,
        .inc_copy_to_user = rds_tcp_inc_copy_to_user,
        .inc_free = rds_tcp_inc_free,
        .stats_info_copy = rds_tcp_stats_info_copy,
        .exit = rds_tcp_exit,
        .t_owner = THIS_MODULE,
        .t_name = "tcp",
        .t_type = RDS_TRANS_TCP,
        .t_prefer_loopback = 1,
};

static int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
        struct socket *rds_tcp_listen_sock;
        struct work_struct rds_tcp_accept_w;
        struct ctl_table_header *rds_tcp_sysctl;
        struct ctl_table *ctl_table;
        int sndbuf_size;
        int rcvbuf_size;
};

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
 */
void rds_tcp_tune(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_nonagle(sock);
        lock_sock(sk);
        if (rtn->sndbuf_size > 0) {
                sk->sk_sndbuf = rtn->sndbuf_size;
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
                sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
}

static void rds_tcp_accept_worker(struct work_struct *work)
{
        struct rds_tcp_net *rtn = container_of(work,
                                               struct rds_tcp_net,
                                               rds_tcp_accept_w);

        while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
                cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

static __net_init int rds_tcp_init_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct ctl_table *tbl;
        int err = 0;

        memset(rtn, 0, sizeof(*rtn));

        /* {snd,rcv}buf_size default to 0, which implies we let the
         * stack pick the value, and permit auto-tuning of buffer size.
         */
        if (net == &init_net) {
                tbl = rds_tcp_sysctl_table;
        } else {
                tbl = kmemdup(rds_tcp_sysctl_table,
                              sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
                if (!tbl) {
                        pr_warn("could not allocate sysctl table\n");
                        return -ENOMEM;
                }
                rtn->ctl_table = tbl;
        }
        tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
        tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
        rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
        if (!rtn->rds_tcp_sysctl) {
                pr_warn("could not register sysctl\n");
                err = -ENOMEM;
                goto fail;
        }
        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
        if (!rtn->rds_tcp_listen_sock) {
                pr_warn("could not set up listen sock\n");
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
                rtn->rds_tcp_sysctl = NULL;
                err = -EAFNOSUPPORT;
                goto fail;
        }
        INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
        return 0;

fail:
        if (net != &init_net)
                kfree(tbl);
        return err;
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        if (rtn->rds_tcp_sysctl)
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

        if (net != &init_net && rtn->ctl_table)
                kfree(rtn->ctl_table);

        /* If rds_tcp_exit_net() is called as a result of netns deletion,
         * the rds_tcp_kill_sock() device notifier would already have cleaned
         * up the listen socket, thus there is no work to do in this function.
         *
         * If rds_tcp_exit_net() is called as a result of module unload,
         * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
         * we do need to clean up the listen socket here.
         */
        if (rtn->rds_tcp_listen_sock) {
                rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
                rtn->rds_tcp_listen_sock = NULL;
                flush_work(&rtn->rds_tcp_accept_w);
        }
}

static struct pernet_operations rds_tcp_net_ops = {
        .init = rds_tcp_init_net,
        .exit = rds_tcp_exit_net,
        .id = &rds_tcp_netid,
        .size = sizeof(struct rds_tcp_net),
};

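/* Stop the listener and destroy every connection belonging to @net;
 * triggered by the loopback-device unregistration notifier below when
 * the netns is being deleted.
 */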
static void rds_tcp_kill_sock(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;
        struct sock *sk;
        LIST_HEAD(tmp_list);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
        rtn->rds_tcp_listen_sock = NULL;
        flush_work(&rtn->rds_tcp_accept_w);
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;
                list_move_tail(&tc->t_tcp_node, &tmp_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
                sk = tc->t_sock->sk;
                sk->sk_prot->disconnect(sk, 0);
                tcp_done(sk);
                if (tc->conn->c_passive)
                        rds_conn_destroy(tc->conn->c_passive);
                rds_conn_destroy(tc->conn);
        }
}

static int rds_tcp_dev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* rds-tcp registers as a pernet subsys, so the ->exit will only
         * get invoked after network activity has quiesced. We need to
         * clean up all sockets to quiesce network activity, and use
         * the unregistration of the per-net loopback device as a trigger
         * to start that cleanup.
         */
        if (event == NETDEV_UNREGISTER_FINAL &&
            dev->ifindex == LOOPBACK_IFINDEX)
                rds_tcp_kill_sock(dev_net(dev));

        return NOTIFY_DONE;
}

static struct notifier_block rds_tcp_dev_notifier = {
        .notifier_call = rds_tcp_dev_event,
        .priority = -10, /* must be called after other network notifiers */
};

/* When sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters. The assumption is that such reset
 * events are few and far between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;

                rds_conn_drop(tc->conn); /* reconnect with new parameters */
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
}

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *fpos)
{
        struct net *net = current->nsproxy->net_ns;
        int err;

        err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
        if (err < 0) {
                pr_warn("Invalid input. Must be >= %d\n",
                        *(int *)(ctl->extra1));
                return err;
        }
        if (write)
                rds_tcp_sysctl_reset(net);
        return 0;
}

static void rds_tcp_exit(void)
{
        rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
        unregister_pernet_subsys(&rds_tcp_net_ops);
        if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
                pr_warn("could not unregister rds_tcp_dev_notifier\n");
        rds_tcp_destroy_conns();
        rds_trans_unregister(&rds_tcp_transport);
        rds_tcp_recv_exit();
        kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int rds_tcp_init(void)
{
        int ret;

        rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
                                              sizeof(struct rds_tcp_connection),
                                              0, 0, NULL);
        if (!rds_tcp_conn_slab) {
                ret = -ENOMEM;
                goto out;
        }

        ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
        if (ret) {
                pr_warn("could not register rds_tcp_dev_notifier\n");
                goto out_slab;
        }

        ret = register_pernet_subsys(&rds_tcp_net_ops);
        if (ret)
                goto out_notifier;

        ret = rds_tcp_recv_init();
        if (ret)
                goto out_pernet;

        ret = rds_trans_register(&rds_tcp_transport);
        if (ret)
                goto out_recv;

        rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

        goto out;

out_recv:
        rds_tcp_recv_exit();
out_pernet:
        unregister_pernet_subsys(&rds_tcp_net_ops);
out_notifier:
        unregister_netdevice_notifier(&rds_tcp_dev_notifier);
out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
out:
        return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/addrconf.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);

/* rds_tcp_tc_count counts only IPv4 connections.
 * rds6_tcp_tc_count counts both IPv4 and IPv6 connections.
 */
static unsigned int rds_tcp_tc_count;
#if IS_ENABLED(CONFIG_IPV6)
static unsigned int rds6_tcp_tc_count;
#endif

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);
static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);

static struct kmem_cache *rds_tcp_conn_slab;

static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos);
static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;

static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF	0
        {
                .procname       = "rds_tcp_sndbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_sndbuf_handler,
                .extra1         = &rds_tcp_min_sndbuf,
        },
#define RDS_TCP_RCVBUF	1
        {
                .procname       = "rds_tcp_rcvbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_rcvbuf_handler,
                .extra1         = &rds_tcp_min_rcvbuf,
        },
};

u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
{
        /* seq# of the last byte of data in tcp send buffer */
        return tcp_sk(tc->t_sock->sk)->write_seq;
}

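/* snd_una is the oldest byte the peer has not yet acked; RDS/TCP uses
 * it to retire messages that are known to have been received.
 */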
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_una;
}

void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc)
{
        rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_del_init(&tc->t_list_item);
#if IS_ENABLED(CONFIG_IPV6)
        rds6_tcp_tc_count--;
#endif
        if (!tc->t_cpath->cp_conn->c_isv6)
                rds_tcp_tc_count--;
        spin_unlock(&rds_tcp_tc_list_lock);

        tc->t_sock = NULL;

        sock->sk->sk_write_space = tc->t_orig_write_space;
        sock->sk->sk_data_ready = tc->t_orig_data_ready;
        sock->sk->sk_state_change = tc->t_orig_state_change;
        sock->sk->sk_user_data = NULL;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * rds_tcp_reset_callbacks() switches to the new sock and returns the
 * existing tc->t_sock.
 *
 * The only functions that set tc->t_sock are rds_tcp_set_callbacks()
 * and rds_tcp_reset_callbacks(). Send and receive trust that it is set.
 * The absence of the RDS_CONN_UP bit protects those paths from being
 * called while it isn't set.
 */
void rds_tcp_reset_callbacks(struct socket *sock,
                             struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;
        struct socket *osock = tc->t_sock;

        if (!osock)
                goto newsock;

        /* Need to resolve a duelling SYN between peers.
         * We have an outstanding SYN to this peer, which may
         * potentially have transitioned to the RDS_CONN_UP state,
         * so we must quiesce any send threads before resetting
         * cp_transport_data. We quiesce these threads by setting
         * cp_state to something other than RDS_CONN_UP, and then
         * waiting for any existing threads in rds_send_xmit to
         * complete release_in_xmit(). (Subsequent threads entering
         * rds_send_xmit() will bail on !rds_conn_up().)
         *
         * However an incoming syn-ack at this point would end up
         * marking the conn as RDS_CONN_UP, and would again permit
         * rds_send_xmit() threads through, so ideally we would
         * synchronize on RDS_CONN_UP after lock_sock(), but cannot
         * do that: waiting on !RDS_IN_XMIT after lock_sock() may
         * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
         * would not get set. As a result, we set c_state to
         * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
         * cannot mark rds_conn_path_up() in the window before lock_sock()
         */
        atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
        wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
        /* reset receive side state for rds_tcp_data_recv() for osock */
        cancel_delayed_work_sync(&cp->cp_send_w);
        cancel_delayed_work_sync(&cp->cp_recv_w);
        lock_sock(osock->sk);
        if (tc->t_tinc) {
                rds_inc_put(&tc->t_tinc->ti_inc);
                tc->t_tinc = NULL;
        }
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
        tc->t_tinc_data_rem = 0;
        rds_tcp_restore_callbacks(osock, tc);
        release_sock(osock->sk);
        sock_release(osock);
newsock:
        rds_send_path_reset(cp);
        lock_sock(sock->sk);
        rds_tcp_set_callbacks(sock, cp);
        release_sock(sock->sk);
}

/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
 * above rds_tcp_reset_callbacks for notes about synchronization
 * with the data path.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;

        rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
#if IS_ENABLED(CONFIG_IPV6)
        rds6_tcp_tc_count++;
#endif
        if (!tc->t_cpath->cp_conn->c_isv6)
                rds_tcp_tc_count++;
        spin_unlock(&rds_tcp_tc_list_lock);

        /* accepted sockets need our listen data ready undone */
        if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
                sock->sk->sk_data_ready = sock->sk->sk_user_data;

        tc->t_sock = sock;
        tc->t_cpath = cp;
        tc->t_orig_data_ready = sock->sk->sk_data_ready;
        tc->t_orig_write_space = sock->sk->sk_write_space;
        tc->t_orig_state_change = sock->sk->sk_state_change;

        sock->sk->sk_user_data = cp;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

/* Handle RDS_INFO_TCP_SOCKETS socket option. It only returns IPv4
 * connections for backward compatibility.
 */
static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens)
{
        struct rds_info_tcp_socket tsinfo;
        struct rds_tcp_connection *tc;
        unsigned long flags;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo) < rds_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
                struct inet_sock *inet = inet_sk(tc->t_sock->sk);

                if (tc->t_cpath->cp_conn->c_isv6)
                        continue;

                tsinfo.local_addr = inet->inet_saddr;
                tsinfo.local_port = inet->inet_sport;
                tsinfo.peer_addr = inet->inet_daddr;
                tsinfo.peer_port = inet->inet_dport;

                tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo.data_rem = tc->t_tinc_data_rem;
                tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo.last_expected_una = tc->t_last_expected_una;
                tsinfo.last_seen_una = tc->t_last_seen_una;
                tsinfo.tos = tc->t_cpath->cp_conn->c_tos;

                rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
        }

out:
        lens->nr = rds_tcp_tc_count;
        lens->each = sizeof(tsinfo);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and
 * IPv6 connections. IPv4 connection address is returned in an IPv4
 * mapped address.
 */
static void rds6_tcp_tc_info(struct socket *sock, unsigned int len,
                             struct rds_info_iterator *iter,
                             struct rds_info_lengths *lens)
{
        struct rds6_info_tcp_socket tsinfo6;
        struct rds_tcp_connection *tc;
        unsigned long flags;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo6) < rds6_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
                struct sock *sk = tc->t_sock->sk;
                struct inet_sock *inet = inet_sk(sk);

                tsinfo6.local_addr = sk->sk_v6_rcv_saddr;
                tsinfo6.local_port = inet->inet_sport;
                tsinfo6.peer_addr = sk->sk_v6_daddr;
                tsinfo6.peer_port = inet->inet_dport;

                tsinfo6.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo6.data_rem = tc->t_tinc_data_rem;
                tsinfo6.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo6.last_expected_una = tc->t_last_expected_una;
                tsinfo6.last_seen_una = tc->t_last_seen_una;

                rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6));
        }

out:
        lens->nr = rds6_tcp_tc_count;
        lens->each = sizeof(tsinfo6);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
#endif

int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
                        __u32 scope_id)
{
        struct net_device *dev = NULL;
#if IS_ENABLED(CONFIG_IPV6)
        int ret;
#endif

        if (ipv6_addr_v4mapped(addr)) {
                if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL)
                        return 0;
                return -EADDRNOTAVAIL;
        }

        /* If the scope_id is specified, check only those addresses
         * hosted on the specified interface.
         */
        if (scope_id != 0) {
                rcu_read_lock();
                dev = dev_get_by_index_rcu(net, scope_id);
                /* scope_id is not valid... */
                if (!dev) {
                        rcu_read_unlock();
                        return -EADDRNOTAVAIL;
                }
                rcu_read_unlock();
        }
#if IS_ENABLED(CONFIG_IPV6)
        ret = ipv6_chk_addr(net, addr, dev, 0);
        if (ret)
                return 0;
#endif
        return -EADDRNOTAVAIL;
}

static void rds_tcp_conn_free(void *arg)
{
        struct rds_tcp_connection *tc = arg;
        unsigned long flags;

        rdsdebug("freeing tc %p\n", tc);

        spin_lock_irqsave(&rds_tcp_conn_lock, flags);
        if (!tc->t_tcp_node_detached)
                list_del(&tc->t_tcp_node);
        spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

        kmem_cache_free(rds_tcp_conn_slab, tc);
}

static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_tcp_connection *tc;
        int i, j;
        int ret = 0;

        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
                if (!tc) {
                        ret = -ENOMEM;
                        goto fail;
                }
                mutex_init(&tc->t_conn_path_lock);
                tc->t_sock = NULL;
                tc->t_tinc = NULL;
                tc->t_tinc_hdr_rem = sizeof(struct rds_header);
                tc->t_tinc_data_rem = 0;

                conn->c_path[i].cp_transport_data = tc;
                tc->t_cpath = &conn->c_path[i];
                tc->t_tcp_node_detached = true;

                rdsdebug("rds_conn_path [%d] tc %p\n", i,
                         conn->c_path[i].cp_transport_data);
        }
        spin_lock_irq(&rds_tcp_conn_lock);
        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = conn->c_path[i].cp_transport_data;
                tc->t_tcp_node_detached = false;
                list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
fail:
        if (ret) {
                for (j = 0; j < i; j++)
                        rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
        }
        return ret;
}

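/* Returns true if any path of @conn is already on @list; used so that a
 * multipath connection is queued for destruction only once.  Both
 * callers hold rds_tcp_conn_lock.
 */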
static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
{
        struct rds_tcp_connection *tc, *_tc;

        list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
                if (tc->t_cpath->cp_conn == conn)
                        return true;
        }
        return false;
}

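/* Flagged at the start of module unload; the RDS core consults it via
 * the t_unloading hook so that teardown during unload is not mistaken
 * for a failure that should schedule reconnect work.
 */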
static void rds_tcp_set_unloading(void)
{
        atomic_set(&rds_tcp_unloading, 1);
}

static bool rds_tcp_is_unloading(struct rds_connection *conn)
{
        return atomic_read(&rds_tcp_unloading) != 0;
}

static void rds_tcp_destroy_conns(void)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);

        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
                rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void rds_tcp_exit(void);

static u8 rds_tcp_get_tos_map(u8 tos)
{
        /* all user tos mapped to default 0 for TCP transport */
        return 0;
}

struct rds_transport rds_tcp_transport = {
        .laddr_check = rds_tcp_laddr_check,
        .xmit_path_prepare = rds_tcp_xmit_path_prepare,
        .xmit_path_complete = rds_tcp_xmit_path_complete,
        .xmit = rds_tcp_xmit,
        .recv_path = rds_tcp_recv_path,
        .conn_alloc = rds_tcp_conn_alloc,
        .conn_free = rds_tcp_conn_free,
        .conn_path_connect = rds_tcp_conn_path_connect,
        .conn_path_shutdown = rds_tcp_conn_path_shutdown,
        .inc_copy_to_user = rds_tcp_inc_copy_to_user,
        .inc_free = rds_tcp_inc_free,
        .stats_info_copy = rds_tcp_stats_info_copy,
        .exit = rds_tcp_exit,
        .get_tos_map = rds_tcp_get_tos_map,
        .t_owner = THIS_MODULE,
        .t_name = "tcp",
        .t_type = RDS_TRANS_TCP,
        .t_prefer_loopback = 1,
        .t_mp_capable = 1,
        .t_unloading = rds_tcp_is_unloading,
};

static unsigned int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
        struct socket *rds_tcp_listen_sock;
        struct work_struct rds_tcp_accept_w;
        struct ctl_table_header *rds_tcp_sysctl;
        struct ctl_table *ctl_table;
        int sndbuf_size;
        int rcvbuf_size;
};

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
 */
bool rds_tcp_tune(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn;

        tcp_sock_set_nodelay(sock->sk);
        lock_sock(sk);
        /* TCP timer functions might access net namespace even after
         * a process which created this net namespace terminated.
         */
        if (!sk->sk_net_refcnt) {
                if (!maybe_get_net(net)) {
                        release_sock(sk);
                        return false;
                }
                sk_net_refcnt_upgrade(sk);
                put_net(net);
        }
        rtn = net_generic(net, rds_tcp_netid);
        if (rtn->sndbuf_size > 0) {
                sk->sk_sndbuf = rtn->sndbuf_size;
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
                sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
        return true;
}

static void rds_tcp_accept_worker(struct work_struct *work)
{
        struct rds_tcp_net *rtn = container_of(work,
                                               struct rds_tcp_net,
                                               rds_tcp_accept_w);

        while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
                cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

static __net_init int rds_tcp_init_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct ctl_table *tbl;
        int err = 0;

        memset(rtn, 0, sizeof(*rtn));

        /* {snd,rcv}buf_size default to 0, which implies we let the
         * stack pick the value, and permit auto-tuning of buffer size.
         */
        if (net == &init_net) {
                tbl = rds_tcp_sysctl_table;
        } else {
                tbl = kmemdup(rds_tcp_sysctl_table,
                              sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
                if (!tbl) {
                        pr_warn("could not allocate sysctl table\n");
                        return -ENOMEM;
                }
                rtn->ctl_table = tbl;
        }
        tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
        tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
        rtn->rds_tcp_sysctl = register_net_sysctl_sz(net, "net/rds/tcp", tbl,
                                                     ARRAY_SIZE(rds_tcp_sysctl_table));
        if (!rtn->rds_tcp_sysctl) {
                pr_warn("could not register sysctl\n");
                err = -ENOMEM;
                goto fail;
        }

#if IS_ENABLED(CONFIG_IPV6)
        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true);
#else
        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
#endif
        if (!rtn->rds_tcp_listen_sock) {
                pr_warn("could not set up IPv6 listen sock\n");

#if IS_ENABLED(CONFIG_IPV6)
                /* Try IPv4 as some systems disable IPv6 */
                rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
                if (!rtn->rds_tcp_listen_sock) {
#endif
                        unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
                        rtn->rds_tcp_sysctl = NULL;
                        err = -EAFNOSUPPORT;
                        goto fail;
#if IS_ENABLED(CONFIG_IPV6)
                }
#endif
        }
        INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
        return 0;

fail:
        if (net != &init_net)
                kfree(tbl);
        return err;
}

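/* Stop the per-netns listener and destroy every connection (each
 * multipath connection exactly once) whose netns matches @net.
 */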
static void rds_tcp_kill_sock(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct socket *lsock = rtn->rds_tcp_listen_sock;

        rtn->rds_tcp_listen_sock = NULL;
        rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

                if (net != c_net)
                        continue;
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
                } else {
                        list_del(&tc->t_tcp_node);
                        tc->t_tcp_node_detached = true;
                }
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
                rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_kill_sock(net);

        if (rtn->rds_tcp_sysctl)
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

        if (net != &init_net)
                kfree(rtn->ctl_table);
}

static struct pernet_operations rds_tcp_net_ops = {
        .init = rds_tcp_init_net,
        .exit = rds_tcp_exit_net,
        .id = &rds_tcp_netid,
        .size = sizeof(struct rds_tcp_net),
};

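/* Returns whatever the listen socket stashed in sk_user_data (its
 * original sk_data_ready; see rds_tcp_listen_init()), or NULL if the
 * listener is being torn down.
 */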
void *rds_tcp_listen_sock_def_readable(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct socket *lsock = rtn->rds_tcp_listen_sock;

        if (!lsock)
                return NULL;

        return lsock->sk->sk_user_data;
}

/* When sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters. The assumption is that such reset
 * events are few and far between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;

                /* reconnect with new parameters */
                rds_conn_path_drop(tc->t_cpath, false);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
}

static int rds_tcp_skbuf_handler(struct rds_tcp_net *rtn,
                                 const struct ctl_table *ctl, int write,
                                 void *buffer, size_t *lenp, loff_t *fpos)
{
        int err;

        err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
        if (err < 0) {
                pr_warn("Invalid input. Must be >= %d\n",
                        *(int *)(ctl->extra1));
                return err;
        }

        if (write && rtn->rds_tcp_listen_sock && rtn->rds_tcp_listen_sock->sk) {
                struct net *net = sock_net(rtn->rds_tcp_listen_sock->sk);

                rds_tcp_sysctl_reset(net);
        }

        return 0;
}

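/* The per-net ctl_table copies point .data at the owning rds_tcp_net's
 * sndbuf_size/rcvbuf_size fields, so container_of() on ctl->data
 * recovers that rds_tcp_net without a separate lookup.
 */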
static int rds_tcp_sndbuf_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos)
{
        struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
                                               sndbuf_size);

        return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
}

static int rds_tcp_rcvbuf_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos)
{
        struct rds_tcp_net *rtn = container_of(ctl->data, struct rds_tcp_net,
                                               rcvbuf_size);

        return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);
}

static void rds_tcp_exit(void)
{
        rds_tcp_set_unloading();
        synchronize_rcu();
        rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
        rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif
        unregister_pernet_device(&rds_tcp_net_ops);
        rds_tcp_destroy_conns();
        rds_trans_unregister(&rds_tcp_transport);
        rds_tcp_recv_exit();
        kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int __init rds_tcp_init(void)
{
        int ret;

        rds_tcp_conn_slab = KMEM_CACHE(rds_tcp_connection, 0);
        if (!rds_tcp_conn_slab) {
                ret = -ENOMEM;
                goto out;
        }

        ret = rds_tcp_recv_init();
        if (ret)
                goto out_slab;

        ret = register_pernet_device(&rds_tcp_net_ops);
        if (ret)
                goto out_recv;

        rds_trans_register(&rds_tcp_transport);

        rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
        rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif

        goto out;
out_recv:
        rds_tcp_recv_exit();
out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
out:
        return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");