// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC remote transport endpoint record management
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include "ar-internal.h"

static const struct sockaddr_rxrpc rxrpc_null_addr;

/*
 * Hash a peer key.
 */
static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
					 const struct sockaddr_rxrpc *srx)
{
	const u16 *p;
	unsigned int i, size;
	unsigned long hash_key;

	_enter("");

	hash_key = (unsigned long)local / __alignof__(*local);
	hash_key += srx->transport_type;
	hash_key += srx->transport_len;
	hash_key += srx->transport.family;

	switch (srx->transport.family) {
	case AF_INET:
		hash_key += (u16 __force)srx->transport.sin.sin_port;
		size = sizeof(srx->transport.sin.sin_addr);
		p = (u16 *)&srx->transport.sin.sin_addr;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		hash_key += (u16 __force)srx->transport.sin.sin_port;
		size = sizeof(srx->transport.sin6.sin6_addr);
		p = (u16 *)&srx->transport.sin6.sin6_addr;
		break;
#endif
	default:
		WARN(1, "AF_RXRPC: Unsupported transport address family\n");
		return 0;
	}

	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0; i < size; i += sizeof(*p), p++)
		hash_key += *p;

	_leave(" 0x%lx", hash_key);
	return hash_key;
}
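
/* For illustration: the 16-bit walk above covers an IPv4 address in two
 * iterations and an IPv6 address in eight, so the whole transport address
 * contributes to hash_key without byte-at-a-time work.
 */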

/*
 * Compare a peer to a key. Return -ve, 0 or +ve to indicate less than, same
 * or greater than.
 *
 * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
 * buckets and mid-bucket insertion, so we don't make full use of this
 * information at this point.
 */
static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
			       struct rxrpc_local *local,
			       const struct sockaddr_rxrpc *srx,
			       unsigned long hash_key)
{
	long diff;

	diff = ((peer->hash_key - hash_key) ?:
		((unsigned long)peer->local - (unsigned long)local) ?:
		(peer->srx.transport_type - srx->transport_type) ?:
		(peer->srx.transport_len - srx->transport_len) ?:
		(peer->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		return ((u16 __force)peer->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&peer->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		return ((u16 __force)peer->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&peer->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}
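
/* Note on rxrpc_peer_cmp_key(): the "a ?: b" chains above use the GNU C
 * extension in which the expression yields a when a is non-zero and b
 * otherwise, so the first non-zero difference in the chain decides the
 * comparison result.
 */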

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
	struct rxrpc_local *local,
	const struct sockaddr_rxrpc *srx,
	unsigned long hash_key)
{
	struct rxrpc_peer *peer;
	struct rxrpc_net *rxnet = local->rxnet;

	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
		    refcount_read(&peer->ref) > 0)
			return peer;
	}

	return NULL;
}

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
					 const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_peer *peer;
	unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
	if (peer)
		_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
	return peer;
}
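
/* A rough usage sketch: the peer returned by rxrpc_lookup_peer_rcu() is only
 * pinned by the RCU read lock, so a caller that wants to keep it beyond the
 * read-side critical section must take a reference while still under RCU,
 * e.g.:
 *
 *	rcu_read_lock();
 *	peer = rxrpc_lookup_peer_rcu(local, srx);
 *	if (peer)
 *		peer = rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client);
 *	rcu_read_unlock();
 */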

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
				  struct rxrpc_peer *peer)
{
	struct net *net = local->net;
	struct dst_entry *dst;
	struct rtable *rt;
	struct flowi fl;
	struct flowi4 *fl4 = &fl.u.ip4;
#ifdef CONFIG_AF_RXRPC_IPV6
	struct flowi6 *fl6 = &fl.u.ip6;
#endif

	peer->if_mtu = 1500;

	memset(&fl, 0, sizeof(fl));
	switch (peer->srx.transport.family) {
	case AF_INET:
		rt = ip_route_output_ports(
			net, fl4, NULL,
			peer->srx.transport.sin.sin_addr.s_addr, 0,
			htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
		if (IS_ERR(rt)) {
			_leave(" [route err %ld]", PTR_ERR(rt));
			return;
		}
		dst = &rt->dst;
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		fl6->flowi6_iif = LOOPBACK_IFINDEX;
		fl6->flowi6_scope = RT_SCOPE_UNIVERSE;
		fl6->flowi6_proto = IPPROTO_UDP;
		memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
		       sizeof(struct in6_addr));
		fl6->fl6_dport = htons(7001);
		fl6->fl6_sport = htons(7000);
		dst = ip6_route_output(net, NULL, fl6);
		if (dst->error) {
			_leave(" [route err %d]", dst->error);
			return;
		}
		break;
#endif

	default:
		BUG();
	}

	peer->if_mtu = dst_mtu(dst);
	dst_release(dst);

	_leave(" [if_mtu %u]", peer->if_mtu);
}
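
/* For illustration: the route lookups above use arbitrary UDP ports (7000 and
 * 7001) purely to obtain a dst entry for the peer's address; dst_mtu() on that
 * entry then gives the path MTU. If no route can be found, if_mtu stays at
 * the 1500-byte default set at the top of the function.
 */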

/*
 * Allocate a peer.
 */
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
				    enum rxrpc_peer_trace why)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
	if (peer) {
		refcount_set(&peer->ref, 1);
		peer->local = rxrpc_get_local(local, rxrpc_local_get_peer);
		INIT_HLIST_HEAD(&peer->error_targets);
		peer->service_conns = RB_ROOT;
		seqlock_init(&peer->service_conn_lock);
		spin_lock_init(&peer->lock);
		spin_lock_init(&peer->rtt_input_lock);
		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);

		rxrpc_peer_init_rtt(peer);

		peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
		trace_rxrpc_peer(peer->debug_id, 1, why);
	}

	_leave(" = %p", peer);
	return peer;
}
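
/* Note: a freshly allocated peer starts with a single reference owned by the
 * caller and pins the local endpoint via rxrpc_get_local(); both are dropped
 * again by rxrpc_free_peer() when the last reference goes away.
 */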

/*
 * Initialise peer record.
 */
static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
			    unsigned long hash_key)
{
	peer->hash_key = hash_key;
	rxrpc_assess_MTU_size(local, peer);
	peer->mtu = peer->if_mtu;
	peer->rtt_last_req = ktime_get_real();

	switch (peer->srx.transport.family) {
	case AF_INET:
		peer->hdrsize = sizeof(struct iphdr);
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		peer->hdrsize = sizeof(struct ipv6hdr);
		break;
#endif
	default:
		BUG();
	}

	switch (peer->srx.transport_type) {
	case SOCK_DGRAM:
		peer->hdrsize += sizeof(struct udphdr);
		break;
	default:
		BUG();
	}

	peer->hdrsize += sizeof(struct rxrpc_wire_header);
	peer->maxdata = peer->mtu - peer->hdrsize;
}
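
/* Worked example (assuming a 1500-byte interface MTU over IPv4/UDP): hdrsize
 * is 20 (struct iphdr) + 8 (struct udphdr) + 28 (struct rxrpc_wire_header),
 * so maxdata = 1500 - 56 = 1444 bytes of payload per packet.
 */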

/*
 * Set up a new peer.
 */
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
					    struct sockaddr_rxrpc *srx,
					    unsigned long hash_key,
					    gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
	if (peer) {
		memcpy(&peer->srx, srx, sizeof(*srx));
		rxrpc_init_peer(local, peer, hash_key);
	}

	_leave(" = %p", peer);
	return peer;
}

static void rxrpc_free_peer(struct rxrpc_peer *peer)
{
	trace_rxrpc_peer(peer->debug_id, 0, rxrpc_peer_free);
	rxrpc_put_local(peer->local, rxrpc_local_put_peer);
	kfree_rcu(peer, rcu);
}

/*
 * Set up a new incoming peer. There shouldn't be any other matching peers
 * since we've already done a search in the list from the non-reentrant context
 * (the data_ready handler) that is the only place we can add new peers.
 */
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
{
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned long hash_key;

	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
	rxrpc_init_peer(local, peer, hash_key);

	spin_lock_bh(&rxnet->peer_hash_lock);
	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
				     struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_peer *peer, *candidate;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

	_enter("{%pISp}", &srx->transport);

	/* search the peer list first */
	rcu_read_lock();
	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
	if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
		peer = NULL;
	rcu_read_unlock();

	if (!peer) {
		/* The peer is not yet present in hash - create a candidate
		 * for a new record and then redo the search.
		 */
		candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
		if (!candidate) {
			_leave(" = NULL [nomem]");
			return NULL;
		}

		spin_lock_bh(&rxnet->peer_hash_lock);

		/* Need to check that we aren't racing with someone else */
		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client))
			peer = NULL;
		if (!peer) {
			hash_add_rcu(rxnet->peer_hash,
				     &candidate->hash_link, hash_key);
			list_add_tail(&candidate->keepalive_link,
				      &rxnet->peer_keepalive_new);
		}

		spin_unlock_bh(&rxnet->peer_hash_lock);

		if (peer)
			rxrpc_free_peer(candidate);
		else
			peer = candidate;
	}

	_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
	return peer;
}
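
/* The lookup above uses an optimistic allocation pattern: search under RCU
 * first, allocate a candidate without holding the hash lock (so gfp may
 * sleep), then recheck under peer_hash_lock and either publish the candidate
 * or discard it if another thread won the race.
 */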

/*
 * Get a ref on a peer record.
 */
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
{
	int r;

	__refcount_inc(&peer->ref, &r);
	trace_rxrpc_peer(peer->debug_id, r + 1, why);
	return peer;
}

/*
 * Get a ref on a peer record unless its usage has already reached 0.
 */
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer,
					enum rxrpc_peer_trace why)
{
	int r;

	if (peer) {
		if (__refcount_inc_not_zero(&peer->ref, &r))
			trace_rxrpc_peer(peer->debug_id, r + 1, why);
		else
			peer = NULL;
	}
	return peer;
}
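
/* rxrpc_get_peer() is for callers that already hold a reference;
 * rxrpc_get_peer_maybe() is for RCU lookups, where the object may already be
 * on its way to destruction, and so refuses to resurrect a zero refcount.
 */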

/*
 * Discard a peer record.
 */
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
	struct rxrpc_net *rxnet = peer->local->rxnet;

	ASSERT(hlist_empty(&peer->error_targets));

	spin_lock_bh(&rxnet->peer_hash_lock);
	hash_del_rcu(&peer->hash_link);
	list_del_init(&peer->keepalive_link);
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxrpc_free_peer(peer);
}

/*
 * Drop a ref on a peer record.
 */
void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (peer) {
		debug_id = peer->debug_id;
		dead = __refcount_dec_and_test(&peer->ref, &r);
		trace_rxrpc_peer(debug_id, r - 1, why);
		if (dead)
			__rxrpc_put_peer(peer);
	}
}
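
/* When the last reference is dropped, the peer is unhashed under
 * peer_hash_lock and then freed with kfree_rcu(), so concurrent RCU lookups
 * that still see the old hash entry never touch freed memory.
 */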

/*
 * Make sure all peer records have been discarded.
 */
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
{
	struct rxrpc_peer *peer;
	int i;

	for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) {
		if (hlist_empty(&rxnet->peer_hash[i]))
			continue;

		hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
			pr_err("Leaked peer %u {%u} %pISp\n",
			       peer->debug_id,
			       refcount_read(&peer->ref),
			       &peer->srx.transport);
		}
	}
}

/**
 * rxrpc_kernel_get_call_peer - Get the peer address of a call
 * @sock: The socket on which the call is in progress.
 * @call: The call to query
 *
 * Get a record for the remote peer in a call.
 */
struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call)
{
	return call->peer;
}
EXPORT_SYMBOL(rxrpc_kernel_get_call_peer);

/**
 * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
 * @peer: The peer to query
 *
 * Get the call's peer smoothed RTT in uS or UINT_MAX if we have no samples.
 */
unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *peer)
{
	return peer->rtt_count > 0 ? peer->srtt_us >> 3 : UINT_MAX;
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
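
/* For illustration: srtt_us is kept scaled by 8 (as TCP does), so the >> 3
 * above converts the stored value to microseconds - a stored srtt_us of 8000
 * reports as 1000us. Until the first RTT sample arrives, rtt_count is 0 and
 * the function reports UINT_MAX.
 */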

/**
 * rxrpc_kernel_remote_srx - Get the address of a peer
 * @peer: The peer to query
 *
 * Get a pointer to the address from a peer record. The caller is responsible
 * for making sure that the address is not deallocated.
 */
const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer)
{
	return peer ? &peer->srx : &rxrpc_null_addr;
}
EXPORT_SYMBOL(rxrpc_kernel_remote_srx);

/**
 * rxrpc_kernel_remote_addr - Get the peer transport address of a call
 * @peer: The peer to query
 *
 * Get a pointer to the transport address from a peer record. The caller is
 * responsible for making sure that the address is not deallocated.
 */
const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer)
{
	return (const struct sockaddr *)
		(peer ? &peer->srx.transport : &rxrpc_null_addr.transport);
}
EXPORT_SYMBOL(rxrpc_kernel_remote_addr);