// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_dscp.h>

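/* Create an IPv4 UDP socket in @net according to @cfg: bind it to the
 * configured device, address and port and, if a peer port is given,
 * connect it. On success the new socket is returned through @sockp.
 */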
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
                     struct socket **sockp)
{
        int err;
        struct socket *sock = NULL;
        struct sockaddr_in udp_addr;

        err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
        if (err < 0)
                goto error;

        if (cfg->bind_ifindex) {
                err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true);
                if (err < 0)
                        goto error;
        }

        udp_addr.sin_family = AF_INET;
        udp_addr.sin_addr = cfg->local_ip;
        udp_addr.sin_port = cfg->local_udp_port;
        err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
                          sizeof(udp_addr));
        if (err < 0)
                goto error;

        if (cfg->peer_udp_port) {
                udp_addr.sin_family = AF_INET;
                udp_addr.sin_addr = cfg->peer_ip;
                udp_addr.sin_port = cfg->peer_udp_port;
                err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
                                     sizeof(udp_addr), 0);
                if (err < 0)
                        goto error;
        }

        sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;

        *sockp = sock;
        return 0;

error:
        if (sock) {
                kernel_sock_shutdown(sock, SHUT_RDWR);
                sock_release(sock);
        }
        *sockp = NULL;
        return err;
}
EXPORT_SYMBOL(udp_sock_create4);

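/* Turn a bound UDP socket into a tunnel socket: install the encapsulation
 * and GRO callbacks from @cfg and enable UDP encapsulation on the socket.
 */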
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
                           struct udp_tunnel_sock_cfg *cfg)
{
        struct sock *sk = sock->sk;

        /* Disable multicast loopback */
        inet_clear_bit(MC_LOOP, sk);

        /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
        inet_inc_convert_csum(sk);

        rcu_assign_sk_user_data(sk, cfg->sk_user_data);

        udp_sk(sk)->encap_type = cfg->encap_type;
        udp_sk(sk)->encap_rcv = cfg->encap_rcv;
        udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
        udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
        udp_sk(sk)->encap_destroy = cfg->encap_destroy;
        udp_sk(sk)->gro_receive = cfg->gro_receive;
        udp_sk(sk)->gro_complete = cfg->gro_complete;

        udp_tunnel_encap_enable(sk);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);

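/* Advertise the listening UDP port of @sock to a single device for RX offload */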
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
                             unsigned short type)
{
        struct sock *sk = sock->sk;
        struct udp_tunnel_info ti;

        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;

        udp_tunnel_nic_add_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port);

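/* Withdraw the listening UDP port of @sock from a single device's RX offload */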
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
                             unsigned short type)
{
        struct sock *sk = sock->sk;
        struct udp_tunnel_info ti;

        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;

        udp_tunnel_nic_del_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port);

/* Notify netdevs that a UDP port has started listening */
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct udp_tunnel_info ti;
        struct net_device *dev;

        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                udp_tunnel_nic_add_port(dev, &ti);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);

/* Notify netdevs that a UDP port is no longer listening */
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct udp_tunnel_info ti;
        struct net_device *dev;

        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                udp_tunnel_nic_del_port(dev, &ti);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);

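/* Prepend an outer UDP header to @skb, set up its checksum, and hand the
 * packet to the IP tunnel transmit path via @rt.
 */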
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                         __be32 src, __be32 dst, __u8 tos, __u8 ttl,
                         __be16 df, __be16 src_port, __be16 dst_port,
                         bool xnet, bool nocheck)
{
        struct udphdr *uh;

        __skb_push(skb, sizeof(*uh));
        skb_reset_transport_header(skb);
        uh = udp_hdr(skb);

        uh->dest = dst_port;
        uh->source = src_port;
        uh->len = htons(skb->len);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

        udp_set_csum(nocheck, skb, src, dst, skb->len);

        iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);

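/* Detach the encapsulation user data, wait for in-flight RCU readers,
 * then shut down and release the tunnel socket.
 */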
void udp_tunnel_sock_release(struct socket *sock)
{
        rcu_assign_sk_user_data(sock->sk, NULL);
        synchronize_rcu();
        kernel_sock_shutdown(sock, SHUT_RDWR);
        sock_release(sock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);

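/* Build a metadata dst for a received tunnel packet and record the outer
 * UDP ports (and the checksum flag, if the outer checksum was set) in its
 * tunnel key.
 */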
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
                                    const unsigned long *flags,
                                    __be64 tunnel_id, int md_size)
{
        struct metadata_dst *tun_dst;
        struct ip_tunnel_info *info;

        if (family == AF_INET)
                tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
        else
                tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
        if (!tun_dst)
                return NULL;

        info = &tun_dst->u.tun_info;
        info->key.tp_src = udp_hdr(skb)->source;
        info->key.tp_dst = udp_hdr(skb)->dest;
        if (udp_hdr(skb)->check)
                __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
        return tun_dst;
}
EXPORT_SYMBOL_GPL(udp_tun_rx_dst);

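/* Resolve an IPv4 route for tunnel transmit: try the optional dst cache
 * first, then do a full route lookup based on the tunnel key, rejecting
 * routes that loop back through the tunnel device. The selected source
 * address is returned via *saddr.
 */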
struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
                                     struct net_device *dev,
                                     struct net *net, int oif,
                                     __be32 *saddr,
                                     const struct ip_tunnel_key *key,
                                     __be16 sport, __be16 dport, u8 tos,
                                     struct dst_cache *dst_cache)
{
        struct rtable *rt = NULL;
        struct flowi4 fl4;

#ifdef CONFIG_DST_CACHE
        if (dst_cache) {
                rt = dst_cache_get_ip4(dst_cache, saddr);
                if (rt)
                        return rt;
        }
#endif

        memset(&fl4, 0, sizeof(fl4));
        fl4.flowi4_mark = skb->mark;
        fl4.flowi4_proto = IPPROTO_UDP;
        fl4.flowi4_oif = oif;
        fl4.daddr = key->u.ipv4.dst;
        fl4.saddr = key->u.ipv4.src;
        fl4.fl4_dport = dport;
        fl4.fl4_sport = sport;
        fl4.flowi4_tos = tos & INET_DSCP_MASK;
        fl4.flowi4_flags = key->flow_flags;

        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
                return ERR_PTR(-ENETUNREACH);
        }
        if (rt->dst.dev == dev) { /* is this necessary? */
                netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
                ip_rt_put(rt);
                return ERR_PTR(-ELOOP);
        }
#ifdef CONFIG_DST_CACHE
        if (dst_cache)
                dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
#endif
        *saddr = fl4.saddr;
        return rt;
}
EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup);

MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver");
MODULE_LICENSE("GPL");