/* NOTE(review): stray "Loading..." extraction artifact removed; not part of the source. */
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/ipv6.h>
4#include <linux/netfilter.h>
5#include <linux/netfilter_ipv6.h>
6#include <net/dst.h>
7#include <net/ipv6.h>
8#include <net/ip6_route.h>
9#include <net/xfrm.h>
10#include <net/ip6_checksum.h>
11#include <net/netfilter/nf_queue.h>
12
13int ip6_route_me_harder(struct sk_buff *skb)
14{
15 struct net *net = dev_net(skb_dst(skb)->dev);
16 const struct ipv6hdr *iph = ipv6_hdr(skb);
17 struct dst_entry *dst;
18 struct flowi6 fl6 = {
19 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
20 .flowi6_mark = skb->mark,
21 .daddr = iph->daddr,
22 .saddr = iph->saddr,
23 };
24
25 dst = ip6_route_output(net, skb->sk, &fl6);
26 if (dst->error) {
27 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
28 LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
29 dst_release(dst);
30 return -EINVAL;
31 }
32
33 /* Drop old route. */
34 skb_dst_drop(skb);
35
36 skb_dst_set(skb, dst);
37
38#ifdef CONFIG_XFRM
39 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
40 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
41 skb_dst_set(skb, NULL);
42 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
43 if (IS_ERR(dst))
44 return -1;
45 skb_dst_set(skb, dst);
46 }
47#endif
48
49 return 0;
50}
51EXPORT_SYMBOL(ip6_route_me_harder);
52
/*
 * Extra routing may be needed on local out, as the QUEUE target never
 * returns control to the table.
 */
57
/* Routing keys captured before a packet is queued to userspace, so the
 * packet can be re-routed on reinject if the verdict changed them.
 */
struct ip6_rt_info {
	struct in6_addr daddr;	/* destination address at queue time */
	struct in6_addr saddr;	/* source address at queue time */
	u_int32_t mark;		/* skb->mark at queue time */
};
63
64static void nf_ip6_saveroute(const struct sk_buff *skb,
65 struct nf_queue_entry *entry)
66{
67 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
68
69 if (entry->hook == NF_INET_LOCAL_OUT) {
70 const struct ipv6hdr *iph = ipv6_hdr(skb);
71
72 rt_info->daddr = iph->daddr;
73 rt_info->saddr = iph->saddr;
74 rt_info->mark = skb->mark;
75 }
76}
77
78static int nf_ip6_reroute(struct sk_buff *skb,
79 const struct nf_queue_entry *entry)
80{
81 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
82
83 if (entry->hook == NF_INET_LOCAL_OUT) {
84 const struct ipv6hdr *iph = ipv6_hdr(skb);
85 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
86 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
87 skb->mark != rt_info->mark)
88 return ip6_route_me_harder(skb);
89 }
90 return 0;
91}
92
93static int nf_ip6_route(struct net *net, struct dst_entry **dst,
94 struct flowi *fl, bool strict)
95{
96 static const struct ipv6_pinfo fake_pinfo;
97 static const struct inet_sock fake_sk = {
98 /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
99 .sk.sk_bound_dev_if = 1,
100 .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
101 };
102 const void *sk = strict ? &fake_sk : NULL;
103
104 *dst = ip6_route_output(net, sk, &fl->u.ip6);
105 return (*dst)->error;
106}
107
/* Verify the layer-4 checksum of an IPv6 packet.
 *
 * @skb:      packet to verify
 * @hook:     netfilter hook the packet was seen at
 * @dataoff:  offset of the layer-4 header in the packet
 * @protocol: layer-4 protocol number for the pseudo-header
 *
 * Returns 0 if the checksum is valid (or verified here), otherwise the
 * non-zero folded checksum from full software verification.
 */
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
			unsigned int dataoff, u_int8_t protocol)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* Only trust the device-provided sum on receive hooks,
		 * where skb->csum covers the packet as received.
		 */
		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
			break;
		/* Remove the bytes before the L4 header from skb->csum
		 * and fold in the pseudo-header; a zero result means the
		 * checksum is good.
		 */
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				     skb->len - dataoff, protocol,
				     csum_sub(skb->csum,
					      skb_checksum(skb, 0,
							   dataoff, 0)))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* fall through */
	case CHECKSUM_NONE:
		/* Seed skb->csum with the pseudo-header (compensating for
		 * the bytes before the L4 header), then verify in software.
		 */
		skb->csum = ~csum_unfold(
				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						skb->len - dataoff,
						protocol,
						csum_sub(0,
							 skb_checksum(skb, 0,
								      dataoff, 0))));
		csum = __skb_checksum_complete(skb);
	}
	return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);
140
141static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
142 unsigned int dataoff, unsigned int len,
143 u_int8_t protocol)
144{
145 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
146 __wsum hsum;
147 __sum16 csum = 0;
148
149 switch (skb->ip_summed) {
150 case CHECKSUM_COMPLETE:
151 if (len == skb->len - dataoff)
152 return nf_ip6_checksum(skb, hook, dataoff, protocol);
153 /* fall through */
154 case CHECKSUM_NONE:
155 hsum = skb_checksum(skb, 0, dataoff, 0);
156 skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
157 &ip6h->daddr,
158 skb->len - dataoff,
159 protocol,
160 csum_sub(0, hsum)));
161 skb->ip_summed = CHECKSUM_NONE;
162 return __skb_checksum_complete_head(skb, dataoff + len);
163 }
164 return csum;
165};
166
/* AF_INET6 operations registered with the netfilter core; provides
 * checksum, routing and queue-reroute helpers for IPv6.
 */
static const struct nf_afinfo nf_ip6_afinfo = {
	.family			= AF_INET6,
	.checksum		= nf_ip6_checksum,
	.checksum_partial	= nf_ip6_checksum_partial,
	.route			= nf_ip6_route,
	.saveroute		= nf_ip6_saveroute,
	.reroute		= nf_ip6_reroute,
	.route_key_size		= sizeof(struct ip6_rt_info),
};
176
/* Register the AF_INET6 helper operations with the netfilter core.
 * Returns 0 on success or a negative errno from registration.
 */
int __init ipv6_netfilter_init(void)
{
	return nf_register_afinfo(&nf_ip6_afinfo);
}
181
/* Unregister the AF_INET6 helper operations.
 *
 * This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	nf_unregister_afinfo(&nf_ip6_afinfo);
}
1/*
2 * IPv6 specific functions of netfilter core
3 *
4 * Rusty Russell (C) 2000 -- This code is GPL.
5 * Patrick McHardy (C) 2006-2012
6 */
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/ipv6.h>
10#include <linux/netfilter.h>
11#include <linux/netfilter_ipv6.h>
12#include <linux/export.h>
13#include <net/addrconf.h>
14#include <net/dst.h>
15#include <net/ipv6.h>
16#include <net/ip6_route.h>
17#include <net/xfrm.h>
18#include <net/netfilter/nf_queue.h>
19#include <net/netfilter/nf_conntrack_bridge.h>
20#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
21#include "../bridge/br_private.h"
22
/* Re-route @skb in @net after netfilter processing may have changed its
 * addresses, mark or owning socket.  Looks up a fresh route, replaces
 * the skb's dst, re-applies xfrm lookup where needed, and ensures the
 * skb has enough headroom for the (possibly different) output device.
 *
 * Returns 0 on success or a negative errno.
 */
int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *sk = sk_to_full_sk(skb->sk);
	unsigned int hh_len;
	struct dst_entry *dst;
	/* Multicast/link-local destinations need an interface-bound lookup. */
	int strict = (ipv6_addr_type(&iph->daddr) &
		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
	struct flowi6 fl6 = {
		/* Prefer the socket's bound device; for strict lookups fall
		 * back to the current dst's device, else leave unbound.
		 */
		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
			strict ? skb_dst(skb)->dev->ifindex : 0,
		.flowi6_mark = skb->mark,
		.flowi6_uid = sock_net_uid(net, sk),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
	};
	int err;

	dst = ip6_route_output(net, sk, &fl6);
	err = dst->error;
	if (err) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
		dst_release(dst);
		return err;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		/* Clear the skb's dst; the reference is handed to
		 * xfrm_lookup(), which consumes it.
		 */
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);
76
77static int nf_ip6_reroute(struct sk_buff *skb,
78 const struct nf_queue_entry *entry)
79{
80 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
81
82 if (entry->state.hook == NF_INET_LOCAL_OUT) {
83 const struct ipv6hdr *iph = ipv6_hdr(skb);
84 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
85 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
86 skb->mark != rt_info->mark)
87 return ip6_route_me_harder(entry->state.net, skb);
88 }
89 return 0;
90}
91
92int __nf_ip6_route(struct net *net, struct dst_entry **dst,
93 struct flowi *fl, bool strict)
94{
95 static const struct ipv6_pinfo fake_pinfo;
96 static const struct inet_sock fake_sk = {
97 /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
98 .sk.sk_bound_dev_if = 1,
99 .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
100 };
101 const void *sk = strict ? &fake_sk : NULL;
102 struct dst_entry *result;
103 int err;
104
105 result = ip6_route_output(net, sk, &fl->u.ip6);
106 err = result->error;
107 if (err)
108 dst_release(result);
109 else
110 *dst = result;
111 return err;
112}
113EXPORT_SYMBOL_GPL(__nf_ip6_route);
114
/* Fragment an IPv6 skb on behalf of the bridge netfilter code and pass
 * each fragment to @output along with @data.
 *
 * Uses the bridge control block's frag_max_size as the effective MTU.
 * Fast path re-uses an existing frag list when geometry permits;
 * otherwise the slow path builds fresh fragments.  Packets that cannot
 * be fragmented are silently dropped ("blackhole": frees the skb and
 * returns 0).  Returns 0 on success or drop, or a negative errno from
 * @output / fragment construction.
 */
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		    struct nf_bridge_frag_data *data,
		    int (*output)(struct net *, struct sock *sk,
				  const struct nf_bridge_frag_data *data,
				  struct sk_buff *))
{
	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
	ktime_t tstamp = skb->tstamp;	/* preserved on every fragment */
	struct ip6_frag_state state;
	u8 *prevhdr, nexthdr = 0;
	unsigned int mtu, hlen;
	int hroom, err = 0;
	__be32 frag_id;

	/* Locate the insertion point for the fragment header. */
	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto blackhole;
	hlen = err;
	nexthdr = *prevhdr;

	/* Sanity-check the recorded fragment size against the device MTU. */
	mtu = skb->dev->mtu;
	if (frag_max_size > mtu ||
	    frag_max_size < IPV6_MIN_MTU)
		goto blackhole;

	mtu = frag_max_size;
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto blackhole;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	/* Fragments each carry their own checksum; resolve pending
	 * hardware checksum offload now.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto blackhole;

	hroom = LL_RESERVED_SPACE(skb->dev);
	/* Fast path: reuse the existing frag list as the fragments,
	 * provided every piece fits and nothing is shared/cloned.
	 */
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto blackhole;

		if (skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag2) {
			if (frag2->len > mtu ||
			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto blackhole;

			/* Partially cloned skb? */
			if (skb_shared(frag2))
				goto slow_path;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto blackhole;

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			skb->tstamp = tstamp;
			err = output(net, sk, data, skb);
			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(iter.tmp_hdr);
		if (!err)
			return 0;

		/* Output failed mid-list: free the remaining fragments. */
		kfree_skb_list(iter.frag);
		return err;
	}
slow_path:
	/* This is a linearized skbuff, the original geometry is lost for us.
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
		      &state);

	while (state.left > 0) {
		struct sk_buff *skb2;

		skb2 = ip6_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto blackhole;
		}

		skb2->tstamp = tstamp;
		err = output(net, sk, data, skb2);
		if (err)
			goto blackhole;
	}
	consume_skb(skb);
	return err;

blackhole:
	/* Unfragmentable or failed mid-way: drop the packet silently. */
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);
233
/* IPv6 operations published to the netfilter core via the nf_ipv6_ops
 * RCU pointer.  The IS_MODULE(CONFIG_IPV6) entries are only needed when
 * IPv6 is modular, so built-in netfilter code can reach these symbols
 * indirectly; with IPv6 built in, callers use them directly.
 */
static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
	.chk_addr		= ipv6_chk_addr,
	.route_me_harder	= ip6_route_me_harder,
	.dev_get_saddr		= ipv6_dev_get_saddr,
	.route			= __nf_ip6_route,
#if IS_ENABLED(CONFIG_SYN_COOKIES)
	.cookie_init_sequence	= __cookie_v6_init_sequence,
	.cookie_v6_check	= __cookie_v6_check,
#endif
#endif
	.route_input		= ip6_route_input,
	.fragment		= ip6_fragment,
	.reroute		= nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6)
	.br_fragment		= br_ip6_fragment,
#endif
};
252
/* Publish the IPv6 operations table to the netfilter core.
 * Always succeeds; returns 0.
 */
int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return 0;
}
258
/* Withdraw the IPv6 operations table from the netfilter core.
 *
 * This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
}