Linux kernel source: net/ipv6/netfilter.c

Two versions of this file follow: v6.2 (immediately below) and
v5.14.15 (later in this file).

v6.2
  1/*
  2 * IPv6 specific functions of netfilter core
  3 *
  4 * Rusty Russell (C) 2000 -- This code is GPL.
  5 * Patrick McHardy (C) 2006-2012
  6 */
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/ipv6.h>
 10#include <linux/netfilter.h>
 11#include <linux/netfilter_ipv6.h>
 12#include <linux/export.h>
 13#include <net/addrconf.h>
 14#include <net/dst.h>
 15#include <net/ipv6.h>
 16#include <net/ip6_route.h>
 17#include <net/xfrm.h>
 18#include <net/netfilter/nf_queue.h>
 19#include <net/netfilter/nf_conntrack_bridge.h>
 20#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 21#include "../bridge/br_private.h"
 22
 23int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
 24{
 25	const struct ipv6hdr *iph = ipv6_hdr(skb);
 26	struct sock *sk = sk_to_full_sk(sk_partial);
 27	struct net_device *dev = skb_dst(skb)->dev;
 28	struct flow_keys flkeys;
 29	unsigned int hh_len;
 30	struct dst_entry *dst;
 31	int strict = (ipv6_addr_type(&iph->daddr) &
 32		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
 33	struct flowi6 fl6 = {
 34		.flowi6_l3mdev = l3mdev_master_ifindex(dev),
 
 35		.flowi6_mark = skb->mark,
 36		.flowi6_uid = sock_net_uid(net, sk),
 37		.daddr = iph->daddr,
 38		.saddr = iph->saddr,
 39	};
 40	int err;
 41
 42	if (sk && sk->sk_bound_dev_if)
 43		fl6.flowi6_oif = sk->sk_bound_dev_if;
 44	else if (strict)
 45		fl6.flowi6_oif = dev->ifindex;
 46
 47	fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
 48	dst = ip6_route_output(net, sk, &fl6);
 49	err = dst->error;
 50	if (err) {
 51		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 52		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
 53		dst_release(dst);
 54		return err;
 55	}
 56
 57	/* Drop old route. */
 58	skb_dst_drop(skb);
 59
 60	skb_dst_set(skb, dst);
 61
 62#ifdef CONFIG_XFRM
 63	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
 64	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
 65		skb_dst_set(skb, NULL);
 66		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
 67		if (IS_ERR(dst))
 68			return PTR_ERR(dst);
 69		skb_dst_set(skb, dst);
 70	}
 71#endif
 72
 73	/* Change in oif may mean change in hh_len. */
 74	hh_len = skb_dst(skb)->dev->hard_header_len;
 75	if (skb_headroom(skb) < hh_len &&
 76	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
 77			     0, GFP_ATOMIC))
 78		return -ENOMEM;
 79
 80	return 0;
 81}
 82EXPORT_SYMBOL(ip6_route_me_harder);
 83
 84static int nf_ip6_reroute(struct sk_buff *skb,
 85			  const struct nf_queue_entry *entry)
 86{
 87	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
 88
 89	if (entry->state.hook == NF_INET_LOCAL_OUT) {
 90		const struct ipv6hdr *iph = ipv6_hdr(skb);
 91		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
 92		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
 93		    skb->mark != rt_info->mark)
 94			return ip6_route_me_harder(entry->state.net, entry->state.sk, skb);
 95	}
 96	return 0;
 97}
 98
 99int __nf_ip6_route(struct net *net, struct dst_entry **dst,
100		   struct flowi *fl, bool strict)
101{
102	static const struct ipv6_pinfo fake_pinfo;
103	static const struct inet_sock fake_sk = {
104		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
105		.sk.sk_bound_dev_if = 1,
106		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
107	};
108	const void *sk = strict ? &fake_sk : NULL;
109	struct dst_entry *result;
110	int err;
111
112	result = ip6_route_output(net, sk, &fl->u.ip6);
113	err = result->error;
114	if (err)
115		dst_release(result);
116	else
117		*dst = result;
118	return err;
119}
120EXPORT_SYMBOL_GPL(__nf_ip6_route);
121
/* Fragment @skb to the conntrack-recorded frag_max_size for the bridge
 * netfilter path, emitting each fragment through @output.
 *
 * Mirrors ip6_fragment(): a fast path reuses an existing frag list as
 * ready-made fragments when geometry and sharing permit; otherwise the
 * slow path copies the payload into freshly allocated fragments.  On
 * unrecoverable errors the packet is dropped ("blackhole") and 0 is
 * returned so the caller treats it as consumed.
 */
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		    struct nf_bridge_frag_data *data,
		    int (*output)(struct net *, struct sock *sk,
				  const struct nf_bridge_frag_data *data,
				  struct sk_buff *))
{
	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
	/* Preserve the delivery time so every fragment carries it. */
	bool mono_delivery_time = skb->mono_delivery_time;
	ktime_t tstamp = skb->tstamp;
	struct ip6_frag_state state;
	u8 *prevhdr, nexthdr = 0;
	unsigned int mtu, hlen;
	int hroom, err = 0;
	__be32 frag_id;

	/* Find where a fragment header must be inserted; on success err
	 * is the length of the unfragmentable part (hlen). */
	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto blackhole;
	hlen = err;
	nexthdr = *prevhdr;

	/* frag_max_size must fit the device MTU and be a legal IPv6 MTU. */
	mtu = skb->dev->mtu;
	if (frag_max_size > mtu ||
	    frag_max_size < IPV6_MIN_MTU)
		goto blackhole;

	mtu = frag_max_size;
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto blackhole;
	/* Remaining per-fragment payload budget. */
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	/* The checksum must be finalized before the payload is split. */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto blackhole;

	hroom = LL_RESERVED_SPACE(skb->dev);
	if (skb_has_frag_list(skb)) {
		/* Fast path: each frag-list member becomes a fragment,
		 * provided nothing is cloned/shared and headroom fits. */
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto blackhole;

		if (skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag2) {
			if (frag2->len > mtu ||
			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto blackhole;

			/* Partially cloned skb? */
			if (skb_shared(frag2))
				goto slow_path;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto blackhole;

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
			err = output(net, sk, data, skb);
			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		/* Header copy allocated by ip6_fraglist_init(). */
		kfree(iter.tmp_hdr);
		if (!err)
			return 0;

		/* Output failed mid-stream: drop the unsent fragments. */
		kfree_skb_list(iter.frag);
		return err;
	}
slow_path:
	/* This is a linearized skbuff, the original geometry is lost for us.
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
		      &state);

	while (state.left > 0) {
		struct sk_buff *skb2;

		/* Allocate and fill the next fragment from @skb. */
		skb2 = ip6_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto blackhole;
		}

		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
		err = output(net, sk, data, skb2);
		if (err)
			goto blackhole;
	}
	/* Original skb fully copied into fragments; release it. */
	consume_skb(skb);
	return err;

blackhole:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);
241
/* IPv6 helpers published to netfilter core through the nf_ipv6_ops
 * pointer (installed in ipv6_netfilter_init() below).  The IS_MODULE
 * guards expose extra entries only when IPv6 is built modular, where
 * callers cannot link against these symbols directly.
 */
static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
	.chk_addr		= ipv6_chk_addr,
	.route_me_harder	= ip6_route_me_harder,
	.dev_get_saddr		= ipv6_dev_get_saddr,
	.route			= __nf_ip6_route,
#if IS_ENABLED(CONFIG_SYN_COOKIES)
	.cookie_init_sequence	= __cookie_v6_init_sequence,
	.cookie_v6_check	= __cookie_v6_check,
#endif
#endif
	.route_input		= ip6_route_input,
	.fragment		= ip6_fragment,
	.reroute		= nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6)
	.br_fragment		= br_ip6_fragment,
#endif
};
260
/* Publish the IPv6 ops table for netfilter core; always succeeds. */
int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return 0;
}
266
/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	/* Unpublish the ops table; concurrent RCU readers see NULL. */
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
}
v5.14.15 — an older copy of the same file (net/ipv6/netfilter.c) follows.
  1/*
  2 * IPv6 specific functions of netfilter core
  3 *
  4 * Rusty Russell (C) 2000 -- This code is GPL.
  5 * Patrick McHardy (C) 2006-2012
  6 */
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/ipv6.h>
 10#include <linux/netfilter.h>
 11#include <linux/netfilter_ipv6.h>
 12#include <linux/export.h>
 13#include <net/addrconf.h>
 14#include <net/dst.h>
 15#include <net/ipv6.h>
 16#include <net/ip6_route.h>
 17#include <net/xfrm.h>
 18#include <net/netfilter/nf_queue.h>
 19#include <net/netfilter/nf_conntrack_bridge.h>
 20#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 21#include "../bridge/br_private.h"
 22
 23int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
 24{
 25	const struct ipv6hdr *iph = ipv6_hdr(skb);
 26	struct sock *sk = sk_to_full_sk(sk_partial);
 
 27	struct flow_keys flkeys;
 28	unsigned int hh_len;
 29	struct dst_entry *dst;
 30	int strict = (ipv6_addr_type(&iph->daddr) &
 31		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
 32	struct flowi6 fl6 = {
 33		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
 34			strict ? skb_dst(skb)->dev->ifindex : 0,
 35		.flowi6_mark = skb->mark,
 36		.flowi6_uid = sock_net_uid(net, sk),
 37		.daddr = iph->daddr,
 38		.saddr = iph->saddr,
 39	};
 40	int err;
 41
 
 
 
 
 
 42	fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
 43	dst = ip6_route_output(net, sk, &fl6);
 44	err = dst->error;
 45	if (err) {
 46		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 47		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
 48		dst_release(dst);
 49		return err;
 50	}
 51
 52	/* Drop old route. */
 53	skb_dst_drop(skb);
 54
 55	skb_dst_set(skb, dst);
 56
 57#ifdef CONFIG_XFRM
 58	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
 59	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
 60		skb_dst_set(skb, NULL);
 61		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
 62		if (IS_ERR(dst))
 63			return PTR_ERR(dst);
 64		skb_dst_set(skb, dst);
 65	}
 66#endif
 67
 68	/* Change in oif may mean change in hh_len. */
 69	hh_len = skb_dst(skb)->dev->hard_header_len;
 70	if (skb_headroom(skb) < hh_len &&
 71	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
 72			     0, GFP_ATOMIC))
 73		return -ENOMEM;
 74
 75	return 0;
 76}
 77EXPORT_SYMBOL(ip6_route_me_harder);
 78
 79static int nf_ip6_reroute(struct sk_buff *skb,
 80			  const struct nf_queue_entry *entry)
 81{
 82	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
 83
 84	if (entry->state.hook == NF_INET_LOCAL_OUT) {
 85		const struct ipv6hdr *iph = ipv6_hdr(skb);
 86		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
 87		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
 88		    skb->mark != rt_info->mark)
 89			return ip6_route_me_harder(entry->state.net, entry->state.sk, skb);
 90	}
 91	return 0;
 92}
 93
 94int __nf_ip6_route(struct net *net, struct dst_entry **dst,
 95		   struct flowi *fl, bool strict)
 96{
 97	static const struct ipv6_pinfo fake_pinfo;
 98	static const struct inet_sock fake_sk = {
 99		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
100		.sk.sk_bound_dev_if = 1,
101		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
102	};
103	const void *sk = strict ? &fake_sk : NULL;
104	struct dst_entry *result;
105	int err;
106
107	result = ip6_route_output(net, sk, &fl->u.ip6);
108	err = result->error;
109	if (err)
110		dst_release(result);
111	else
112		*dst = result;
113	return err;
114}
115EXPORT_SYMBOL_GPL(__nf_ip6_route);
116
/* Fragment @skb to the conntrack-recorded frag_max_size for the bridge
 * netfilter path, emitting each fragment through @output.
 *
 * Mirrors ip6_fragment(): a fast path reuses an existing frag list as
 * ready-made fragments when geometry and sharing permit; otherwise the
 * slow path copies the payload into freshly allocated fragments.  On
 * unrecoverable errors the packet is dropped ("blackhole") and 0 is
 * returned so the caller treats it as consumed.
 */
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		    struct nf_bridge_frag_data *data,
		    int (*output)(struct net *, struct sock *sk,
				  const struct nf_bridge_frag_data *data,
				  struct sk_buff *))
{
	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
	/* Preserve the timestamp so every fragment carries it. */
	ktime_t tstamp = skb->tstamp;
	struct ip6_frag_state state;
	u8 *prevhdr, nexthdr = 0;
	unsigned int mtu, hlen;
	int hroom, err = 0;
	__be32 frag_id;

	/* Find where a fragment header must be inserted; on success err
	 * is the length of the unfragmentable part (hlen). */
	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto blackhole;
	hlen = err;
	nexthdr = *prevhdr;

	/* frag_max_size must fit the device MTU and be a legal IPv6 MTU. */
	mtu = skb->dev->mtu;
	if (frag_max_size > mtu ||
	    frag_max_size < IPV6_MIN_MTU)
		goto blackhole;

	mtu = frag_max_size;
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto blackhole;
	/* Remaining per-fragment payload budget. */
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	/* The checksum must be finalized before the payload is split. */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto blackhole;

	hroom = LL_RESERVED_SPACE(skb->dev);
	if (skb_has_frag_list(skb)) {
		/* Fast path: each frag-list member becomes a fragment,
		 * provided nothing is cloned/shared and headroom fits. */
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto blackhole;

		if (skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag2) {
			if (frag2->len > mtu ||
			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto blackhole;

			/* Partially cloned skb? */
			if (skb_shared(frag2))
				goto slow_path;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto blackhole;

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			skb->tstamp = tstamp;
			err = output(net, sk, data, skb);
			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		/* Header copy allocated by ip6_fraglist_init(). */
		kfree(iter.tmp_hdr);
		if (!err)
			return 0;

		/* Output failed mid-stream: drop the unsent fragments. */
		kfree_skb_list(iter.frag);
		return err;
	}
slow_path:
	/* This is a linearized skbuff, the original geometry is lost for us.
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
		      &state);

	while (state.left > 0) {
		struct sk_buff *skb2;

		/* Allocate and fill the next fragment from @skb. */
		skb2 = ip6_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto blackhole;
		}

		skb2->tstamp = tstamp;
		err = output(net, sk, data, skb2);
		if (err)
			goto blackhole;
	}
	/* Original skb fully copied into fragments; release it. */
	consume_skb(skb);
	return err;

blackhole:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);
235
/* IPv6 helpers published to netfilter core through the nf_ipv6_ops
 * pointer (installed in ipv6_netfilter_init() below).  The IS_MODULE
 * guards expose extra entries only when IPv6 is built modular, where
 * callers cannot link against these symbols directly.
 */
static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
	.chk_addr		= ipv6_chk_addr,
	.route_me_harder	= ip6_route_me_harder,
	.dev_get_saddr		= ipv6_dev_get_saddr,
	.route			= __nf_ip6_route,
#if IS_ENABLED(CONFIG_SYN_COOKIES)
	.cookie_init_sequence	= __cookie_v6_init_sequence,
	.cookie_v6_check	= __cookie_v6_check,
#endif
#endif
	.route_input		= ip6_route_input,
	.fragment		= ip6_fragment,
	.reroute		= nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6)
	.br_fragment		= br_ip6_fragment,
#endif
};
254
/* Publish the IPv6 ops table for netfilter core; always succeeds. */
int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return 0;
}
260
/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	/* Unpublish the ops table; concurrent RCU readers see NULL. */
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
}