v5.4
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * Operations on the network namespace
  4 */
  5#ifndef __NET_NET_NAMESPACE_H
  6#define __NET_NET_NAMESPACE_H
  7
  8#include <linux/atomic.h>
  9#include <linux/refcount.h>
 10#include <linux/workqueue.h>
 11#include <linux/list.h>
 12#include <linux/sysctl.h>
 13#include <linux/uidgid.h>
 14
 15#include <net/flow.h>
 16#include <net/netns/core.h>
 17#include <net/netns/mib.h>
 18#include <net/netns/unix.h>
 19#include <net/netns/packet.h>
 20#include <net/netns/ipv4.h>
 21#include <net/netns/ipv6.h>
 22#include <net/netns/nexthop.h>
 23#include <net/netns/ieee802154_6lowpan.h>
 24#include <net/netns/sctp.h>
 25#include <net/netns/dccp.h>
 26#include <net/netns/netfilter.h>
 27#include <net/netns/x_tables.h>
 28#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 29#include <net/netns/conntrack.h>
 30#endif
 31#include <net/netns/nftables.h>
 32#include <net/netns/xfrm.h>
 33#include <net/netns/mpls.h>
 34#include <net/netns/can.h>
 35#include <net/netns/xdp.h>
 36#include <linux/ns_common.h>
 37#include <linux/idr.h>
 38#include <linux/skbuff.h>
 39
 40struct user_namespace;
 41struct proc_dir_entry;
 42struct net_device;
 43struct sock;
 44struct ctl_table_header;
 45struct net_generic;
 46struct uevent_sock;
 47struct netns_ipvs;
 48struct bpf_prog;
 49
 50
 51#define NETDEV_HASHBITS    8
 52#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 53
 54struct net {
 55	/* First cache line can be often dirtied.
 56	 * Do not place here read-mostly fields.
 57	 */
 58	refcount_t		passive;	/* To decide when the network
 59						 * namespace should be freed.
 60						 */
 61	refcount_t		count;		/* To decide when the network
 62						 *  namespace should be shut down.
 63						 */
 64	spinlock_t		rules_mod_lock;
 65
 66	unsigned int		dev_unreg_count;
 67
 68	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
 69	int			ifindex;
 70
 71	spinlock_t		nsid_lock;
 72	atomic_t		fnhe_genid;
 73
 74	struct list_head	list;		/* list of network namespaces */
 75	struct list_head	exit_list;	/* Linked to call pernet exit
 76						 * methods on a dead net (
 77						 * pernet_ops_rwsem read locked),
 78						 * or to unregister pernet ops
 79						 * (pernet_ops_rwsem write locked).
 80						 */
 81	struct llist_node	cleanup_list;	/* namespaces on death row */
 82
 83#ifdef CONFIG_KEYS
 84	struct key_tag		*key_domain;	/* Key domain of operation tag */
 85#endif
 86	struct user_namespace   *user_ns;	/* Owning user namespace */
 87	struct ucounts		*ucounts;
 88	struct idr		netns_ids;
 89
 90	struct ns_common	ns;
 91
 92	struct list_head 	dev_base_head;
 93	struct proc_dir_entry 	*proc_net;
 94	struct proc_dir_entry 	*proc_net_stat;
 95
 96#ifdef CONFIG_SYSCTL
 97	struct ctl_table_set	sysctls;
 98#endif
 99
100	struct sock 		*rtnl;			/* rtnetlink socket */
101	struct sock		*genl_sock;
102
103	struct uevent_sock	*uevent_sock;		/* uevent socket */
104
105	struct hlist_head 	*dev_name_head;
106	struct hlist_head	*dev_index_head;
107	/* Note that @hash_mix can be read millions of times per second,
108	 * it is critical that it is on a read_mostly cache line.
109	 */
110	u32			hash_mix;
111
112	struct net_device       *loopback_dev;          /* The loopback */
113
114	/* core fib_rules */
115	struct list_head	rules_ops;
116
117	struct netns_core	core;
118	struct netns_mib	mib;
119	struct netns_packet	packet;
120	struct netns_unix	unx;
121	struct netns_nexthop	nexthop;
122	struct netns_ipv4	ipv4;
123#if IS_ENABLED(CONFIG_IPV6)
124	struct netns_ipv6	ipv6;
125#endif
126#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
127	struct netns_ieee802154_lowpan	ieee802154_lowpan;
128#endif
129#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
130	struct netns_sctp	sctp;
131#endif
132#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
133	struct netns_dccp	dccp;
134#endif
135#ifdef CONFIG_NETFILTER
136	struct netns_nf		nf;
137	struct netns_xt		xt;
138#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
139	struct netns_ct		ct;
140#endif
141#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
142	struct netns_nftables	nft;
143#endif
144#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
145	struct netns_nf_frag	nf_frag;
146	struct ctl_table_header *nf_frag_frags_hdr;
147#endif
148	struct sock		*nfnl;
149	struct sock		*nfnl_stash;
150#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
151	struct list_head        nfnl_acct_list;
152#endif
153#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
154	struct list_head	nfct_timeout_list;
155#endif
156#endif
157#ifdef CONFIG_WEXT_CORE
158	struct sk_buff_head	wext_nlevents;
159#endif
160	struct net_generic __rcu	*gen;
161
162	struct bpf_prog __rcu	*flow_dissector_prog;
163
164	/* Note : following structs are cache line aligned */
165#ifdef CONFIG_XFRM
166	struct netns_xfrm	xfrm;
167#endif
168#if IS_ENABLED(CONFIG_IP_VS)
169	struct netns_ipvs	*ipvs;
170#endif
171#if IS_ENABLED(CONFIG_MPLS)
172	struct netns_mpls	mpls;
173#endif
174#if IS_ENABLED(CONFIG_CAN)
175	struct netns_can	can;
176#endif
177#ifdef CONFIG_XDP_SOCKETS
178	struct netns_xdp	xdp;
179#endif
180#if IS_ENABLED(CONFIG_CRYPTO_USER)
181	struct sock		*crypto_nlsk;
182#endif
183	struct sock		*diag_nlsk;
184} __randomize_layout;
185
186#include <linux/seq_file_net.h>
187
188/* Init's network namespace */
189extern struct net init_net;
190
191#ifdef CONFIG_NET_NS
192struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
193			struct net *old_net);
194
195void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
196
197void net_ns_barrier(void);
198#else /* CONFIG_NET_NS */
199#include <linux/sched.h>
200#include <linux/nsproxy.h>
201static inline struct net *copy_net_ns(unsigned long flags,
202	struct user_namespace *user_ns, struct net *old_net)
203{
204	if (flags & CLONE_NEWNET)
205		return ERR_PTR(-EINVAL);
206	return old_net;
207}
208
209static inline void net_ns_get_ownership(const struct net *net,
210					kuid_t *uid, kgid_t *gid)
211{
212	*uid = GLOBAL_ROOT_UID;
213	*gid = GLOBAL_ROOT_GID;
214}
215
216static inline void net_ns_barrier(void) {}
217#endif /* CONFIG_NET_NS */
218
219
220extern struct list_head net_namespace_list;
221
222struct net *get_net_ns_by_pid(pid_t pid);
223struct net *get_net_ns_by_fd(int fd);
224
225#ifdef CONFIG_SYSCTL
226void ipx_register_sysctl(void);
227void ipx_unregister_sysctl(void);
228#else
229#define ipx_register_sysctl()
230#define ipx_unregister_sysctl()
231#endif
232
233#ifdef CONFIG_NET_NS
234void __put_net(struct net *net);
235
236static inline struct net *get_net(struct net *net)
237{
238	refcount_inc(&net->count);
239	return net;
240}
241
242static inline struct net *maybe_get_net(struct net *net)
243{
244	/* Used when we know struct net exists but we
245	 * aren't guaranteed a previous reference count
246	 * exists.  If the reference count is zero this
247	 * function fails and returns NULL.
248	 */
249	if (!refcount_inc_not_zero(&net->count))
250		net = NULL;
251	return net;
252}
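
/*
 * Illustrative sketch (not part of this header): pinning a namespace that
 * was found under rcu_read_lock().  If its ->count has already dropped to
 * zero, maybe_get_net() fails and the lookup must be treated as a miss.
 * foo_find_net() is a hypothetical RCU-protected lookup helper.
 */
static struct net *foo_get_live_net(int key)
{
	struct net *net;

	rcu_read_lock();
	net = foo_find_net(key);		/* hypothetical, may return NULL */
	if (net)
		net = maybe_get_net(net);	/* NULL if already shutting down */
	rcu_read_unlock();

	return net;	/* a non-NULL result must later be released with put_net() */
}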
253
254static inline void put_net(struct net *net)
255{
256	if (refcount_dec_and_test(&net->count))
257		__put_net(net);
258}
259
260static inline
261int net_eq(const struct net *net1, const struct net *net2)
262{
263	return net1 == net2;
264}
265
266static inline int check_net(const struct net *net)
267{
268	return refcount_read(&net->count) != 0;
269}
270
271void net_drop_ns(void *);
272
273#else
274
275static inline struct net *get_net(struct net *net)
276{
277	return net;
278}
279
280static inline void put_net(struct net *net)
281{
282}
283
284static inline struct net *maybe_get_net(struct net *net)
285{
286	return net;
287}
288
289static inline
290int net_eq(const struct net *net1, const struct net *net2)
291{
292	return 1;
293}
294
295static inline int check_net(const struct net *net)
296{
297	return 1;
298}
299
300#define net_drop_ns NULL
301#endif
302
303
304typedef struct {
305#ifdef CONFIG_NET_NS
306	struct net *net;
307#endif
308} possible_net_t;
309
310static inline void write_pnet(possible_net_t *pnet, struct net *net)
311{
312#ifdef CONFIG_NET_NS
313	pnet->net = net;
314#endif
315}
316
317static inline struct net *read_pnet(const possible_net_t *pnet)
318{
319#ifdef CONFIG_NET_NS
320	return pnet->net;
321#else
322	return &init_net;
323#endif
324}
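
/*
 * Illustrative sketch (not part of this header): possible_net_t lets an
 * object carry a netns back-pointer that occupies no space when
 * CONFIG_NET_NS is off; read_pnet() then simply yields &init_net.
 * struct foo_entry and its helpers are hypothetical.
 */
struct foo_entry {
	possible_net_t		net;
	/* ... other members ... */
};

static inline void foo_entry_assign_net(struct foo_entry *e, struct net *net)
{
	write_pnet(&e->net, net);	/* no-op without CONFIG_NET_NS */
}

static inline struct net *foo_entry_net(const struct foo_entry *e)
{
	return read_pnet(&e->net);	/* &init_net without CONFIG_NET_NS */
}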
325
326/* Protected by net_rwsem */
327#define for_each_net(VAR)				\
328	list_for_each_entry(VAR, &net_namespace_list, list)
329
330#define for_each_net_rcu(VAR)				\
331	list_for_each_entry_rcu(VAR, &net_namespace_list, list)
332
333#ifdef CONFIG_NET_NS
334#define __net_init
335#define __net_exit
336#define __net_initdata
337#define __net_initconst
338#else
339#define __net_init	__init
340#define __net_exit	__ref
341#define __net_initdata	__initdata
342#define __net_initconst	__initconst
343#endif
344
345int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
346int peernet2id(struct net *net, struct net *peer);
347bool peernet_has_id(struct net *net, struct net *peer);
348struct net *get_net_ns_by_id(struct net *net, int id);
349
350struct pernet_operations {
351	struct list_head list;
352	/*
353	 * Below methods are called without any exclusive locks.
354	 * More than one net may be constructed and destructed
355	 * in parallel on several cpus. Every pernet_operations
356	 * instance has to keep all other pernet_operations in mind
357	 * and introduce locking if they share common resources.
358	 *
359	 * The only time they are called with exclusive lock is
360	 * from register_pernet_subsys(), unregister_pernet_subsys()
361	 * register_pernet_device() and unregister_pernet_device().
362	 *
363	 * Exit methods using blocking RCU primitives, such as
364	 * synchronize_rcu(), should be implemented via exit_batch.
365	 * Then, destruction of a group of nets requires a single
366	 * synchronize_rcu() related to these pernet_operations,
367	 * instead of a separate synchronize_rcu() for every net.
368	 * Please avoid synchronize_rcu() wherever possible.
369	 *
370	 * Note that a combination of pre_exit() and exit() can
371	 * be used, since a synchronize_rcu() is guaranteed between
372	 * the calls.
373	 */
374	int (*init)(struct net *net);
375	void (*pre_exit)(struct net *net);
376	void (*exit)(struct net *net);
377	void (*exit_batch)(struct list_head *net_exit_list);
378	unsigned int *id;
379	size_t size;
380};
381
382/*
383 * Use these carefully.  If you implement a network device and it
384 * needs per network namespace operations use device pernet operations,
385 * otherwise use pernet subsys operations.
386 *
387 * Network interfaces need to be removed from a dying netns _before_
388 * subsys notifiers can be called, as most of the network code cleanup
389 * (which is done from subsys notifiers) runs with the assumption that
390 * dev_remove_pack has been called so no new packets will arrive during
391 * and after the cleanup functions have been called.  dev_remove_pack
392 * is not per namespace so instead the guarantee of no more packets
393 * arriving in a network namespace is provided by ensuring that all
394 * network devices and all sockets have left the network namespace
395 * before the cleanup methods are called.
396 *
397 * For the longest time the ipv4 icmp code was registered as a pernet
398 * device, which caused kernel oopses and panics during network
399 * namespace cleanup.  So please don't get this wrong.
400 */
401int register_pernet_subsys(struct pernet_operations *);
402void unregister_pernet_subsys(struct pernet_operations *);
403int register_pernet_device(struct pernet_operations *);
404void unregister_pernet_device(struct pernet_operations *);
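
/*
 * Illustrative sketch (not part of this header): the typical way a
 * subsystem combines pernet_operations with the ->id/->size fields.
 * When ->size is non-zero, the core allocates that many bytes of
 * per-namespace storage, reachable via net_generic(net, *id) from
 * <net/netns/generic.h>.  foo_net, foo_net_id and foo_net_ops are
 * hypothetical names.
 */
#include <net/netns/generic.h>

struct foo_net {
	int sessions;			/* example per-namespace state */
};

static unsigned int foo_net_id;

static int __net_init foo_init_net(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->sessions = 0;		/* runs once for every new namespace */
	return 0;
}

static void __net_exit foo_exit_net(struct net *net)
{
	/* release anything foo_init_net() set up for this namespace */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_init_net,
	.exit = foo_exit_net,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

/* in subsystem init:  err = register_pernet_subsys(&foo_net_ops); */
/* in subsystem exit:  unregister_pernet_subsys(&foo_net_ops);     */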
405
406struct ctl_table;
407struct ctl_table_header;
408
409#ifdef CONFIG_SYSCTL
410int net_sysctl_init(void);
411struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
412					     struct ctl_table *table);
413void unregister_net_sysctl_table(struct ctl_table_header *header);
414#else
415static inline int net_sysctl_init(void) { return 0; }
416static inline struct ctl_table_header *register_net_sysctl(struct net *net,
417	const char *path, struct ctl_table *table)
418{
419	return NULL;
420}
421static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
422{
423}
424#endif
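
/*
 * Illustrative sketch (not part of this header): registering a small
 * per-namespace sysctl table, typically from a pernet ->init() handler.
 * The "net/foo" path, foo_table and foo_enabled are hypothetical; real
 * users normally kmemdup() the template so each namespace gets private
 * ->data pointers.
 */
static int foo_enabled;	/* illustration only; real code points ->data at per-net state */

static struct ctl_table foo_table[] = {
	{
		.procname	= "foo_enabled",
		.data		= &foo_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* sentinel */
};

static int __net_init foo_sysctl_net_init(struct net *net)
{
	struct ctl_table_header *hdr;

	hdr = register_net_sysctl(net, "net/foo", foo_table);
	if (!hdr)
		return -ENOMEM;
	/* stash hdr in per-net storage so ->exit() can call
	 * unregister_net_sysctl_table(hdr)
	 */
	return 0;
}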
425
426static inline int rt_genid_ipv4(struct net *net)
427{
428	return atomic_read(&net->ipv4.rt_genid);
429}
430
431static inline void rt_genid_bump_ipv4(struct net *net)
432{
433	atomic_inc(&net->ipv4.rt_genid);
434}
435
436extern void (*__fib6_flush_trees)(struct net *net);
437static inline void rt_genid_bump_ipv6(struct net *net)
438{
439	if (__fib6_flush_trees)
440		__fib6_flush_trees(net);
441}
442
443#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
444static inline struct netns_ieee802154_lowpan *
445net_ieee802154_lowpan(struct net *net)
446{
447	return &net->ieee802154_lowpan;
448}
449#endif
450
451/* For callers who don't really care about whether it's IPv4 or IPv6 */
452static inline void rt_genid_bump_all(struct net *net)
453{
454	rt_genid_bump_ipv4(net);
455	rt_genid_bump_ipv6(net);
456}
457
458static inline int fnhe_genid(struct net *net)
459{
460	return atomic_read(&net->fnhe_genid);
461}
462
463static inline void fnhe_genid_bump(struct net *net)
464{
465	atomic_inc(&net->fnhe_genid);
466}
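
/*
 * Illustrative sketch (not part of this header) of how these generation
 * counters are meant to be used: snapshot the value when caching a derived
 * result, and treat the cache entry as stale once the namespace's genid
 * has been bumped.  struct foo_cached_route is hypothetical.
 */
struct foo_cached_route {
	int	genid;
	/* ... cached routing result ... */
};

static inline void foo_cache_fill(struct foo_cached_route *c, struct net *net)
{
	c->genid = rt_genid_ipv4(net);		/* snapshot at cache time */
}

static inline bool foo_cache_valid(const struct foo_cached_route *c,
				   struct net *net)
{
	/* anything cached before the last rt_genid_bump_ipv4() is stale */
	return c->genid == rt_genid_ipv4(net);
}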
467
468#endif /* __NET_NET_NAMESPACE_H */
v5.9
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * Operations on the network namespace
  4 */
  5#ifndef __NET_NET_NAMESPACE_H
  6#define __NET_NET_NAMESPACE_H
  7
  8#include <linux/atomic.h>
  9#include <linux/refcount.h>
 10#include <linux/workqueue.h>
 11#include <linux/list.h>
 12#include <linux/sysctl.h>
 13#include <linux/uidgid.h>
 14
 15#include <net/flow.h>
 16#include <net/netns/core.h>
 17#include <net/netns/mib.h>
 18#include <net/netns/unix.h>
 19#include <net/netns/packet.h>
 20#include <net/netns/ipv4.h>
 21#include <net/netns/ipv6.h>
 22#include <net/netns/nexthop.h>
 23#include <net/netns/ieee802154_6lowpan.h>
 24#include <net/netns/sctp.h>
 25#include <net/netns/dccp.h>
 26#include <net/netns/netfilter.h>
 27#include <net/netns/x_tables.h>
 28#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 29#include <net/netns/conntrack.h>
 30#endif
 31#include <net/netns/nftables.h>
 32#include <net/netns/xfrm.h>
 33#include <net/netns/mpls.h>
 34#include <net/netns/can.h>
 35#include <net/netns/xdp.h>
 36#include <net/netns/bpf.h>
 37#include <linux/ns_common.h>
 38#include <linux/idr.h>
 39#include <linux/skbuff.h>
 40#include <linux/notifier.h>
 41
 42struct user_namespace;
 43struct proc_dir_entry;
 44struct net_device;
 45struct sock;
 46struct ctl_table_header;
 47struct net_generic;
 48struct uevent_sock;
 49struct netns_ipvs;
 50struct bpf_prog;
 51
 52
 53#define NETDEV_HASHBITS    8
 54#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 55
 56struct net {
 57	/* First cache line can be often dirtied.
 58	 * Do not place here read-mostly fields.
 59	 */
 60	refcount_t		passive;	/* To decide when the network
 61						 * namespace should be freed.
 62						 */
 63	refcount_t		count;		/* To decide when the network
 64						 *  namespace should be shut down.
 65						 */
 66	spinlock_t		rules_mod_lock;
 67
 68	unsigned int		dev_unreg_count;
 69
 70	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
 71	int			ifindex;
 72
 73	spinlock_t		nsid_lock;
 74	atomic_t		fnhe_genid;
 75
 76	struct list_head	list;		/* list of network namespaces */
 77	struct list_head	exit_list;	/* Linked to call pernet exit
 78						 * methods on a dead net (
 79						 * pernet_ops_rwsem read locked),
 80						 * or to unregister pernet ops
 81						 * (pernet_ops_rwsem write locked).
 82						 */
 83	struct llist_node	cleanup_list;	/* namespaces on death row */
 84
 85#ifdef CONFIG_KEYS
 86	struct key_tag		*key_domain;	/* Key domain of operation tag */
 87#endif
 88	struct user_namespace   *user_ns;	/* Owning user namespace */
 89	struct ucounts		*ucounts;
 90	struct idr		netns_ids;
 91
 92	struct ns_common	ns;
 93
 94	struct list_head 	dev_base_head;
 95	struct proc_dir_entry 	*proc_net;
 96	struct proc_dir_entry 	*proc_net_stat;
 97
 98#ifdef CONFIG_SYSCTL
 99	struct ctl_table_set	sysctls;
100#endif
101
102	struct sock 		*rtnl;			/* rtnetlink socket */
103	struct sock		*genl_sock;
104
105	struct uevent_sock	*uevent_sock;		/* uevent socket */
106
107	struct hlist_head 	*dev_name_head;
108	struct hlist_head	*dev_index_head;
109	struct raw_notifier_head	netdev_chain;
110
111	/* Note that @hash_mix can be read millions of times per second,
112	 * it is critical that it is on a read_mostly cache line.
113	 */
114	u32			hash_mix;
115
116	struct net_device       *loopback_dev;          /* The loopback */
117
118	/* core fib_rules */
119	struct list_head	rules_ops;
120
121	struct netns_core	core;
122	struct netns_mib	mib;
123	struct netns_packet	packet;
124	struct netns_unix	unx;
125	struct netns_nexthop	nexthop;
126	struct netns_ipv4	ipv4;
127#if IS_ENABLED(CONFIG_IPV6)
128	struct netns_ipv6	ipv6;
129#endif
130#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
131	struct netns_ieee802154_lowpan	ieee802154_lowpan;
132#endif
133#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
134	struct netns_sctp	sctp;
135#endif
136#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
137	struct netns_dccp	dccp;
138#endif
139#ifdef CONFIG_NETFILTER
140	struct netns_nf		nf;
141	struct netns_xt		xt;
142#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
143	struct netns_ct		ct;
144#endif
145#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
146	struct netns_nftables	nft;
147#endif
148#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
149	struct netns_nf_frag	nf_frag;
150	struct ctl_table_header *nf_frag_frags_hdr;
151#endif
152	struct sock		*nfnl;
153	struct sock		*nfnl_stash;
154#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
155	struct list_head        nfnl_acct_list;
156#endif
157#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
158	struct list_head	nfct_timeout_list;
159#endif
160#endif
161#ifdef CONFIG_WEXT_CORE
162	struct sk_buff_head	wext_nlevents;
163#endif
164	struct net_generic __rcu	*gen;
165
166	/* Used to store attached BPF programs */
167	struct netns_bpf	bpf;
168
169	/* Note : following structs are cache line aligned */
170#ifdef CONFIG_XFRM
171	struct netns_xfrm	xfrm;
172#endif
173
174	atomic64_t		net_cookie; /* written once */
175
176#if IS_ENABLED(CONFIG_IP_VS)
177	struct netns_ipvs	*ipvs;
178#endif
179#if IS_ENABLED(CONFIG_MPLS)
180	struct netns_mpls	mpls;
181#endif
182#if IS_ENABLED(CONFIG_CAN)
183	struct netns_can	can;
184#endif
185#ifdef CONFIG_XDP_SOCKETS
186	struct netns_xdp	xdp;
187#endif
188#if IS_ENABLED(CONFIG_CRYPTO_USER)
189	struct sock		*crypto_nlsk;
190#endif
191	struct sock		*diag_nlsk;
192} __randomize_layout;
193
194#include <linux/seq_file_net.h>
195
196/* Init's network namespace */
197extern struct net init_net;
198
199#ifdef CONFIG_NET_NS
200struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
201			struct net *old_net);
202
203void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
204
205void net_ns_barrier(void);
206#else /* CONFIG_NET_NS */
207#include <linux/sched.h>
208#include <linux/nsproxy.h>
209static inline struct net *copy_net_ns(unsigned long flags,
210	struct user_namespace *user_ns, struct net *old_net)
211{
212	if (flags & CLONE_NEWNET)
213		return ERR_PTR(-EINVAL);
214	return old_net;
215}
216
217static inline void net_ns_get_ownership(const struct net *net,
218					kuid_t *uid, kgid_t *gid)
219{
220	*uid = GLOBAL_ROOT_UID;
221	*gid = GLOBAL_ROOT_GID;
222}
223
224static inline void net_ns_barrier(void) {}
225#endif /* CONFIG_NET_NS */
226
227
228extern struct list_head net_namespace_list;
229
230struct net *get_net_ns_by_pid(pid_t pid);
231struct net *get_net_ns_by_fd(int fd);
232
233u64 net_gen_cookie(struct net *net);
234
235#ifdef CONFIG_SYSCTL
236void ipx_register_sysctl(void);
237void ipx_unregister_sysctl(void);
238#else
239#define ipx_register_sysctl()
240#define ipx_unregister_sysctl()
241#endif
242
243#ifdef CONFIG_NET_NS
244void __put_net(struct net *net);
245
246static inline struct net *get_net(struct net *net)
247{
248	refcount_inc(&net->count);
249	return net;
250}
251
252static inline struct net *maybe_get_net(struct net *net)
253{
254	/* Used when we know struct net exists but we
255	 * aren't guaranteed a previous reference count
256	 * exists.  If the reference count is zero this
257	 * function fails and returns NULL.
258	 */
259	if (!refcount_inc_not_zero(&net->count))
260		net = NULL;
261	return net;
262}
263
264static inline void put_net(struct net *net)
265{
266	if (refcount_dec_and_test(&net->count))
267		__put_net(net);
268}
269
270static inline
271int net_eq(const struct net *net1, const struct net *net2)
272{
273	return net1 == net2;
274}
275
276static inline int check_net(const struct net *net)
277{
278	return refcount_read(&net->count) != 0;
279}
280
281void net_drop_ns(void *);
282
283#else
284
285static inline struct net *get_net(struct net *net)
286{
287	return net;
288}
289
290static inline void put_net(struct net *net)
291{
292}
293
294static inline struct net *maybe_get_net(struct net *net)
295{
296	return net;
297}
298
299static inline
300int net_eq(const struct net *net1, const struct net *net2)
301{
302	return 1;
303}
304
305static inline int check_net(const struct net *net)
306{
307	return 1;
308}
309
310#define net_drop_ns NULL
311#endif
312
313
314typedef struct {
315#ifdef CONFIG_NET_NS
316	struct net *net;
317#endif
318} possible_net_t;
319
320static inline void write_pnet(possible_net_t *pnet, struct net *net)
321{
322#ifdef CONFIG_NET_NS
323	pnet->net = net;
324#endif
325}
326
327static inline struct net *read_pnet(const possible_net_t *pnet)
328{
329#ifdef CONFIG_NET_NS
330	return pnet->net;
331#else
332	return &init_net;
333#endif
334}
335
336/* Protected by net_rwsem */
337#define for_each_net(VAR)				\
338	list_for_each_entry(VAR, &net_namespace_list, list)
339#define for_each_net_continue_reverse(VAR)		\
340	list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
341#define for_each_net_rcu(VAR)				\
342	list_for_each_entry_rcu(VAR, &net_namespace_list, list)
343
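/*
 * Illustrative sketch (not part of this header): walking every namespace
 * under RCU.  The walk itself must not sleep; a namespace that needs to be
 * used after rcu_read_unlock() must first be pinned with maybe_get_net().
 */
static unsigned int foo_count_namespaces(void)
{
	struct net *net;
	unsigned int n = 0;

	rcu_read_lock();
	for_each_net_rcu(net)
		n++;
	rcu_read_unlock();

	return n;
}
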
344#ifdef CONFIG_NET_NS
345#define __net_init
346#define __net_exit
347#define __net_initdata
348#define __net_initconst
349#else
350#define __net_init	__init
351#define __net_exit	__ref
352#define __net_initdata	__initdata
353#define __net_initconst	__initconst
354#endif
355
356int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
357int peernet2id(const struct net *net, struct net *peer);
358bool peernet_has_id(const struct net *net, struct net *peer);
359struct net *get_net_ns_by_id(const struct net *net, int id);
360
361struct pernet_operations {
362	struct list_head list;
363	/*
364	 * Below methods are called without any exclusive locks.
365	 * More than one net may be constructed and destructed
366	 * in parallel on several cpus. Every pernet_operations
367	 * instance has to keep all other pernet_operations in mind
368	 * and introduce locking if they share common resources.
369	 *
370	 * The only time they are called with exclusive lock is
371	 * from register_pernet_subsys(), unregister_pernet_subsys()
372	 * register_pernet_device() and unregister_pernet_device().
373	 *
374	 * Exit methods using blocking RCU primitives, such as
375	 * synchronize_rcu(), should be implemented via exit_batch.
376	 * Then, destruction of a group of nets requires a single
377	 * synchronize_rcu() related to these pernet_operations,
378	 * instead of a separate synchronize_rcu() for every net.
379	 * Please avoid synchronize_rcu() wherever possible.
380	 *
381	 * Note that a combination of pre_exit() and exit() can
382	 * be used, since a synchronize_rcu() is guaranteed between
383	 * the calls.
384	 */
385	int (*init)(struct net *net);
386	void (*pre_exit)(struct net *net);
387	void (*exit)(struct net *net);
388	void (*exit_batch)(struct list_head *net_exit_list);
389	unsigned int *id;
390	size_t size;
391};
392
393/*
394 * Use these carefully.  If you implement a network device and it
395 * needs per network namespace operations use device pernet operations,
396 * otherwise use pernet subsys operations.
397 *
398 * Network interfaces need to be removed from a dying netns _before_
399 * subsys notifiers can be called, as most of the network code cleanup
400 * (which is done from subsys notifiers) runs with the assumption that
401 * dev_remove_pack has been called so no new packets will arrive during
402 * and after the cleanup functions have been called.  dev_remove_pack
403 * is not per namespace so instead the guarantee of no more packets
404 * arriving in a network namespace is provided by ensuring that all
405 * network devices and all sockets have left the network namespace
406 * before the cleanup methods are called.
407 *
408 * For the longest time the ipv4 icmp code was registered as a pernet
409 * device, which caused kernel oopses and panics during network
410 * namespace cleanup.  So please don't get this wrong.
411 */
412int register_pernet_subsys(struct pernet_operations *);
413void unregister_pernet_subsys(struct pernet_operations *);
414int register_pernet_device(struct pernet_operations *);
415void unregister_pernet_device(struct pernet_operations *);
416
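/*
 * Illustrative sketch (not part of this header) of the exit_batch pattern
 * described above: unpublish the RCU-visible per-net objects of every dying
 * namespace, pay for one synchronize_rcu() covering the whole batch, and
 * only then free them.  bar_unhash_all(), bar_free_all() and bar_net_ops
 * are hypothetical.
 */
static void bar_unhash_all(struct net *net)
{
	/* make this namespace's entries unreachable to new readers */
}

static void bar_free_all(struct net *net)
{
	/* free what pre-grace-period readers might still have seen */
}

static void __net_exit bar_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		bar_unhash_all(net);

	synchronize_rcu();	/* one grace period for the whole batch */

	list_for_each_entry(net, net_exit_list, exit_list)
		bar_free_all(net);
}

static struct pernet_operations bar_net_ops = {
	.exit_batch = bar_exit_batch,
};
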
417struct ctl_table;
418struct ctl_table_header;
419
420#ifdef CONFIG_SYSCTL
421int net_sysctl_init(void);
422struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
423					     struct ctl_table *table);
424void unregister_net_sysctl_table(struct ctl_table_header *header);
425#else
426static inline int net_sysctl_init(void) { return 0; }
427static inline struct ctl_table_header *register_net_sysctl(struct net *net,
428	const char *path, struct ctl_table *table)
429{
430	return NULL;
431}
432static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
433{
434}
435#endif
436
437static inline int rt_genid_ipv4(const struct net *net)
438{
439	return atomic_read(&net->ipv4.rt_genid);
440}
441
442#if IS_ENABLED(CONFIG_IPV6)
443static inline int rt_genid_ipv6(const struct net *net)
444{
445	return atomic_read(&net->ipv6.fib6_sernum);
446}
447#endif
448
449static inline void rt_genid_bump_ipv4(struct net *net)
450{
451	atomic_inc(&net->ipv4.rt_genid);
452}
453
454extern void (*__fib6_flush_trees)(struct net *net);
455static inline void rt_genid_bump_ipv6(struct net *net)
456{
457	if (__fib6_flush_trees)
458		__fib6_flush_trees(net);
459}
460
461#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
462static inline struct netns_ieee802154_lowpan *
463net_ieee802154_lowpan(struct net *net)
464{
465	return &net->ieee802154_lowpan;
466}
467#endif
468
469/* For callers who don't really care about whether it's IPv4 or IPv6 */
470static inline void rt_genid_bump_all(struct net *net)
471{
472	rt_genid_bump_ipv4(net);
473	rt_genid_bump_ipv6(net);
474}
475
476static inline int fnhe_genid(const struct net *net)
477{
478	return atomic_read(&net->fnhe_genid);
479}
480
481static inline void fnhe_genid_bump(struct net *net)
482{
483	atomic_inc(&net->fnhe_genid);
484}
485
486#endif /* __NET_NET_NAMESPACE_H */