// net/core/net_namespace.c (Linux v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
#include <linux/cookie.h>
#include <linux/proc_fs.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

struct net init_net;
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem protects pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object; please don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

DEFINE_COOKIE(net_cookie);

static struct net_generic *net_alloc_generic(void)
{
	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
	unsigned int generic_size;
	struct net_generic *ng;

	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;
	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see the rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
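
/*
 * Example (editor's sketch, not part of the kernel sources): the read
 * side that the synchronisation notes above refer to. A subsystem that
 * registered an id/size pair (the hypothetical foo_net_id below)
 * fetches its per-netns data locklessly via net_generic(), which
 * dereferences net->gen under RCU:
 *
 *	struct foo_pernet { u32 counter; };
 *	static unsigned int foo_net_id;
 *
 *	static struct foo_pernet *foo_pernet(struct net *net)
 *	{
 *		return net_generic(net, foo_net_id);
 *	}
 *
 * The returned pointer stays valid for the lifetime of the namespace,
 * which is why net_assign_generic() may only grow the array and never
 * move an already-published ptr[] slot.
 */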

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	struct net_generic *ng;
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

	if (ops->id) {
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_pre_exit_list(const struct pernet_operations *ops,
			      struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->pre_exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->pre_exit(net);
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list) {
			ops->exit(net);
			cond_resched();
		}
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			kfree(net_generic(net, *ops->id));
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Must be called from RCU-critical section or with nsid_lock held */
static int __peernet2id(const struct net *net, struct net *peer)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	return NETNSA_NSID_NOT_ASSIGNED;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	if (refcount_read(&net->ns.count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, and this guarantees
	 * we never hash a peer back to net->netns_ids, after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock_bh(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	id = alloc_netid(net, peer, -1);
	spin_unlock_bh(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
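
/*
 * Example (editor's sketch): a typical caller is rtnetlink when it fills
 * IFLA_LINK_NETNSID for a device whose link partner lives in another
 * namespace; the id is allocated on demand and then reported, roughly:
 *
 *	int nsid = peernet2id_alloc(dev_net(dev), link_net, GFP_KERNEL);
 *
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, nsid))
 *		goto nla_put_failure;
 */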

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(const struct net *net, struct net *peer)
{
	int id;

	rcu_read_lock();
	id = __peernet2id(net, peer);
	rcu_read_unlock();

	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(const struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(const struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);

static __net_init void preinit_net_sysctl(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	/* Limits per socket sk_omem_alloc usage.
	 * TCP zerocopy regular usage needs 128 KB.
	 */
	net->core.sysctl_optmem_max = 128 * 1024;
	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
	net->core.sysctl_tstamp_allow_data = 1;
}

/* init code that must occur even if setup_net() is not called. */
static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
{
	refcount_set(&net->passive, 1);
	refcount_set(&net->ns.count, 1);
	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");

	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;

	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
	mutex_init(&net->rtnl_mutex);
	lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL);
#endif

	preinit_net_sysctl(net);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops, *saved_ops;
	LIST_HEAD(net_exit_list);
	LIST_HEAD(dev_kill_list);
	int error = 0;

	preempt_disable();
	net->net_cookie = gen_cookie_next(&net_cookie);
	preempt_enable();

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	synchronize_rcu();

	ops = saved_ops;
	rtnl_lock();
	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;
#endif
out_free:
	kfree(ng);
	goto out;
}

static LLIST_HEAD(defer_free_list);

static void net_complete_free(void)
{
	struct llist_node *kill_list;
	struct net *net, *next;

	/* Get the list of namespaces to free from last round. */
	kill_list = llist_del_all(&defer_free_list);

	llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
		kmem_cache_free(net_cachep, net);
}

void net_passive_dec(struct net *net)
{
	if (refcount_dec_and_test(&net->passive)) {
		kfree(rcu_access_pointer(net->gen));

		/* There should not be any trackers left there. */
		ref_tracker_dir_exit(&net->notrefcnt_tracker);

		/* Wait for an extra rcu_barrier() before final free. */
		llist_add(&net->defer_free_list, &defer_free_list);
	}
}

void net_drop_ns(void *p)
{
	struct net *net = (struct net *)p;

	if (net)
		net_passive_dec(net);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}

	preinit_net(net, user_ns);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(user_ns);
		net_passive_dec(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);
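
/*
 * Example (editor's sketch): net-sysfs uses this helper when deciding
 * who owns a netdev's sysfs entries, roughly:
 *
 *	kuid_t uid;
 *	kgid_t gid;
 *
 *	net_ns_get_ownership(dev_net(dev), &uid, &gid);
 *
 * so that a container's root user can manage the attributes of devices
 * living in its own network namespace.
 */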

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;

	/* This function is only called from the cleanup_net() work,
	 * and that work is the only process that may delete a net
	 * from net_namespace_list. So, while the code below is
	 * executing, the list may only grow. Thus, we do not need
	 * for_each_net_rcu() or net_rwsem here.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
					  GFP_KERNEL);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);
	LIST_HEAD(dev_kill_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock net_rwsem, no new net
	 * added to net_namespace_list can assign an nsid to a net
	 * from net_kill_list (see peernet2id_alloc()), so we skip
	 * them in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they've already been
	 * deleted from net_namespace_list. That would be useless
	 * anyway, as their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/* Run all of the network namespace pre_exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	/*
	 * Another CPU might be rcu-iterating the list; wait for it.
	 * This needs to happen before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 * The pre_exit() and exit() methods need this barrier as well.
	 */
	synchronize_rcu_expedited();

	rtnl_lock();
	list_for_each_entry_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	net_complete_free();

	/* Finally it is safe to free the network namespace structures */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(net->user_ns);
		net_passive_dec(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from a work queue and will first remove namespaces
 * from the global list, then run the net exit functions.
 *
 * Call this in a module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);
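
/*
 * Example (editor's sketch): a module that attaches state to struct net
 * through means other than pernet_operations (netfilter hooks, say) can
 * use the barrier in its exit path to guarantee no cleanup_net() work
 * is still running its callbacks:
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		// hypothetical teardown of foo's per-net hooks
 *		foo_unregister_hooks();
 *		net_ns_barrier();
 *		// no netns exit path can still reference foo's state here
 *	}
 */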

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	ref_tracker_dir_exit(&net->refcnt_tracker);
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/**
 * get_net_ns - increment the refcount of the network namespace
 * @ns: common namespace (net)
 *
 * Returns the net's common namespace or ERR_PTR() if ref is zero.
 */
struct ns_common *get_net_ns(struct ns_common *ns)
{
	struct net *net;

	net = maybe_get_net(container_of(ns, struct net, ns));
	if (net)
		return &net->ns;
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);

struct net *get_net_ns_by_fd(int fd)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	if (proc_ns_file(fd_file(f))) {
		struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));

		if (ns->ops == &netns_operations)
			return get_net(container_of(ns, struct net, ns));
	}

	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}
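
/*
 * Example (editor's sketch): this handler is what userspace reaches via
 * something like "ip netns set <name> <nsid>", i.e. an RTM_NEWNSID
 * request carrying NETNSA_FD (or NETNSA_PID) to name the peer and
 * NETNSA_NSID with the requested id; a negative id lets alloc_netid()
 * above pick the first free slot.
 */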

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
	       ;
}

struct net_fill_args {
	u32 portid;
	u32 seq;
	int flags;
	int cmd;
	int nsid;
	bool add_ref;
	int ref_nsid;
};

static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *tgt_net;
	struct net *ref_net;
	struct sk_buff *skb;
	struct net_fill_args fillargs;
	int idx;
	int s_idx;
};

/* Runs in RCU-critical section. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

#ifdef CONFIG_NET_NS
static void __init netns_ipv4_struct_check(void)
{
	/* TX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_early_retrans);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_win_divisor);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_rtt_log);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_autocorking);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_snd_mss);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_notsent_lowat);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_limit_output_bytes);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_rtt_wlen);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_wmem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_ip_fwd_use_pmtu);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);

	/* TXRX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
				      sysctl_tcp_moderate_rcvbuf);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);

	/* RX readonly hotpath cache line */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_ip_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_l3mdev_accept);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_reordering);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_rmem);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22);
}
#endif

static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
	{.msgtype = RTM_GETNSID, .doit = rtnl_net_getid,
	 .dumpit = rtnl_net_dumpid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
};

void __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	netns_ipv4_struct_check();
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

#ifdef CONFIG_KEYS
	init_net.key_domain = &init_net_key_domain;
#endif
	preinit_net(&init_net, &init_user_ns);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register_many(net_ns_rtnl_msg_handlers);
}

static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
{
	ops_pre_exit_list(ops, net_exit_list);
	synchronize_rcu();

	if (ops->exit_batch_rtnl) {
		LIST_HEAD(dev_kill_list);

		rtnl_lock();
		ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
		unregister_netdevice_many(&dev_kill_list);
		rtnl_unlock();
	}
	ops_exit_list(ops, net_exit_list);

	ops_free_list(ops, net_exit_list);
}

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || ops->id) {
		/* We hold the write-locked pernet_ops_rwsem, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* On error, clean up all the namespaces we initialized */
	list_del(&ops->list);
	free_exit_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);

	free_exit_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		free_exit_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (WARN_ON(!!ops->id ^ !!ops->size))
		return -EINVAL;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				      GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* This does not require READ_ONCE as writers already hold
		 * pernet_ops_rwsem. But WRITE_ONCE is needed to protect
		 * the lockless reader in net_alloc_generic().
		 */
		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}

/**
 *	register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
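
/*
 * Example (editor's sketch): the canonical way a module uses this API;
 * "foo" and its members are hypothetical:
 *
 *	static unsigned int foo_net_id;
 *
 *	struct foo_pernet {
 *		struct list_head entries;
 *	};
 *
 *	static __net_init int foo_init_net(struct net *net)
 *	{
 *		struct foo_pernet *fn = net_generic(net, foo_net_id);
 *
 *		INIT_LIST_HEAD(&fn->entries);
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_pernet),
 *	};
 *
 * Registering with register_pernet_subsys(&foo_net_ops) makes ops_init()
 * allocate a zeroed struct foo_pernet for every namespace, current and
 * future; unregister_pernet_subsys(&foo_net_ops) tears it all down again.
 * Note the WARN_ON above: .id and .size must be set together or not at all.
 */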

/**
 *	unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *	register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *	unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
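
/*
 * Example (editor's sketch): userspace reaches netns_install() above via
 * setns(2) on a netns file descriptor, e.g.:
 *
 *	int fd = open("/run/netns/vpn0", O_RDONLY | O_CLOEXEC);
 *
 *	if (fd >= 0 && setns(fd, CLONE_NEWNET) == 0)
 *		;	// this task now runs in the "vpn0" namespace
 *
 * where /run/netns/vpn0 is a bind mount of a /proc/<pid>/ns/net file,
 * as created by "ip netns add vpn0". Both capability checks in
 * netns_install() must pass for the switch to succeed.
 */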
#endif
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/workqueue.h>
   5#include <linux/rtnetlink.h>
   6#include <linux/cache.h>
   7#include <linux/slab.h>
   8#include <linux/list.h>
   9#include <linux/delay.h>
  10#include <linux/sched.h>
  11#include <linux/idr.h>
  12#include <linux/rculist.h>
  13#include <linux/nsproxy.h>
  14#include <linux/fs.h>
  15#include <linux/proc_ns.h>
  16#include <linux/file.h>
  17#include <linux/export.h>
  18#include <linux/user_namespace.h>
  19#include <linux/net_namespace.h>
  20#include <linux/sched/task.h>
  21#include <linux/uidgid.h>
  22#include <linux/cookie.h>
  23#include <linux/proc_fs.h>
  24
  25#include <net/sock.h>
  26#include <net/netlink.h>
  27#include <net/net_namespace.h>
  28#include <net/netns/generic.h>
  29
  30/*
  31 *	Our network namespace constructor/destructor lists
  32 */
  33
  34static LIST_HEAD(pernet_list);
  35static struct list_head *first_device = &pernet_list;
  36
  37LIST_HEAD(net_namespace_list);
  38EXPORT_SYMBOL_GPL(net_namespace_list);
  39
  40/* Protects net_namespace_list. Nests iside rtnl_lock() */
  41DECLARE_RWSEM(net_rwsem);
  42EXPORT_SYMBOL_GPL(net_rwsem);
  43
  44#ifdef CONFIG_KEYS
  45static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
  46#endif
  47
  48struct net init_net;
  49EXPORT_SYMBOL(init_net);
  50
  51static bool init_net_initialized;
  52/*
  53 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
  54 * init_net_initialized and first_device pointer.
  55 * This is internal net namespace object. Please, don't use it
  56 * outside.
  57 */
  58DECLARE_RWSEM(pernet_ops_rwsem);
  59EXPORT_SYMBOL_GPL(pernet_ops_rwsem);
  60
  61#define MIN_PERNET_OPS_ID	\
  62	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
  63
  64#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
  65
  66static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
  67
  68DEFINE_COOKIE(net_cookie);
  69
  70static struct net_generic *net_alloc_generic(void)
  71{
 
 
  72	struct net_generic *ng;
  73	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
 
  74
  75	ng = kzalloc(generic_size, GFP_KERNEL);
  76	if (ng)
  77		ng->s.len = max_gen_ptrs;
  78
  79	return ng;
  80}
  81
  82static int net_assign_generic(struct net *net, unsigned int id, void *data)
  83{
  84	struct net_generic *ng, *old_ng;
  85
  86	BUG_ON(id < MIN_PERNET_OPS_ID);
  87
  88	old_ng = rcu_dereference_protected(net->gen,
  89					   lockdep_is_held(&pernet_ops_rwsem));
  90	if (old_ng->s.len > id) {
  91		old_ng->ptr[id] = data;
  92		return 0;
  93	}
  94
  95	ng = net_alloc_generic();
  96	if (!ng)
  97		return -ENOMEM;
  98
  99	/*
 100	 * Some synchronisation notes:
 101	 *
 102	 * The net_generic explores the net->gen array inside rcu
 103	 * read section. Besides once set the net->gen->ptr[x]
 104	 * pointer never changes (see rules in netns/generic.h).
 105	 *
 106	 * That said, we simply duplicate this array and schedule
 107	 * the old copy for kfree after a grace period.
 108	 */
 109
 110	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
 111	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
 112	ng->ptr[id] = data;
 113
 114	rcu_assign_pointer(net->gen, ng);
 115	kfree_rcu(old_ng, s.rcu);
 116	return 0;
 117}
 118
 119static int ops_init(const struct pernet_operations *ops, struct net *net)
 120{
 121	struct net_generic *ng;
 122	int err = -ENOMEM;
 123	void *data = NULL;
 124
 125	if (ops->id && ops->size) {
 126		data = kzalloc(ops->size, GFP_KERNEL);
 127		if (!data)
 128			goto out;
 129
 130		err = net_assign_generic(net, *ops->id, data);
 131		if (err)
 132			goto cleanup;
 133	}
 134	err = 0;
 135	if (ops->init)
 136		err = ops->init(net);
 137	if (!err)
 138		return 0;
 139
 140	if (ops->id && ops->size) {
 141		ng = rcu_dereference_protected(net->gen,
 142					       lockdep_is_held(&pernet_ops_rwsem));
 143		ng->ptr[*ops->id] = NULL;
 144	}
 145
 146cleanup:
 147	kfree(data);
 148
 149out:
 150	return err;
 151}
 152
 153static void ops_pre_exit_list(const struct pernet_operations *ops,
 154			      struct list_head *net_exit_list)
 155{
 156	struct net *net;
 157
 158	if (ops->pre_exit) {
 159		list_for_each_entry(net, net_exit_list, exit_list)
 160			ops->pre_exit(net);
 161	}
 162}
 163
 164static void ops_exit_list(const struct pernet_operations *ops,
 165			  struct list_head *net_exit_list)
 166{
 167	struct net *net;
 168	if (ops->exit) {
 169		list_for_each_entry(net, net_exit_list, exit_list) {
 170			ops->exit(net);
 171			cond_resched();
 172		}
 173	}
 174	if (ops->exit_batch)
 175		ops->exit_batch(net_exit_list);
 176}
 177
 178static void ops_free_list(const struct pernet_operations *ops,
 179			  struct list_head *net_exit_list)
 180{
 181	struct net *net;
 182	if (ops->size && ops->id) {
 
 183		list_for_each_entry(net, net_exit_list, exit_list)
 184			kfree(net_generic(net, *ops->id));
 185	}
 186}
 187
 188/* should be called with nsid_lock held */
 189static int alloc_netid(struct net *net, struct net *peer, int reqid)
 190{
 191	int min = 0, max = 0;
 192
 193	if (reqid >= 0) {
 194		min = reqid;
 195		max = reqid + 1;
 196	}
 197
 198	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
 199}
 200
 201/* This function is used by idr_for_each(). If net is equal to peer, the
 202 * function returns the id so that idr_for_each() stops. Because we cannot
 203 * returns the id 0 (idr_for_each() will not stop), we return the magic value
 204 * NET_ID_ZERO (-1) for it.
 205 */
 206#define NET_ID_ZERO -1
 207static int net_eq_idr(int id, void *net, void *peer)
 208{
 209	if (net_eq(net, peer))
 210		return id ? : NET_ID_ZERO;
 211	return 0;
 212}
 213
 214/* Must be called from RCU-critical section or with nsid_lock held */
 215static int __peernet2id(const struct net *net, struct net *peer)
 216{
 217	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
 218
 219	/* Magic value for id 0. */
 220	if (id == NET_ID_ZERO)
 221		return 0;
 222	if (id > 0)
 223		return id;
 224
 225	return NETNSA_NSID_NOT_ASSIGNED;
 226}
 227
 228static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
 229			      struct nlmsghdr *nlh, gfp_t gfp);
 230/* This function returns the id of a peer netns. If no id is assigned, one will
 231 * be allocated and returned.
 232 */
 233int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 234{
 235	int id;
 236
 237	if (refcount_read(&net->ns.count) == 0)
 238		return NETNSA_NSID_NOT_ASSIGNED;
 239
 240	spin_lock_bh(&net->nsid_lock);
 241	id = __peernet2id(net, peer);
 242	if (id >= 0) {
 243		spin_unlock_bh(&net->nsid_lock);
 244		return id;
 245	}
 246
 247	/* When peer is obtained from RCU lists, we may race with
 248	 * its cleanup. Check whether it's alive, and this guarantees
 249	 * we never hash a peer back to net->netns_ids, after it has
 250	 * just been idr_remove()'d from there in cleanup_net().
 251	 */
 252	if (!maybe_get_net(peer)) {
 253		spin_unlock_bh(&net->nsid_lock);
 254		return NETNSA_NSID_NOT_ASSIGNED;
 255	}
 256
 257	id = alloc_netid(net, peer, -1);
 258	spin_unlock_bh(&net->nsid_lock);
 259
 260	put_net(peer);
 261	if (id < 0)
 262		return NETNSA_NSID_NOT_ASSIGNED;
 263
 264	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
 265
 266	return id;
 267}
 268EXPORT_SYMBOL_GPL(peernet2id_alloc);
 269
 270/* This function returns, if assigned, the id of a peer netns. */
 271int peernet2id(const struct net *net, struct net *peer)
 272{
 273	int id;
 274
 275	rcu_read_lock();
 276	id = __peernet2id(net, peer);
 277	rcu_read_unlock();
 278
 279	return id;
 280}
 281EXPORT_SYMBOL(peernet2id);
 282
 283/* This function returns true is the peer netns has an id assigned into the
 284 * current netns.
 285 */
 286bool peernet_has_id(const struct net *net, struct net *peer)
 287{
 288	return peernet2id(net, peer) >= 0;
 289}
 290
 291struct net *get_net_ns_by_id(const struct net *net, int id)
 292{
 293	struct net *peer;
 294
 295	if (id < 0)
 296		return NULL;
 297
 298	rcu_read_lock();
 299	peer = idr_find(&net->netns_ids, id);
 300	if (peer)
 301		peer = maybe_get_net(peer);
 302	rcu_read_unlock();
 303
 304	return peer;
 305}
 306EXPORT_SYMBOL_GPL(get_net_ns_by_id);
 307
 
 
 
 
 
 
 
 
 
 
 
 308/* init code that must occur even if setup_net() is not called. */
 309static __net_init void preinit_net(struct net *net)
 310{
 
 
 
 311	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 312}
 313
 314/*
 315 * setup_net runs the initializers for the network namespace object.
 316 */
 317static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 318{
 319	/* Must be called with pernet_ops_rwsem held */
 320	const struct pernet_operations *ops, *saved_ops;
 
 
 321	int error = 0;
 322	LIST_HEAD(net_exit_list);
 323
 324	refcount_set(&net->ns.count, 1);
 325	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
 326
 327	refcount_set(&net->passive, 1);
 328	get_random_bytes(&net->hash_mix, sizeof(u32));
 329	preempt_disable();
 330	net->net_cookie = gen_cookie_next(&net_cookie);
 331	preempt_enable();
 332	net->dev_base_seq = 1;
 333	net->user_ns = user_ns;
 334	idr_init(&net->netns_ids);
 335	spin_lock_init(&net->nsid_lock);
 336	mutex_init(&net->ipv4.ra_mutex);
 337
 338	list_for_each_entry(ops, &pernet_list, list) {
 339		error = ops_init(ops, net);
 340		if (error < 0)
 341			goto out_undo;
 342	}
 343	down_write(&net_rwsem);
 344	list_add_tail_rcu(&net->list, &net_namespace_list);
 345	up_write(&net_rwsem);
 346out:
 347	return error;
 348
 349out_undo:
 350	/* Walk through the list backwards calling the exit functions
 351	 * for the pernet modules whose init functions did not fail.
 352	 */
 353	list_add(&net->exit_list, &net_exit_list);
 354	saved_ops = ops;
 355	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
 356		ops_pre_exit_list(ops, &net_exit_list);
 357
 358	synchronize_rcu();
 359
 360	ops = saved_ops;
 
 
 
 
 
 
 
 
 
 361	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
 362		ops_exit_list(ops, &net_exit_list);
 363
 364	ops = saved_ops;
 365	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
 366		ops_free_list(ops, &net_exit_list);
 367
 368	rcu_barrier();
 369	goto out;
 370}
 371
 372static int __net_init net_defaults_init_net(struct net *net)
 373{
 374	net->core.sysctl_somaxconn = SOMAXCONN;
 375	/* Limits per socket sk_omem_alloc usage.
 376	 * TCP zerocopy regular usage needs 128 KB.
 377	 */
 378	net->core.sysctl_optmem_max = 128 * 1024;
 379	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
 380
 381	return 0;
 382}
 383
 384static struct pernet_operations net_defaults_ops = {
 385	.init = net_defaults_init_net,
 386};
 387
 388static __init int net_defaults_init(void)
 389{
 390	if (register_pernet_subsys(&net_defaults_ops))
 391		panic("Cannot initialize net default settings");
 392
 393	return 0;
 394}
 395
 396core_initcall(net_defaults_init);
 397
 398#ifdef CONFIG_NET_NS
 399static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
 400{
 401	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
 402}
 403
 404static void dec_net_namespaces(struct ucounts *ucounts)
 405{
 406	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
 407}
 408
 409static struct kmem_cache *net_cachep __ro_after_init;
 410static struct workqueue_struct *netns_wq;
 411
 412static struct net *net_alloc(void)
 413{
 414	struct net *net = NULL;
 415	struct net_generic *ng;
 416
 417	ng = net_alloc_generic();
 418	if (!ng)
 419		goto out;
 420
 421	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
 422	if (!net)
 423		goto out_free;
 424
 425#ifdef CONFIG_KEYS
 426	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
 427	if (!net->key_domain)
 428		goto out_free_2;
 429	refcount_set(&net->key_domain->usage, 1);
 430#endif
 431
 432	rcu_assign_pointer(net->gen, ng);
 433out:
 434	return net;
 435
 436#ifdef CONFIG_KEYS
 437out_free_2:
 438	kmem_cache_free(net_cachep, net);
 439	net = NULL;
 440#endif
 441out_free:
 442	kfree(ng);
 443	goto out;
 444}
 445
 446static void net_free(struct net *net)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 447{
 448	if (refcount_dec_and_test(&net->passive)) {
 449		kfree(rcu_access_pointer(net->gen));
 450
 451		/* There should not be any trackers left there. */
 452		ref_tracker_dir_exit(&net->notrefcnt_tracker);
 453
 454		kmem_cache_free(net_cachep, net);
 
 455	}
 456}
 457
 458void net_drop_ns(void *p)
 459{
 460	struct net *net = (struct net *)p;
 461
 462	if (net)
 463		net_free(net);
 464}
 465
 466struct net *copy_net_ns(unsigned long flags,
 467			struct user_namespace *user_ns, struct net *old_net)
 468{
 469	struct ucounts *ucounts;
 470	struct net *net;
 471	int rv;
 472
 473	if (!(flags & CLONE_NEWNET))
 474		return get_net(old_net);
 475
 476	ucounts = inc_net_namespaces(user_ns);
 477	if (!ucounts)
 478		return ERR_PTR(-ENOSPC);
 479
 480	net = net_alloc();
 481	if (!net) {
 482		rv = -ENOMEM;
 483		goto dec_ucounts;
 484	}
 485
 486	preinit_net(net);
 487	refcount_set(&net->passive, 1);
 488	net->ucounts = ucounts;
 489	get_user_ns(user_ns);
 490
 491	rv = down_read_killable(&pernet_ops_rwsem);
 492	if (rv < 0)
 493		goto put_userns;
 494
 495	rv = setup_net(net, user_ns);
 496
 497	up_read(&pernet_ops_rwsem);
 498
 499	if (rv < 0) {
 500put_userns:
 501#ifdef CONFIG_KEYS
 502		key_remove_domain(net->key_domain);
 503#endif
 504		put_user_ns(user_ns);
 505		net_free(net);
 506dec_ucounts:
 507		dec_net_namespaces(ucounts);
 508		return ERR_PTR(rv);
 509	}
 510	return net;
 511}
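/* Editor's illustration (hedged, not part of this file): copy_net_ns() is
 * reached from userspace via clone(2) or unshare(2) with CLONE_NEWNET.
 * A minimal caller:
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Requires CAP_SYS_ADMIN in the current user namespace. */
	if (unshare(CLONE_NEWNET) < 0) {
		perror("unshare(CLONE_NEWNET)");
		return 1;
	}
	/* The task now runs in a freshly set-up namespace containing
	 * only a down loopback device. */
	puts("entered new network namespace");
	return 0;
}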
 512
 513/**
 514 * net_ns_get_ownership - get sysfs ownership data for @net
 515 * @net: network namespace in question (can be NULL)
 516 * @uid: kernel user ID for sysfs objects
 517 * @gid: kernel group ID for sysfs objects
 518 *
 519 * Returns the uid/gid pair of root in the user namespace associated with the
 520 * given network namespace.
 521 */
 522void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
 523{
 524	if (net) {
 525		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
 526		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);
 527
 528		if (uid_valid(ns_root_uid))
 529			*uid = ns_root_uid;
 530
 531		if (gid_valid(ns_root_gid))
 532			*gid = ns_root_gid;
 533	} else {
 534		*uid = GLOBAL_ROOT_UID;
 535		*gid = GLOBAL_ROOT_GID;
 536	}
 537}
 538EXPORT_SYMBOL_GPL(net_ns_get_ownership);
 539
 540static void unhash_nsid(struct net *net, struct net *last)
 541{
 542	struct net *tmp;
  543	/* This function is only called from the cleanup_net() work,
  544	 * and that work is the only context that may delete a net
  545	 * from net_namespace_list. So, while the code below runs,
  546	 * the list may only grow. Thus, we do not need
  547	 * for_each_net_rcu() or net_rwsem here.
  548	 */
 549	for_each_net(tmp) {
 550		int id;
 551
 552		spin_lock_bh(&tmp->nsid_lock);
 553		id = __peernet2id(tmp, net);
 554		if (id >= 0)
 555			idr_remove(&tmp->netns_ids, id);
 556		spin_unlock_bh(&tmp->nsid_lock);
 557		if (id >= 0)
 558			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
 559					  GFP_KERNEL);
 560		if (tmp == last)
 561			break;
 562	}
 563	spin_lock_bh(&net->nsid_lock);
 564	idr_destroy(&net->netns_ids);
 565	spin_unlock_bh(&net->nsid_lock);
 566}
 567
 568static LLIST_HEAD(cleanup_list);
 569
 570static void cleanup_net(struct work_struct *work)
 571{
 572	const struct pernet_operations *ops;
 573	struct net *net, *tmp, *last;
 574	struct llist_node *net_kill_list;
 575	LIST_HEAD(net_exit_list);
 576
  577	/* Atomically snapshot the list of namespaces to clean up */
 578	net_kill_list = llist_del_all(&cleanup_list);
 579
 580	down_read(&pernet_ops_rwsem);
 581
 582	/* Don't let anyone else find us. */
 583	down_write(&net_rwsem);
 584	llist_for_each_entry(net, net_kill_list, cleanup_list)
 585		list_del_rcu(&net->list);
  586	/* Cache the last net. After we release net_rwsem, no net
  587	 * newly added to net_namespace_list can assign an nsid
  588	 * to a net from net_kill_list (see peernet2id_alloc()),
  589	 * so we skip such nets in unhash_nsid().
  590	 *
  591	 * Note that unhash_nsid() does not delete nsid links
  592	 * between net_kill_list's nets, as they have already been
  593	 * deleted from net_namespace_list. That would be useless
  594	 * anyway, as their netns_ids are destroyed there.
  595	 */
 596	last = list_last_entry(&net_namespace_list, struct net, list);
 597	up_write(&net_rwsem);
 598
 599	llist_for_each_entry(net, net_kill_list, cleanup_list) {
 600		unhash_nsid(net, last);
 601		list_add_tail(&net->exit_list, &net_exit_list);
 602	}
 603
 604	/* Run all of the network namespace pre_exit methods */
 605	list_for_each_entry_reverse(ops, &pernet_list, list)
 606		ops_pre_exit_list(ops, &net_exit_list);
 607
  608	/*
  609	 * Another CPU might be rcu-iterating the list; wait for it.
  610	 * This needs to happen before calling the exit() methods, so
  611	 * the rcu_barrier() below isn't sufficient on its own.
  612	 * The pre_exit() and exit() methods also need this barrier.
  613	 */
 614	synchronize_rcu();
 615
 616	/* Run all of the network namespace exit methods */
 617	list_for_each_entry_reverse(ops, &pernet_list, list)
 618		ops_exit_list(ops, &net_exit_list);
 619
 620	/* Free the net generic variables */
 621	list_for_each_entry_reverse(ops, &pernet_list, list)
 622		ops_free_list(ops, &net_exit_list);
 623
 624	up_read(&pernet_ops_rwsem);
 625
 626	/* Ensure there are no outstanding rcu callbacks using this
 627	 * network namespace.
 628	 */
 629	rcu_barrier();
 630
  631	/* Finally it is safe to free the network namespace structures */
 632	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
 633		list_del_init(&net->exit_list);
 634		dec_net_namespaces(net->ucounts);
 635#ifdef CONFIG_KEYS
 636		key_remove_domain(net->key_domain);
 637#endif
 638		put_user_ns(net->user_ns);
 639		net_free(net);
 640	}
 641}
 642
 643/**
 644 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 645 *
  646 * cleanup_net() runs from a work queue and first removes namespaces
  647 * from the global list, then runs the net exit functions.
  648 *
  649 * Call this in a module's exit path to make sure that all netns
  650 * ->exit ops have been invoked before the module's code is removed.
 651 */
 652void net_ns_barrier(void)
 653{
 654	down_write(&pernet_ops_rwsem);
 655	up_write(&pernet_ops_rwsem);
 656}
 657EXPORT_SYMBOL(net_ns_barrier);
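/* Editor's sketch (hedged; the "example_*" names are hypothetical): a
 * module can call net_ns_barrier() in its exit path so that any
 * concurrent cleanup_net() has finished running pernet ->exit methods
 * before the module's code goes away:
 */
#include <linux/module.h>
#include <net/net_namespace.h>

static void example_detach(void)
{
	/* Stop anything that could make cleanup_net() call back into
	 * this module (hooks, notifiers, ...). */
}

static void __exit example_exit(void)
{
	example_detach();
	net_ns_barrier();	/* waits out any in-flight cleanup_net() */
}
module_exit(example_exit);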
 658
 659static DECLARE_WORK(net_cleanup_work, cleanup_net);
 660
 661void __put_net(struct net *net)
 662{
 663	ref_tracker_dir_exit(&net->refcnt_tracker);
 664	/* Cleanup the network namespace in process context */
 665	if (llist_add(&net->cleanup_list, &cleanup_list))
 666		queue_work(netns_wq, &net_cleanup_work);
 667}
 668EXPORT_SYMBOL_GPL(__put_net);
 669
 670/**
 671 * get_net_ns - increment the refcount of the network namespace
 672 * @ns: common namespace (net)
 673 *
 674 * Returns the net's common namespace.
 675 */
 676struct ns_common *get_net_ns(struct ns_common *ns)
 677{
 678	return &get_net(container_of(ns, struct net, ns))->ns;
 679}
 680EXPORT_SYMBOL_GPL(get_net_ns);
 681
 682struct net *get_net_ns_by_fd(int fd)
 683{
 684	struct fd f = fdget(fd);
 685	struct net *net = ERR_PTR(-EINVAL);
 686
 687	if (!f.file)
 688		return ERR_PTR(-EBADF);
 689
 690	if (proc_ns_file(f.file)) {
 691		struct ns_common *ns = get_proc_ns(file_inode(f.file));
 692		if (ns->ops == &netns_operations)
 693			net = get_net(container_of(ns, struct net, ns));
 694	}
 695	fdput(f);
 696
 697	return net;
 698}
 699EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
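/* Editor's illustration (hedged, not part of this file): the fd that
 * get_net_ns_by_fd() resolves is an nsfs file, typically obtained by
 * opening /proc/<pid>/ns/net:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/self/ns/net", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Such an fd can be passed to setns(2), or carried in the
	 * NETNSA_FD netlink attribute handled further below. */
	close(fd);
	return 0;
}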
 700#endif
 701
 702struct net *get_net_ns_by_pid(pid_t pid)
 703{
 704	struct task_struct *tsk;
 705	struct net *net;
 706
  707	/* Look up the network namespace */
 708	net = ERR_PTR(-ESRCH);
 709	rcu_read_lock();
 710	tsk = find_task_by_vpid(pid);
 711	if (tsk) {
 712		struct nsproxy *nsproxy;
 713		task_lock(tsk);
 714		nsproxy = tsk->nsproxy;
 715		if (nsproxy)
 716			net = get_net(nsproxy->net_ns);
 717		task_unlock(tsk);
 718	}
 719	rcu_read_unlock();
 720	return net;
 721}
 722EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
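/* Editor's note: get_net_ns_by_pid() and get_net_ns_by_fd() above are the
 * resolvers behind the NETNSA_PID and NETNSA_FD attributes accepted by the
 * RTM_NEWNSID/RTM_GETNSID handlers below. */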
 723
 724static __net_init int net_ns_net_init(struct net *net)
 725{
 726#ifdef CONFIG_NET_NS
 727	net->ns.ops = &netns_operations;
 728#endif
 729	return ns_alloc_inum(&net->ns);
 730}
 731
 732static __net_exit void net_ns_net_exit(struct net *net)
 733{
 734	ns_free_inum(&net->ns);
 735}
 736
 737static struct pernet_operations __net_initdata net_ns_ops = {
 738	.init = net_ns_net_init,
 739	.exit = net_ns_net_exit,
 740};
 741
 742static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
 743	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
 744	[NETNSA_NSID]		= { .type = NLA_S32 },
 745	[NETNSA_PID]		= { .type = NLA_U32 },
 746	[NETNSA_FD]		= { .type = NLA_U32 },
 747	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
 748};
 749
 750static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 751			  struct netlink_ext_ack *extack)
 752{
 753	struct net *net = sock_net(skb->sk);
 754	struct nlattr *tb[NETNSA_MAX + 1];
 755	struct nlattr *nla;
 756	struct net *peer;
 757	int nsid, err;
 758
 759	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
 760				     NETNSA_MAX, rtnl_net_policy, extack);
 761	if (err < 0)
 762		return err;
 763	if (!tb[NETNSA_NSID]) {
 764		NL_SET_ERR_MSG(extack, "nsid is missing");
 765		return -EINVAL;
 766	}
 767	nsid = nla_get_s32(tb[NETNSA_NSID]);
 768
 769	if (tb[NETNSA_PID]) {
 770		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
 771		nla = tb[NETNSA_PID];
 772	} else if (tb[NETNSA_FD]) {
 773		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
 774		nla = tb[NETNSA_FD];
 775	} else {
 776		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
 777		return -EINVAL;
 778	}
 779	if (IS_ERR(peer)) {
 780		NL_SET_BAD_ATTR(extack, nla);
 781		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
 782		return PTR_ERR(peer);
 783	}
 784
 785	spin_lock_bh(&net->nsid_lock);
 786	if (__peernet2id(net, peer) >= 0) {
 787		spin_unlock_bh(&net->nsid_lock);
 788		err = -EEXIST;
 789		NL_SET_BAD_ATTR(extack, nla);
 790		NL_SET_ERR_MSG(extack,
 791			       "Peer netns already has a nsid assigned");
 792		goto out;
 793	}
 794
 795	err = alloc_netid(net, peer, nsid);
 796	spin_unlock_bh(&net->nsid_lock);
 797	if (err >= 0) {
 798		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
 799				  nlh, GFP_KERNEL);
 800		err = 0;
 801	} else if (err == -ENOSPC && nsid >= 0) {
 802		err = -EEXIST;
 803		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
 804		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
 805	}
 806out:
 807	put_net(peer);
 808	return err;
 809}
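/* Editor's note (hedged): iproute2 exercises this handler; roughly,
 * "ip netns set <name> <nsid>" sends RTM_NEWNSID with NETNSA_FD referring
 * to /var/run/netns/<name> and the requested NETNSA_NSID. */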
 810
 811static int rtnl_net_get_size(void)
 812{
 813	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
 814	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
 815	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
 816	       ;
 817}
 818
 819struct net_fill_args {
 820	u32 portid;
 821	u32 seq;
 822	int flags;
 823	int cmd;
 824	int nsid;
 825	bool add_ref;
 826	int ref_nsid;
 827};
 828
 829static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
 830{
 831	struct nlmsghdr *nlh;
 832	struct rtgenmsg *rth;
 833
 834	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
 835			args->flags);
 836	if (!nlh)
 837		return -EMSGSIZE;
 838
 839	rth = nlmsg_data(nlh);
 840	rth->rtgen_family = AF_UNSPEC;
 841
 842	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
 843		goto nla_put_failure;
 844
 845	if (args->add_ref &&
 846	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
 847		goto nla_put_failure;
 848
 849	nlmsg_end(skb, nlh);
 850	return 0;
 851
 852nla_put_failure:
 853	nlmsg_cancel(skb, nlh);
 854	return -EMSGSIZE;
 855}
 856
 857static int rtnl_net_valid_getid_req(struct sk_buff *skb,
 858				    const struct nlmsghdr *nlh,
 859				    struct nlattr **tb,
 860				    struct netlink_ext_ack *extack)
 861{
 862	int i, err;
 863
 864	if (!netlink_strict_get_check(skb))
 865		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
 866					      tb, NETNSA_MAX, rtnl_net_policy,
 867					      extack);
 868
 869	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
 870					    NETNSA_MAX, rtnl_net_policy,
 871					    extack);
 872	if (err)
 873		return err;
 874
 875	for (i = 0; i <= NETNSA_MAX; i++) {
 876		if (!tb[i])
 877			continue;
 878
 879		switch (i) {
 880		case NETNSA_PID:
 881		case NETNSA_FD:
 882		case NETNSA_NSID:
 883		case NETNSA_TARGET_NSID:
 884			break;
 885		default:
 886			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
 887			return -EINVAL;
 888		}
 889	}
 890
 891	return 0;
 892}
 893
 894static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
 895			  struct netlink_ext_ack *extack)
 896{
 897	struct net *net = sock_net(skb->sk);
 898	struct nlattr *tb[NETNSA_MAX + 1];
 899	struct net_fill_args fillargs = {
 900		.portid = NETLINK_CB(skb).portid,
 901		.seq = nlh->nlmsg_seq,
 902		.cmd = RTM_NEWNSID,
 903	};
 904	struct net *peer, *target = net;
 905	struct nlattr *nla;
 906	struct sk_buff *msg;
 907	int err;
 908
 909	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
 910	if (err < 0)
 911		return err;
 912	if (tb[NETNSA_PID]) {
 913		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
 914		nla = tb[NETNSA_PID];
 915	} else if (tb[NETNSA_FD]) {
 916		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
 917		nla = tb[NETNSA_FD];
 918	} else if (tb[NETNSA_NSID]) {
 919		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
 920		if (!peer)
 921			peer = ERR_PTR(-ENOENT);
 922		nla = tb[NETNSA_NSID];
 923	} else {
 924		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
 925		return -EINVAL;
 926	}
 927
 928	if (IS_ERR(peer)) {
 929		NL_SET_BAD_ATTR(extack, nla);
 930		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
 931		return PTR_ERR(peer);
 932	}
 933
 934	if (tb[NETNSA_TARGET_NSID]) {
 935		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
 936
 937		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
 938		if (IS_ERR(target)) {
 939			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
 940			NL_SET_ERR_MSG(extack,
 941				       "Target netns reference is invalid");
 942			err = PTR_ERR(target);
 943			goto out;
 944		}
 945		fillargs.add_ref = true;
 946		fillargs.ref_nsid = peernet2id(net, peer);
 947	}
 948
 949	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
 950	if (!msg) {
 951		err = -ENOMEM;
 952		goto out;
 953	}
 954
 955	fillargs.nsid = peernet2id(target, peer);
 956	err = rtnl_net_fill(msg, &fillargs);
 957	if (err < 0)
 958		goto err_out;
 959
 960	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
 961	goto out;
 962
 963err_out:
 964	nlmsg_free(msg);
 965out:
 966	if (fillargs.add_ref)
 967		put_net(target);
 968	put_net(peer);
 969	return err;
 970}
 971
 972struct rtnl_net_dump_cb {
 973	struct net *tgt_net;
 974	struct net *ref_net;
 975	struct sk_buff *skb;
 976	struct net_fill_args fillargs;
 977	int idx;
 978	int s_idx;
 979};
 980
  981/* Runs inside an RCU read-side critical section. */
 982static int rtnl_net_dumpid_one(int id, void *peer, void *data)
 983{
 984	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
 985	int ret;
 986
 987	if (net_cb->idx < net_cb->s_idx)
 988		goto cont;
 989
 990	net_cb->fillargs.nsid = id;
 991	if (net_cb->fillargs.add_ref)
 992		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
 993	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
 994	if (ret < 0)
 995		return ret;
 996
 997cont:
 998	net_cb->idx++;
 999	return 0;
1000}
1001
1002static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
1003				   struct rtnl_net_dump_cb *net_cb,
1004				   struct netlink_callback *cb)
1005{
1006	struct netlink_ext_ack *extack = cb->extack;
1007	struct nlattr *tb[NETNSA_MAX + 1];
1008	int err, i;
1009
1010	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
1011					    NETNSA_MAX, rtnl_net_policy,
1012					    extack);
1013	if (err < 0)
1014		return err;
1015
1016	for (i = 0; i <= NETNSA_MAX; i++) {
1017		if (!tb[i])
1018			continue;
1019
1020		if (i == NETNSA_TARGET_NSID) {
1021			struct net *net;
1022
1023			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
1024			if (IS_ERR(net)) {
1025				NL_SET_BAD_ATTR(extack, tb[i]);
1026				NL_SET_ERR_MSG(extack,
1027					       "Invalid target network namespace id");
1028				return PTR_ERR(net);
1029			}
1030			net_cb->fillargs.add_ref = true;
1031			net_cb->ref_net = net_cb->tgt_net;
1032			net_cb->tgt_net = net;
1033		} else {
1034			NL_SET_BAD_ATTR(extack, tb[i]);
1035			NL_SET_ERR_MSG(extack,
1036				       "Unsupported attribute in dump request");
1037			return -EINVAL;
1038		}
1039	}
1040
1041	return 0;
1042}
1043
1044static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
1045{
1046	struct rtnl_net_dump_cb net_cb = {
1047		.tgt_net = sock_net(skb->sk),
1048		.skb = skb,
1049		.fillargs = {
1050			.portid = NETLINK_CB(cb->skb).portid,
1051			.seq = cb->nlh->nlmsg_seq,
1052			.flags = NLM_F_MULTI,
1053			.cmd = RTM_NEWNSID,
1054		},
1055		.idx = 0,
1056		.s_idx = cb->args[0],
1057	};
1058	int err = 0;
1059
1060	if (cb->strict_check) {
1061		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
1062		if (err < 0)
1063			goto end;
1064	}
1065
1066	rcu_read_lock();
1067	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
1068	rcu_read_unlock();
1069
1070	cb->args[0] = net_cb.idx;
1071end:
1072	if (net_cb.fillargs.add_ref)
1073		put_net(net_cb.tgt_net);
1074	return err < 0 ? err : skb->len;
1075}
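/* Editor's note (hedged): "ip netns list-id" walks this dump path, which
 * emits one RTM_NEWNSID message per nsid assigned in the target
 * namespace. */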
1076
1077static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
1078			      struct nlmsghdr *nlh, gfp_t gfp)
1079{
1080	struct net_fill_args fillargs = {
1081		.portid = portid,
1082		.seq = nlh ? nlh->nlmsg_seq : 0,
1083		.cmd = cmd,
1084		.nsid = id,
1085	};
1086	struct sk_buff *msg;
1087	int err = -ENOMEM;
1088
1089	msg = nlmsg_new(rtnl_net_get_size(), gfp);
1090	if (!msg)
1091		goto out;
1092
1093	err = rtnl_net_fill(msg, &fillargs);
1094	if (err < 0)
1095		goto err_out;
1096
1097	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
1098	return;
1099
1100err_out:
1101	nlmsg_free(msg);
1102out:
1103	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
1104}
1105
1106#ifdef CONFIG_NET_NS
1107static void __init netns_ipv4_struct_check(void)
1108{
1109	/* TX readonly hotpath cache lines */
1110	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1111				      sysctl_tcp_early_retrans);
1112	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1113				      sysctl_tcp_tso_win_divisor);
1114	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1115				      sysctl_tcp_tso_rtt_log);
1116	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1117				      sysctl_tcp_autocorking);
1118	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1119				      sysctl_tcp_min_snd_mss);
1120	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1121				      sysctl_tcp_notsent_lowat);
1122	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1123				      sysctl_tcp_limit_output_bytes);
1124	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1125				      sysctl_tcp_min_rtt_wlen);
1126	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1127				      sysctl_tcp_wmem);
1128	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1129				      sysctl_ip_fwd_use_pmtu);
1130	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);
1131
1132	/* TXRX readonly hotpath cache lines */
1133	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
1134				      sysctl_tcp_moderate_rcvbuf);
1135	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);
1136
1137	/* RX readonly hotpath cache line */
1138	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1139				      sysctl_ip_early_demux);
1140	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1141				      sysctl_tcp_early_demux);
1142	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1143				      sysctl_tcp_reordering);
1144	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1145				      sysctl_tcp_rmem);
1146	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 18);
1147}
1148#endif
1149
1150void __init net_ns_init(void)
1151{
1152	struct net_generic *ng;
1153
1154#ifdef CONFIG_NET_NS
1155	netns_ipv4_struct_check();
1156	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
1157					SMP_CACHE_BYTES,
1158					SLAB_PANIC|SLAB_ACCOUNT, NULL);
1159
1160	/* Create workqueue for cleanup */
1161	netns_wq = create_singlethread_workqueue("netns");
1162	if (!netns_wq)
1163		panic("Could not create netns workq");
1164#endif
1165
1166	ng = net_alloc_generic();
1167	if (!ng)
1168		panic("Could not allocate generic netns");
1169
1170	rcu_assign_pointer(init_net.gen, ng);
1171
1172#ifdef CONFIG_KEYS
1173	init_net.key_domain = &init_net_key_domain;
1174#endif
1175	down_write(&pernet_ops_rwsem);
1176	preinit_net(&init_net);
1177	if (setup_net(&init_net, &init_user_ns))
1178		panic("Could not setup the initial network namespace");
1179
1180	init_net_initialized = true;
1181	up_write(&pernet_ops_rwsem);
1182
1183	if (register_pernet_subsys(&net_ns_ops))
1184		panic("Could not register network namespace subsystems");
1185
1186	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
1187		      RTNL_FLAG_DOIT_UNLOCKED);
1188	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
1189		      RTNL_FLAG_DOIT_UNLOCKED);
1190}
1191
1192static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
1193{
1194	ops_pre_exit_list(ops, net_exit_list);
1195	synchronize_rcu();
1196	ops_exit_list(ops, net_exit_list);
1197	ops_free_list(ops, net_exit_list);
1198}
1199
1200#ifdef CONFIG_NET_NS
1201static int __register_pernet_operations(struct list_head *list,
1202					struct pernet_operations *ops)
1203{
1204	struct net *net;
1205	int error;
1206	LIST_HEAD(net_exit_list);
1207
1208	list_add_tail(&ops->list, list);
1209	if (ops->init || (ops->id && ops->size)) {
 1210		/* We hold pernet_ops_rwsem write-locked, so parallel
 1211		 * setup_net() and cleanup_net() are not possible.
 1212		 */
1213		for_each_net(net) {
1214			error = ops_init(ops, net);
1215			if (error)
1216				goto out_undo;
1217			list_add_tail(&net->exit_list, &net_exit_list);
1218		}
1219	}
1220	return 0;
1221
1222out_undo:
 1223	/* On error, clean up all the namespaces we initialized */
1224	list_del(&ops->list);
1225	free_exit_list(ops, &net_exit_list);
1226	return error;
1227}
1228
1229static void __unregister_pernet_operations(struct pernet_operations *ops)
1230{
1231	struct net *net;
1232	LIST_HEAD(net_exit_list);
1233
1234	list_del(&ops->list);
1235	/* See comment in __register_pernet_operations() */
1236	for_each_net(net)
1237		list_add_tail(&net->exit_list, &net_exit_list);
1238
1239	free_exit_list(ops, &net_exit_list);
1240}
1241
1242#else
1243
1244static int __register_pernet_operations(struct list_head *list,
1245					struct pernet_operations *ops)
1246{
1247	if (!init_net_initialized) {
1248		list_add_tail(&ops->list, list);
1249		return 0;
1250	}
1251
1252	return ops_init(ops, &init_net);
1253}
1254
1255static void __unregister_pernet_operations(struct pernet_operations *ops)
1256{
1257	if (!init_net_initialized) {
1258		list_del(&ops->list);
1259	} else {
1260		LIST_HEAD(net_exit_list);
1261		list_add(&init_net.exit_list, &net_exit_list);
1262		free_exit_list(ops, &net_exit_list);
1263	}
1264}
1265
1266#endif /* CONFIG_NET_NS */
1267
1268static DEFINE_IDA(net_generic_ids);
1269
1270static int register_pernet_operations(struct list_head *list,
1271				      struct pernet_operations *ops)
1272{
1273	int error;
1274
1275	if (ops->id) {
1276		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
1277				GFP_KERNEL);
1278		if (error < 0)
1279			return error;
1280		*ops->id = error;
 1281		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
1282	}
1283	error = __register_pernet_operations(list, ops);
1284	if (error) {
1285		rcu_barrier();
1286		if (ops->id)
1287			ida_free(&net_generic_ids, *ops->id);
1288	}
1289
1290	return error;
1291}
1292
1293static void unregister_pernet_operations(struct pernet_operations *ops)
1294{
1295	__unregister_pernet_operations(ops);
1296	rcu_barrier();
1297	if (ops->id)
1298		ida_free(&net_generic_ids, *ops->id);
1299}
1300
1301/**
1302 *      register_pernet_subsys - register a network namespace subsystem
1303 *	@ops:  pernet operations structure for the subsystem
1304 *
1305 *	Register a subsystem which has init and exit functions
1306 *	that are called when network namespaces are created and
1307 *	destroyed respectively.
1308 *
 1309 *	When registered, all network namespace init functions are
 1310 *	called for every existing network namespace, giving kernel
 1311 *	modules a race-free view of the set of network namespaces.
1312 *
1313 *	When a new network namespace is created all of the init
1314 *	methods are called in the order in which they were registered.
1315 *
1316 *	When a network namespace is destroyed all of the exit methods
1317 *	are called in the reverse of the order with which they were
1318 *	registered.
1319 */
1320int register_pernet_subsys(struct pernet_operations *ops)
1321{
1322	int error;
1323	down_write(&pernet_ops_rwsem);
1324	error =  register_pernet_operations(first_device, ops);
1325	up_write(&pernet_ops_rwsem);
1326	return error;
1327}
1328EXPORT_SYMBOL_GPL(register_pernet_subsys);
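/* Editor's sketch (hedged; the "example_*" identifiers are hypothetical):
 * a typical subsystem pairs .id/.size with net_generic() so each
 * namespace gets private state that ops_init() allocates and registers:
 */
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_pernet {
	int counter;				/* per-namespace state */
};

static unsigned int example_net_id __read_mostly;

static int __net_init example_net_init(struct net *net)
{
	struct example_pernet *pn = net_generic(net, example_net_id);

	pn->counter = 0;	/* already zeroed by ops_init(); for clarity */
	return 0;
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.id   = &example_net_id,
	.size = sizeof(struct example_pernet),
};

/* Call register_pernet_subsys(&example_net_ops) from an initcall and
 * unregister_pernet_subsys(&example_net_ops) on teardown. */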
1329
1330/**
1331 *      unregister_pernet_subsys - unregister a network namespace subsystem
1332 *	@ops: pernet operations structure to manipulate
1333 *
1334 *	Remove the pernet operations structure from the list to be
1335 *	used when network namespaces are created or destroyed.  In
1336 *	addition run the exit method for all existing network
1337 *	namespaces.
1338 */
1339void unregister_pernet_subsys(struct pernet_operations *ops)
1340{
1341	down_write(&pernet_ops_rwsem);
1342	unregister_pernet_operations(ops);
1343	up_write(&pernet_ops_rwsem);
1344}
1345EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
1346
1347/**
1348 *      register_pernet_device - register a network namespace device
1349 *	@ops:  pernet operations structure for the subsystem
1350 *
1351 *	Register a device which has init and exit functions
1352 *	that are called when network namespaces are created and
1353 *	destroyed respectively.
1354 *
 1355 *	When registered, all network namespace init functions are
 1356 *	called for every existing network namespace, giving kernel
 1357 *	modules a race-free view of the set of network namespaces.
1358 *
1359 *	When a new network namespace is created all of the init
1360 *	methods are called in the order in which they were registered.
1361 *
1362 *	When a network namespace is destroyed all of the exit methods
1363 *	are called in the reverse of the order with which they were
1364 *	registered.
1365 */
1366int register_pernet_device(struct pernet_operations *ops)
1367{
1368	int error;
1369	down_write(&pernet_ops_rwsem);
1370	error = register_pernet_operations(&pernet_list, ops);
1371	if (!error && (first_device == &pernet_list))
1372		first_device = &ops->list;
1373	up_write(&pernet_ops_rwsem);
1374	return error;
1375}
1376EXPORT_SYMBOL_GPL(register_pernet_device);
1377
1378/**
1379 *      unregister_pernet_device - unregister a network namespace netdevice
1380 *	@ops: pernet operations structure to manipulate
1381 *
1382 *	Remove the pernet operations structure from the list to be
1383 *	used when network namespaces are created or destroyed.  In
1384 *	addition run the exit method for all existing network
1385 *	namespaces.
1386 */
1387void unregister_pernet_device(struct pernet_operations *ops)
1388{
1389	down_write(&pernet_ops_rwsem);
1390	if (&ops->list == first_device)
1391		first_device = first_device->next;
1392	unregister_pernet_operations(ops);
1393	up_write(&pernet_ops_rwsem);
1394}
1395EXPORT_SYMBOL_GPL(unregister_pernet_device);
1396
1397#ifdef CONFIG_NET_NS
1398static struct ns_common *netns_get(struct task_struct *task)
1399{
1400	struct net *net = NULL;
1401	struct nsproxy *nsproxy;
1402
1403	task_lock(task);
1404	nsproxy = task->nsproxy;
1405	if (nsproxy)
1406		net = get_net(nsproxy->net_ns);
1407	task_unlock(task);
1408
1409	return net ? &net->ns : NULL;
1410}
1411
1412static inline struct net *to_net_ns(struct ns_common *ns)
1413{
1414	return container_of(ns, struct net, ns);
1415}
1416
1417static void netns_put(struct ns_common *ns)
1418{
1419	put_net(to_net_ns(ns));
1420}
1421
1422static int netns_install(struct nsset *nsset, struct ns_common *ns)
1423{
1424	struct nsproxy *nsproxy = nsset->nsproxy;
1425	struct net *net = to_net_ns(ns);
1426
1427	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
1428	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
1429		return -EPERM;
1430
1431	put_net(nsproxy->net_ns);
1432	nsproxy->net_ns = get_net(net);
1433	return 0;
1434}
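/* Editor's illustration (hedged, not part of this file): netns_install()
 * is the kernel side of setns(2) on a netns fd; the matching userspace
 * call is:
 */
#define _GNU_SOURCE
#include <sched.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/1/ns/net", O_RDONLY);	/* init's netns */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (setns(fd, CLONE_NEWNET) < 0) {	/* runs netns_install() */
		perror("setns");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}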
1435
1436static struct user_namespace *netns_owner(struct ns_common *ns)
1437{
1438	return to_net_ns(ns)->user_ns;
1439}
1440
1441const struct proc_ns_operations netns_operations = {
1442	.name		= "net",
1443	.type		= CLONE_NEWNET,
1444	.get		= netns_get,
1445	.put		= netns_put,
1446	.install	= netns_install,
1447	.owner		= netns_owner,
1448};
1449#endif