v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/workqueue.h>
   5#include <linux/rtnetlink.h>
   6#include <linux/cache.h>
   7#include <linux/slab.h>
   8#include <linux/list.h>
   9#include <linux/delay.h>
  10#include <linux/sched.h>
  11#include <linux/idr.h>
  12#include <linux/rculist.h>
  13#include <linux/nsproxy.h>
  14#include <linux/fs.h>
  15#include <linux/proc_ns.h>
  16#include <linux/file.h>
  17#include <linux/export.h>
  18#include <linux/user_namespace.h>
  19#include <linux/net_namespace.h>
  20#include <linux/sched/task.h>
  21#include <linux/uidgid.h>
  22#include <linux/cookie.h>
  23#include <linux/proc_fs.h>
  24
  25#include <net/sock.h>
  26#include <net/netlink.h>
  27#include <net/net_namespace.h>
  28#include <net/netns/generic.h>
  29
  30/*
  31 *	Our network namespace constructor/destructor lists
  32 */
  33
  34static LIST_HEAD(pernet_list);
  35static struct list_head *first_device = &pernet_list;
  36
  37LIST_HEAD(net_namespace_list);
  38EXPORT_SYMBOL_GPL(net_namespace_list);
  39
  40/* Protects net_namespace_list. Nests inside rtnl_lock() */
  41DECLARE_RWSEM(net_rwsem);
  42EXPORT_SYMBOL_GPL(net_rwsem);
  43
  44#ifdef CONFIG_KEYS
  45static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
  46#endif
  47
  48struct net init_net;
  49EXPORT_SYMBOL(init_net);
  50
  51static bool init_net_initialized;
  52/*
  53 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
  54 * init_net_initialized and first_device pointer.
  55 * This is an internal net namespace object. Please do not use it
  56 * outside.
  57 */
  58DECLARE_RWSEM(pernet_ops_rwsem);
  59EXPORT_SYMBOL_GPL(pernet_ops_rwsem);
  60
  61#define MIN_PERNET_OPS_ID	\
  62	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
  63
  64#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
  65
  66static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
  67
  68DEFINE_COOKIE(net_cookie);
  69
  70static struct net_generic *net_alloc_generic(void)
  71{
  72	struct net_generic *ng;
  73	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
  74
  75	ng = kzalloc(generic_size, GFP_KERNEL);
  76	if (ng)
  77		ng->s.len = max_gen_ptrs;
  78
  79	return ng;
  80}
  81
  82static int net_assign_generic(struct net *net, unsigned int id, void *data)
  83{
  84	struct net_generic *ng, *old_ng;
  85
  86	BUG_ON(id < MIN_PERNET_OPS_ID);
  87
  88	old_ng = rcu_dereference_protected(net->gen,
  89					   lockdep_is_held(&pernet_ops_rwsem));
  90	if (old_ng->s.len > id) {
  91		old_ng->ptr[id] = data;
  92		return 0;
  93	}
  94
  95	ng = net_alloc_generic();
  96	if (!ng)
  97		return -ENOMEM;
  98
  99	/*
 100	 * Some synchronisation notes:
 101	 *
 102	 * The net_generic explores the net->gen array inside an rcu
 103	 * read section. Besides, once set, the net->gen->ptr[x]
 104	 * pointer never changes (see rules in netns/generic.h).
 105	 *
 106	 * That said, we simply duplicate this array and schedule
 107	 * the old copy for kfree after a grace period.
 108	 */
 109
 110	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
 111	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
 112	ng->ptr[id] = data;
 113
 114	rcu_assign_pointer(net->gen, ng);
 115	kfree_rcu(old_ng, s.rcu);
 116	return 0;
 117}
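
/*
 * A simplified sketch of the reader side that the synchronisation notes
 * above rely on; the real helper is net_generic() in
 * include/net/netns/generic.h.  It only dereferences net->gen inside an
 * RCU read section, which is why net_assign_generic() can publish a larger
 * copy with rcu_assign_pointer() and dispose of the old one via kfree_rcu().
 * The name example_net_generic is hypothetical.
 */
static inline void *example_net_generic(const struct net *net, unsigned int id)
{
	struct net_generic *ng;
	void *ptr;

	rcu_read_lock();
	ng = rcu_dereference(net->gen);
	ptr = ng->ptr[id];
	rcu_read_unlock();

	return ptr;
}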
 118
 119static int ops_init(const struct pernet_operations *ops, struct net *net)
 120{
 121	struct net_generic *ng;
 122	int err = -ENOMEM;
 123	void *data = NULL;
 124
 125	if (ops->id && ops->size) {
 126		data = kzalloc(ops->size, GFP_KERNEL);
 127		if (!data)
 128			goto out;
 129
 130		err = net_assign_generic(net, *ops->id, data);
 131		if (err)
 132			goto cleanup;
 133	}
 134	err = 0;
 135	if (ops->init)
 136		err = ops->init(net);
 137	if (!err)
 138		return 0;
 139
 140	if (ops->id && ops->size) {
 141		ng = rcu_dereference_protected(net->gen,
 142					       lockdep_is_held(&pernet_ops_rwsem));
 143		ng->ptr[*ops->id] = NULL;
 144	}
 145
 146cleanup:
 147	kfree(data);
 148
 149out:
 150	return err;
 151}
 152
 153static void ops_pre_exit_list(const struct pernet_operations *ops,
 154			      struct list_head *net_exit_list)
 155{
 156	struct net *net;
 157
 158	if (ops->pre_exit) {
 159		list_for_each_entry(net, net_exit_list, exit_list)
 160			ops->pre_exit(net);
 161	}
 162}
 163
 164static void ops_exit_list(const struct pernet_operations *ops,
 165			  struct list_head *net_exit_list)
 166{
 167	struct net *net;
 168	if (ops->exit) {
 169		list_for_each_entry(net, net_exit_list, exit_list) {
 170			ops->exit(net);
 171			cond_resched();
 172		}
 173	}
 174	if (ops->exit_batch)
 175		ops->exit_batch(net_exit_list);
 176}
 177
 178static void ops_free_list(const struct pernet_operations *ops,
 179			  struct list_head *net_exit_list)
 180{
 181	struct net *net;
 182	if (ops->size && ops->id) {
 183		list_for_each_entry(net, net_exit_list, exit_list)
 184			kfree(net_generic(net, *ops->id));
 185	}
 186}
 187
 188/* should be called with nsid_lock held */
 189static int alloc_netid(struct net *net, struct net *peer, int reqid)
 190{
 191	int min = 0, max = 0;
 192
 193	if (reqid >= 0) {
 194		min = reqid;
 195		max = reqid + 1;
 196	}
 197
 198	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
 199}
 200
 201/* This function is used by idr_for_each(). If net is equal to peer, the
 202 * function returns the id so that idr_for_each() stops. Because we cannot
 203 * return the id 0 (idr_for_each() will not stop), we return the magic value
 204 * NET_ID_ZERO (-1) for it.
 205 */
 206#define NET_ID_ZERO -1
 207static int net_eq_idr(int id, void *net, void *peer)
 208{
 209	if (net_eq(net, peer))
 210		return id ? : NET_ID_ZERO;
 211	return 0;
 212}
 213
 214/* Must be called from RCU-critical section or with nsid_lock held */
 215static int __peernet2id(const struct net *net, struct net *peer)
 216{
 217	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
 218
 219	/* Magic value for id 0. */
 220	if (id == NET_ID_ZERO)
 221		return 0;
 222	if (id > 0)
 223		return id;
 224
 225	return NETNSA_NSID_NOT_ASSIGNED;
 226}
 227
 228static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
 229			      struct nlmsghdr *nlh, gfp_t gfp);
 230/* This function returns the id of a peer netns. If no id is assigned, one will
 231 * be allocated and returned.
 232 */
 233int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 234{
 235	int id;
 236
 237	if (refcount_read(&net->ns.count) == 0)
 238		return NETNSA_NSID_NOT_ASSIGNED;
 239
 240	spin_lock_bh(&net->nsid_lock);
 241	id = __peernet2id(net, peer);
 242	if (id >= 0) {
 243		spin_unlock_bh(&net->nsid_lock);
 244		return id;
 245	}
 246
 247	/* When peer is obtained from RCU lists, we may race with
 248	 * its cleanup. Check whether it's alive, and this guarantees
 249	 * we never hash a peer back to net->netns_ids, after it has
 250	 * just been idr_remove()'d from there in cleanup_net().
 251	 */
 252	if (!maybe_get_net(peer)) {
 253		spin_unlock_bh(&net->nsid_lock);
 254		return NETNSA_NSID_NOT_ASSIGNED;
 255	}
 256
 257	id = alloc_netid(net, peer, -1);
 258	spin_unlock_bh(&net->nsid_lock);
 259
 260	put_net(peer);
 261	if (id < 0)
 262		return NETNSA_NSID_NOT_ASSIGNED;
 263
 264	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
 265
 266	return id;
 267}
 268EXPORT_SYMBOL_GPL(peernet2id_alloc);
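
/*
 * A usage sketch (hypothetical helper name): rtnetlink relies on
 * peernet2id_alloc() roughly like this when reporting a device whose link
 * lives in another namespace -- the nsid is allocated on demand and carried
 * in an IFLA_LINK_NETNSID attribute.
 */
static int example_put_link_netnsid(struct sk_buff *skb, struct net *src_net,
				    struct net *link_net, gfp_t gfp)
{
	int id = peernet2id_alloc(src_net, link_net, gfp);

	return nla_put_s32(skb, IFLA_LINK_NETNSID, id);
}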
 269
 270/* This function returns, if assigned, the id of a peer netns. */
 271int peernet2id(const struct net *net, struct net *peer)
 272{
 273	int id;
 274
 275	rcu_read_lock();
 276	id = __peernet2id(net, peer);
 277	rcu_read_unlock();
 278
 279	return id;
 280}
 281EXPORT_SYMBOL(peernet2id);
 282
 283/* This function returns true if the peer netns has an id assigned into the
 284 * current netns.
 285 */
 286bool peernet_has_id(const struct net *net, struct net *peer)
 287{
 288	return peernet2id(net, peer) >= 0;
 289}
 290
 291struct net *get_net_ns_by_id(const struct net *net, int id)
 292{
 293	struct net *peer;
 294
 295	if (id < 0)
 296		return NULL;
 297
 298	rcu_read_lock();
 299	peer = idr_find(&net->netns_ids, id);
 300	if (peer)
 301		peer = maybe_get_net(peer);
 302	rcu_read_unlock();
 303
 304	return peer;
 305}
 306EXPORT_SYMBOL_GPL(get_net_ns_by_id);
 307
 308/* init code that must occur even if setup_net() is not called. */
 309static __net_init void preinit_net(struct net *net)
 310{
 311	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");
 312}
 313
 314/*
 315 * setup_net runs the initializers for the network namespace object.
 316 */
 317static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 318{
 319	/* Must be called with pernet_ops_rwsem held */
 320	const struct pernet_operations *ops, *saved_ops;
 321	int error = 0;
 322	LIST_HEAD(net_exit_list);
 323
 324	refcount_set(&net->ns.count, 1);
 325	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
 326
 327	refcount_set(&net->passive, 1);
 328	get_random_bytes(&net->hash_mix, sizeof(u32));
 329	preempt_disable();
 330	net->net_cookie = gen_cookie_next(&net_cookie);
 331	preempt_enable();
 332	net->dev_base_seq = 1;
 333	net->user_ns = user_ns;
 334	idr_init(&net->netns_ids);
 335	spin_lock_init(&net->nsid_lock);
 336	mutex_init(&net->ipv4.ra_mutex);
 337
 338	list_for_each_entry(ops, &pernet_list, list) {
 339		error = ops_init(ops, net);
 340		if (error < 0)
 341			goto out_undo;
 342	}
 343	down_write(&net_rwsem);
 344	list_add_tail_rcu(&net->list, &net_namespace_list);
 345	up_write(&net_rwsem);
 346out:
 347	return error;
 348
 349out_undo:
 350	/* Walk through the list backwards calling the exit functions
 351	 * for the pernet modules whose init functions did not fail.
 352	 */
 353	list_add(&net->exit_list, &net_exit_list);
 354	saved_ops = ops;
 355	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
 356		ops_pre_exit_list(ops, &net_exit_list);
 357
 358	synchronize_rcu();
 359
 360	ops = saved_ops;
 361	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
 362		ops_exit_list(ops, &net_exit_list);
 363
 364	ops = saved_ops;
 365	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
 366		ops_free_list(ops, &net_exit_list);
 367
 368	rcu_barrier();
 369	goto out;
 370}
 371
 372static int __net_init net_defaults_init_net(struct net *net)
 373{
 374	net->core.sysctl_somaxconn = SOMAXCONN;
 375	/* Limits per socket sk_omem_alloc usage.
 376	 * TCP zerocopy regular usage needs 128 KB.
 377	 */
 378	net->core.sysctl_optmem_max = 128 * 1024;
 379	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
 380
 381	return 0;
 382}
 383
 384static struct pernet_operations net_defaults_ops = {
 385	.init = net_defaults_init_net,
 386};
 387
 388static __init int net_defaults_init(void)
 389{
 390	if (register_pernet_subsys(&net_defaults_ops))
 391		panic("Cannot initialize net default settings");
 392
 393	return 0;
 394}
 395
 396core_initcall(net_defaults_init);
 397
 398#ifdef CONFIG_NET_NS
 399static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
 400{
 401	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
 402}
 403
 404static void dec_net_namespaces(struct ucounts *ucounts)
 405{
 406	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
 407}
 408
 409static struct kmem_cache *net_cachep __ro_after_init;
 410static struct workqueue_struct *netns_wq;
 411
 412static struct net *net_alloc(void)
 413{
 414	struct net *net = NULL;
 415	struct net_generic *ng;
 416
 417	ng = net_alloc_generic();
 418	if (!ng)
 419		goto out;
 420
 421	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
 422	if (!net)
 423		goto out_free;
 424
 425#ifdef CONFIG_KEYS
 426	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
 427	if (!net->key_domain)
 428		goto out_free_2;
 429	refcount_set(&net->key_domain->usage, 1);
 430#endif
 431
 432	rcu_assign_pointer(net->gen, ng);
 433out:
 434	return net;
 435
 436#ifdef CONFIG_KEYS
 437out_free_2:
 438	kmem_cache_free(net_cachep, net);
 439	net = NULL;
 440#endif
 441out_free:
 442	kfree(ng);
 443	goto out;
 444}
 445
 446static void net_free(struct net *net)
 447{
 448	if (refcount_dec_and_test(&net->passive)) {
 449		kfree(rcu_access_pointer(net->gen));
 450
 451		/* There should not be any trackers left there. */
 452		ref_tracker_dir_exit(&net->notrefcnt_tracker);
 453
 454		kmem_cache_free(net_cachep, net);
 455	}
 456}
 457
 458void net_drop_ns(void *p)
 459{
 460	struct net *net = (struct net *)p;
 461
 462	if (net)
 463		net_free(net);
 464}
 465
 466struct net *copy_net_ns(unsigned long flags,
 467			struct user_namespace *user_ns, struct net *old_net)
 468{
 469	struct ucounts *ucounts;
 470	struct net *net;
 471	int rv;
 472
 473	if (!(flags & CLONE_NEWNET))
 474		return get_net(old_net);
 475
 476	ucounts = inc_net_namespaces(user_ns);
 477	if (!ucounts)
 478		return ERR_PTR(-ENOSPC);
 479
 480	net = net_alloc();
 481	if (!net) {
 482		rv = -ENOMEM;
 483		goto dec_ucounts;
 484	}
 485
 486	preinit_net(net);
 487	refcount_set(&net->passive, 1);
 488	net->ucounts = ucounts;
 489	get_user_ns(user_ns);
 490
 491	rv = down_read_killable(&pernet_ops_rwsem);
 492	if (rv < 0)
 493		goto put_userns;
 494
 495	rv = setup_net(net, user_ns);
 496
 497	up_read(&pernet_ops_rwsem);
 498
 499	if (rv < 0) {
 500put_userns:
 501#ifdef CONFIG_KEYS
 502		key_remove_domain(net->key_domain);
 503#endif
 504		put_user_ns(user_ns);
 505		net_free(net);
 506dec_ucounts:
 507		dec_net_namespaces(ucounts);
 508		return ERR_PTR(rv);
 509	}
 510	return net;
 511}
 512
 513/**
 514 * net_ns_get_ownership - get sysfs ownership data for @net
 515 * @net: network namespace in question (can be NULL)
 516 * @uid: kernel user ID for sysfs objects
 517 * @gid: kernel group ID for sysfs objects
 518 *
 519 * Returns the uid/gid pair of root in the user namespace associated with the
 520 * given network namespace.
 521 */
 522void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
 523{
 524	if (net) {
 525		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
 526		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);
 527
 528		if (uid_valid(ns_root_uid))
 529			*uid = ns_root_uid;
 530
 531		if (gid_valid(ns_root_gid))
 532			*gid = ns_root_gid;
 533	} else {
 534		*uid = GLOBAL_ROOT_UID;
 535		*gid = GLOBAL_ROOT_GID;
 536	}
 537}
 538EXPORT_SYMBOL_GPL(net_ns_get_ownership);
 539
 540static void unhash_nsid(struct net *net, struct net *last)
 541{
 542	struct net *tmp;
 543	/* This function is only called from cleanup_net() work,
 544 * and this work is the only process that may delete
 545	 * a net from net_namespace_list. So, when the below
 546	 * is executing, the list may only grow. Thus, we do not
 547	 * use for_each_net_rcu() or net_rwsem.
 548	 */
 549	for_each_net(tmp) {
 550		int id;
 551
 552		spin_lock_bh(&tmp->nsid_lock);
 553		id = __peernet2id(tmp, net);
 554		if (id >= 0)
 555			idr_remove(&tmp->netns_ids, id);
 556		spin_unlock_bh(&tmp->nsid_lock);
 557		if (id >= 0)
 558			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
 559					  GFP_KERNEL);
 560		if (tmp == last)
 561			break;
 562	}
 563	spin_lock_bh(&net->nsid_lock);
 564	idr_destroy(&net->netns_ids);
 565	spin_unlock_bh(&net->nsid_lock);
 566}
 567
 568static LLIST_HEAD(cleanup_list);
 569
 570static void cleanup_net(struct work_struct *work)
 571{
 572	const struct pernet_operations *ops;
 573	struct net *net, *tmp, *last;
 574	struct llist_node *net_kill_list;
 575	LIST_HEAD(net_exit_list);
 576
 577	/* Atomically snapshot the list of namespaces to cleanup */
 578	net_kill_list = llist_del_all(&cleanup_list);
 579
 580	down_read(&pernet_ops_rwsem);
 581
 582	/* Don't let anyone else find us. */
 583	down_write(&net_rwsem);
 584	llist_for_each_entry(net, net_kill_list, cleanup_list)
 585		list_del_rcu(&net->list);
 586	/* Cache last net. After we unlock rtnl, no new net
 587	 * added to net_namespace_list can assign an nsid pointer
 588	 * to a net from net_kill_list (see peernet2id_alloc()).
 589	 * So, we skip them in unhash_nsid().
 590	 *
 591	 * Note that unhash_nsid() does not delete nsid links
 592	 * between net_kill_list's nets, as they've already been
 593	 * deleted from net_namespace_list. But this would be
 594	 * useless anyway, as netns_ids are destroyed there.
 595	 */
 596	last = list_last_entry(&net_namespace_list, struct net, list);
 597	up_write(&net_rwsem);
 598
 599	llist_for_each_entry(net, net_kill_list, cleanup_list) {
 600		unhash_nsid(net, last);
 601		list_add_tail(&net->exit_list, &net_exit_list);
 602	}
 603
 604	/* Run all of the network namespace pre_exit methods */
 605	list_for_each_entry_reverse(ops, &pernet_list, list)
 606		ops_pre_exit_list(ops, &net_exit_list);
 607
 608	/*
 609	 * Another CPU might be rcu-iterating the list, wait for it.
 610	 * This needs to be before calling the exit() notifiers, so
 611	 * the rcu_barrier() below isn't sufficient alone.
 612	 * Also the pre_exit() and exit() methods need this barrier.
 613	 */
 614	synchronize_rcu();
 615
 616	/* Run all of the network namespace exit methods */
 617	list_for_each_entry_reverse(ops, &pernet_list, list)
 618		ops_exit_list(ops, &net_exit_list);
 619
 620	/* Free the net generic variables */
 621	list_for_each_entry_reverse(ops, &pernet_list, list)
 622		ops_free_list(ops, &net_exit_list);
 623
 624	up_read(&pernet_ops_rwsem);
 625
 626	/* Ensure there are no outstanding rcu callbacks using this
 627	 * network namespace.
 628	 */
 629	rcu_barrier();
 630
 631	/* Finally it is safe to free my network namespace structure */
 632	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
 633		list_del_init(&net->exit_list);
 634		dec_net_namespaces(net->ucounts);
 635#ifdef CONFIG_KEYS
 636		key_remove_domain(net->key_domain);
 637#endif
 638		put_user_ns(net->user_ns);
 639		net_free(net);
 640	}
 641}
 642
 643/**
 644 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 645 *
 646 * cleanup_net runs from work queue and will first remove namespaces
 647 * from the global list, then run net exit functions.
 648 *
 649 * Call this in module exit path to make sure that all netns
 650 * ->exit ops have been invoked before the function is removed.
 651 */
 652void net_ns_barrier(void)
 653{
 654	down_write(&pernet_ops_rwsem);
 655	up_write(&pernet_ops_rwsem);
 656}
 657EXPORT_SYMBOL(net_ns_barrier);
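
/*
 * A usage sketch (hypothetical function name): a module whose pernet ->exit
 * callbacks may still be running inside cleanup_net() can use the barrier on
 * its exit path, exactly as the comment above describes.
 */
static void __exit example_module_exit(void)
{
	/* make sure no cleanup_net() is still executing our ->exit methods */
	net_ns_barrier();

	/* ...now it is safe to free resources those methods might touch... */
}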
 658
 659static DECLARE_WORK(net_cleanup_work, cleanup_net);
 660
 661void __put_net(struct net *net)
 662{
 663	ref_tracker_dir_exit(&net->refcnt_tracker);
 664	/* Cleanup the network namespace in process context */
 665	if (llist_add(&net->cleanup_list, &cleanup_list))
 666		queue_work(netns_wq, &net_cleanup_work);
 667}
 668EXPORT_SYMBOL_GPL(__put_net);
 669
 670/**
 671 * get_net_ns - increment the refcount of the network namespace
 672 * @ns: common namespace (net)
 673 *
 674 * Returns the net's common namespace.
 675 */
 676struct ns_common *get_net_ns(struct ns_common *ns)
 677{
 678	return &get_net(container_of(ns, struct net, ns))->ns;
 679}
 680EXPORT_SYMBOL_GPL(get_net_ns);
 681
 682struct net *get_net_ns_by_fd(int fd)
 683{
 684	struct fd f = fdget(fd);
 685	struct net *net = ERR_PTR(-EINVAL);
 686
 687	if (!f.file)
 688		return ERR_PTR(-EBADF);
 689
 690	if (proc_ns_file(f.file)) {
 691		struct ns_common *ns = get_proc_ns(file_inode(f.file));
 692		if (ns->ops == &netns_operations)
 693			net = get_net(container_of(ns, struct net, ns));
 694	}
 695	fdput(f);
 696
 697	return net;
 698}
 699EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
 700#endif
 701
 702struct net *get_net_ns_by_pid(pid_t pid)
 703{
 704	struct task_struct *tsk;
 705	struct net *net;
 706
 707	/* Lookup the network namespace */
 708	net = ERR_PTR(-ESRCH);
 709	rcu_read_lock();
 710	tsk = find_task_by_vpid(pid);
 711	if (tsk) {
 712		struct nsproxy *nsproxy;
 713		task_lock(tsk);
 714		nsproxy = tsk->nsproxy;
 715		if (nsproxy)
 716			net = get_net(nsproxy->net_ns);
 717		task_unlock(tsk);
 718	}
 719	rcu_read_unlock();
 720	return net;
 721}
 722EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
 723
 724static __net_init int net_ns_net_init(struct net *net)
 725{
 726#ifdef CONFIG_NET_NS
 727	net->ns.ops = &netns_operations;
 728#endif
 729	return ns_alloc_inum(&net->ns);
 730}
 731
 732static __net_exit void net_ns_net_exit(struct net *net)
 733{
 734	ns_free_inum(&net->ns);
 735}
 736
 737static struct pernet_operations __net_initdata net_ns_ops = {
 738	.init = net_ns_net_init,
 739	.exit = net_ns_net_exit,
 740};
 741
 742static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
 743	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
 744	[NETNSA_NSID]		= { .type = NLA_S32 },
 745	[NETNSA_PID]		= { .type = NLA_U32 },
 746	[NETNSA_FD]		= { .type = NLA_U32 },
 747	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
 748};
 749
 750static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 751			  struct netlink_ext_ack *extack)
 752{
 753	struct net *net = sock_net(skb->sk);
 754	struct nlattr *tb[NETNSA_MAX + 1];
 755	struct nlattr *nla;
 756	struct net *peer;
 757	int nsid, err;
 758
 759	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
 760				     NETNSA_MAX, rtnl_net_policy, extack);
 761	if (err < 0)
 762		return err;
 763	if (!tb[NETNSA_NSID]) {
 764		NL_SET_ERR_MSG(extack, "nsid is missing");
 765		return -EINVAL;
 766	}
 767	nsid = nla_get_s32(tb[NETNSA_NSID]);
 768
 769	if (tb[NETNSA_PID]) {
 770		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
 771		nla = tb[NETNSA_PID];
 772	} else if (tb[NETNSA_FD]) {
 773		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
 774		nla = tb[NETNSA_FD];
 775	} else {
 776		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
 777		return -EINVAL;
 778	}
 779	if (IS_ERR(peer)) {
 780		NL_SET_BAD_ATTR(extack, nla);
 781		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
 782		return PTR_ERR(peer);
 783	}
 784
 785	spin_lock_bh(&net->nsid_lock);
 786	if (__peernet2id(net, peer) >= 0) {
 787		spin_unlock_bh(&net->nsid_lock);
 788		err = -EEXIST;
 789		NL_SET_BAD_ATTR(extack, nla);
 790		NL_SET_ERR_MSG(extack,
 791			       "Peer netns already has a nsid assigned");
 792		goto out;
 793	}
 794
 795	err = alloc_netid(net, peer, nsid);
 796	spin_unlock_bh(&net->nsid_lock);
 797	if (err >= 0) {
 798		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
 799				  nlh, GFP_KERNEL);
 800		err = 0;
 801	} else if (err == -ENOSPC && nsid >= 0) {
 802		err = -EEXIST;
 803		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
 804		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
 805	}
 806out:
 807	put_net(peer);
 808	return err;
 809}
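
/*
 * For orientation: this handler is what iproute2's "ip netns set NAME NETNSID"
 * ultimately reaches -- the request carries NETNSA_FD (or NETNSA_PID) to name
 * the peer namespace and NETNSA_NSID with the requested id.
 */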
 810
 811static int rtnl_net_get_size(void)
 812{
 813	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
 814	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
 815	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
 816	       ;
 817}
 818
 819struct net_fill_args {
 820	u32 portid;
 821	u32 seq;
 822	int flags;
 823	int cmd;
 824	int nsid;
 825	bool add_ref;
 826	int ref_nsid;
 827};
 828
 829static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
 830{
 831	struct nlmsghdr *nlh;
 832	struct rtgenmsg *rth;
 833
 834	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
 835			args->flags);
 836	if (!nlh)
 837		return -EMSGSIZE;
 838
 839	rth = nlmsg_data(nlh);
 840	rth->rtgen_family = AF_UNSPEC;
 841
 842	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
 843		goto nla_put_failure;
 844
 845	if (args->add_ref &&
 846	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
 847		goto nla_put_failure;
 848
 849	nlmsg_end(skb, nlh);
 850	return 0;
 851
 852nla_put_failure:
 853	nlmsg_cancel(skb, nlh);
 854	return -EMSGSIZE;
 855}
 856
 857static int rtnl_net_valid_getid_req(struct sk_buff *skb,
 858				    const struct nlmsghdr *nlh,
 859				    struct nlattr **tb,
 860				    struct netlink_ext_ack *extack)
 861{
 862	int i, err;
 863
 864	if (!netlink_strict_get_check(skb))
 865		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
 866					      tb, NETNSA_MAX, rtnl_net_policy,
 867					      extack);
 868
 869	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
 870					    NETNSA_MAX, rtnl_net_policy,
 871					    extack);
 872	if (err)
 873		return err;
 874
 875	for (i = 0; i <= NETNSA_MAX; i++) {
 876		if (!tb[i])
 877			continue;
 878
 879		switch (i) {
 880		case NETNSA_PID:
 881		case NETNSA_FD:
 882		case NETNSA_NSID:
 883		case NETNSA_TARGET_NSID:
 884			break;
 885		default:
 886			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
 887			return -EINVAL;
 888		}
 889	}
 890
 891	return 0;
 892}
 893
 894static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
 895			  struct netlink_ext_ack *extack)
 896{
 897	struct net *net = sock_net(skb->sk);
 898	struct nlattr *tb[NETNSA_MAX + 1];
 899	struct net_fill_args fillargs = {
 900		.portid = NETLINK_CB(skb).portid,
 901		.seq = nlh->nlmsg_seq,
 902		.cmd = RTM_NEWNSID,
 903	};
 904	struct net *peer, *target = net;
 905	struct nlattr *nla;
 906	struct sk_buff *msg;
 907	int err;
 908
 909	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
 910	if (err < 0)
 911		return err;
 912	if (tb[NETNSA_PID]) {
 913		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
 914		nla = tb[NETNSA_PID];
 915	} else if (tb[NETNSA_FD]) {
 916		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
 917		nla = tb[NETNSA_FD];
 918	} else if (tb[NETNSA_NSID]) {
 919		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
 920		if (!peer)
 921			peer = ERR_PTR(-ENOENT);
 922		nla = tb[NETNSA_NSID];
 923	} else {
 924		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
 925		return -EINVAL;
 926	}
 927
 928	if (IS_ERR(peer)) {
 929		NL_SET_BAD_ATTR(extack, nla);
 930		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
 931		return PTR_ERR(peer);
 932	}
 933
 934	if (tb[NETNSA_TARGET_NSID]) {
 935		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
 936
 937		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
 938		if (IS_ERR(target)) {
 939			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
 940			NL_SET_ERR_MSG(extack,
 941				       "Target netns reference is invalid");
 942			err = PTR_ERR(target);
 943			goto out;
 944		}
 945		fillargs.add_ref = true;
 946		fillargs.ref_nsid = peernet2id(net, peer);
 947	}
 948
 949	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
 950	if (!msg) {
 951		err = -ENOMEM;
 952		goto out;
 953	}
 954
 955	fillargs.nsid = peernet2id(target, peer);
 956	err = rtnl_net_fill(msg, &fillargs);
 957	if (err < 0)
 958		goto err_out;
 959
 960	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
 961	goto out;
 962
 963err_out:
 964	nlmsg_free(msg);
 965out:
 966	if (fillargs.add_ref)
 967		put_net(target);
 968	put_net(peer);
 969	return err;
 970}
 971
 972struct rtnl_net_dump_cb {
 973	struct net *tgt_net;
 974	struct net *ref_net;
 975	struct sk_buff *skb;
 976	struct net_fill_args fillargs;
 977	int idx;
 978	int s_idx;
 979};
 980
 981/* Runs in RCU-critical section. */
 982static int rtnl_net_dumpid_one(int id, void *peer, void *data)
 983{
 984	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
 985	int ret;
 986
 987	if (net_cb->idx < net_cb->s_idx)
 988		goto cont;
 989
 990	net_cb->fillargs.nsid = id;
 991	if (net_cb->fillargs.add_ref)
 992		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
 993	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
 994	if (ret < 0)
 995		return ret;
 996
 997cont:
 998	net_cb->idx++;
 999	return 0;
1000}
1001
1002static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
1003				   struct rtnl_net_dump_cb *net_cb,
1004				   struct netlink_callback *cb)
1005{
1006	struct netlink_ext_ack *extack = cb->extack;
1007	struct nlattr *tb[NETNSA_MAX + 1];
1008	int err, i;
1009
1010	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
1011					    NETNSA_MAX, rtnl_net_policy,
1012					    extack);
1013	if (err < 0)
1014		return err;
1015
1016	for (i = 0; i <= NETNSA_MAX; i++) {
1017		if (!tb[i])
1018			continue;
1019
1020		if (i == NETNSA_TARGET_NSID) {
1021			struct net *net;
1022
1023			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
1024			if (IS_ERR(net)) {
1025				NL_SET_BAD_ATTR(extack, tb[i]);
1026				NL_SET_ERR_MSG(extack,
1027					       "Invalid target network namespace id");
1028				return PTR_ERR(net);
1029			}
1030			net_cb->fillargs.add_ref = true;
1031			net_cb->ref_net = net_cb->tgt_net;
1032			net_cb->tgt_net = net;
1033		} else {
1034			NL_SET_BAD_ATTR(extack, tb[i]);
1035			NL_SET_ERR_MSG(extack,
1036				       "Unsupported attribute in dump request");
1037			return -EINVAL;
1038		}
1039	}
1040
1041	return 0;
1042}
1043
1044static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
1045{
1046	struct rtnl_net_dump_cb net_cb = {
1047		.tgt_net = sock_net(skb->sk),
1048		.skb = skb,
1049		.fillargs = {
1050			.portid = NETLINK_CB(cb->skb).portid,
1051			.seq = cb->nlh->nlmsg_seq,
1052			.flags = NLM_F_MULTI,
1053			.cmd = RTM_NEWNSID,
1054		},
1055		.idx = 0,
1056		.s_idx = cb->args[0],
1057	};
1058	int err = 0;
1059
1060	if (cb->strict_check) {
1061		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
1062		if (err < 0)
1063			goto end;
1064	}
1065
1066	rcu_read_lock();
1067	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
1068	rcu_read_unlock();
1069
1070	cb->args[0] = net_cb.idx;
1071end:
1072	if (net_cb.fillargs.add_ref)
1073		put_net(net_cb.tgt_net);
1074	return err < 0 ? err : skb->len;
1075}
1076
1077static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
1078			      struct nlmsghdr *nlh, gfp_t gfp)
1079{
1080	struct net_fill_args fillargs = {
1081		.portid = portid,
1082		.seq = nlh ? nlh->nlmsg_seq : 0,
1083		.cmd = cmd,
1084		.nsid = id,
1085	};
1086	struct sk_buff *msg;
1087	int err = -ENOMEM;
1088
1089	msg = nlmsg_new(rtnl_net_get_size(), gfp);
1090	if (!msg)
1091		goto out;
1092
1093	err = rtnl_net_fill(msg, &fillargs);
1094	if (err < 0)
1095		goto err_out;
1096
1097	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
1098	return;
1099
1100err_out:
1101	nlmsg_free(msg);
1102out:
1103	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
1104}
1105
1106#ifdef CONFIG_NET_NS
1107static void __init netns_ipv4_struct_check(void)
1108{
1109	/* TX readonly hotpath cache lines */
1110	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1111				      sysctl_tcp_early_retrans);
1112	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1113				      sysctl_tcp_tso_win_divisor);
1114	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1115				      sysctl_tcp_tso_rtt_log);
1116	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1117				      sysctl_tcp_autocorking);
1118	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1119				      sysctl_tcp_min_snd_mss);
1120	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1121				      sysctl_tcp_notsent_lowat);
1122	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1123				      sysctl_tcp_limit_output_bytes);
1124	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1125				      sysctl_tcp_min_rtt_wlen);
1126	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1127				      sysctl_tcp_wmem);
1128	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1129				      sysctl_ip_fwd_use_pmtu);
1130	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);
1131
1132	/* TXRX readonly hotpath cache lines */
1133	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
1134				      sysctl_tcp_moderate_rcvbuf);
1135	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);
1136
1137	/* RX readonly hotpath cache line */
1138	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1139				      sysctl_ip_early_demux);
1140	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1141				      sysctl_tcp_early_demux);
1142	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1143				      sysctl_tcp_reordering);
1144	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1145				      sysctl_tcp_rmem);
1146	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 18);
1147}
1148#endif
1149
1150void __init net_ns_init(void)
1151{
1152	struct net_generic *ng;
1153
1154#ifdef CONFIG_NET_NS
1155	netns_ipv4_struct_check();
1156	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
1157					SMP_CACHE_BYTES,
1158					SLAB_PANIC|SLAB_ACCOUNT, NULL);
1159
1160	/* Create workqueue for cleanup */
1161	netns_wq = create_singlethread_workqueue("netns");
1162	if (!netns_wq)
1163		panic("Could not create netns workq");
1164#endif
1165
1166	ng = net_alloc_generic();
1167	if (!ng)
1168		panic("Could not allocate generic netns");
1169
1170	rcu_assign_pointer(init_net.gen, ng);
1171
1172#ifdef CONFIG_KEYS
1173	init_net.key_domain = &init_net_key_domain;
1174#endif
1175	down_write(&pernet_ops_rwsem);
1176	preinit_net(&init_net);
1177	if (setup_net(&init_net, &init_user_ns))
1178		panic("Could not setup the initial network namespace");
1179
1180	init_net_initialized = true;
1181	up_write(&pernet_ops_rwsem);
1182
1183	if (register_pernet_subsys(&net_ns_ops))
1184		panic("Could not register network namespace subsystems");
1185
1186	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
1187		      RTNL_FLAG_DOIT_UNLOCKED);
1188	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
1189		      RTNL_FLAG_DOIT_UNLOCKED);
1190}
1191
1192static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
1193{
1194	ops_pre_exit_list(ops, net_exit_list);
1195	synchronize_rcu();
1196	ops_exit_list(ops, net_exit_list);
1197	ops_free_list(ops, net_exit_list);
1198}
1199
1200#ifdef CONFIG_NET_NS
1201static int __register_pernet_operations(struct list_head *list,
1202					struct pernet_operations *ops)
1203{
1204	struct net *net;
1205	int error;
1206	LIST_HEAD(net_exit_list);
1207
1208	list_add_tail(&ops->list, list);
1209	if (ops->init || (ops->id && ops->size)) {
1210		/* We hold pernet_ops_rwsem write-locked, so parallel
1211		 * setup_net() and cleanup_net() are not possible.
1212		 */
1213		for_each_net(net) {
1214			error = ops_init(ops, net);
1215			if (error)
1216				goto out_undo;
1217			list_add_tail(&net->exit_list, &net_exit_list);
1218		}
1219	}
1220	return 0;
1221
1222out_undo:
1223	/* If I have an error, clean up all namespaces I initialized */
1224	list_del(&ops->list);
1225	free_exit_list(ops, &net_exit_list);
1226	return error;
1227}
1228
1229static void __unregister_pernet_operations(struct pernet_operations *ops)
1230{
1231	struct net *net;
1232	LIST_HEAD(net_exit_list);
1233
1234	list_del(&ops->list);
1235	/* See comment in __register_pernet_operations() */
1236	for_each_net(net)
1237		list_add_tail(&net->exit_list, &net_exit_list);
1238
1239	free_exit_list(ops, &net_exit_list);
1240}
1241
1242#else
1243
1244static int __register_pernet_operations(struct list_head *list,
1245					struct pernet_operations *ops)
1246{
1247	if (!init_net_initialized) {
1248		list_add_tail(&ops->list, list);
1249		return 0;
1250	}
1251
1252	return ops_init(ops, &init_net);
1253}
1254
1255static void __unregister_pernet_operations(struct pernet_operations *ops)
1256{
1257	if (!init_net_initialized) {
1258		list_del(&ops->list);
1259	} else {
1260		LIST_HEAD(net_exit_list);
1261		list_add(&init_net.exit_list, &net_exit_list);
1262		free_exit_list(ops, &net_exit_list);
1263	}
1264}
1265
1266#endif /* CONFIG_NET_NS */
1267
1268static DEFINE_IDA(net_generic_ids);
1269
1270static int register_pernet_operations(struct list_head *list,
1271				      struct pernet_operations *ops)
1272{
1273	int error;
1274
1275	if (ops->id) {
1276		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
1277				GFP_KERNEL);
1278		if (error < 0)
1279			return error;
1280		*ops->id = error;
1281		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
1282	}
1283	error = __register_pernet_operations(list, ops);
1284	if (error) {
1285		rcu_barrier();
1286		if (ops->id)
1287			ida_free(&net_generic_ids, *ops->id);
1288	}
1289
1290	return error;
1291}
1292
1293static void unregister_pernet_operations(struct pernet_operations *ops)
1294{
1295	__unregister_pernet_operations(ops);
1296	rcu_barrier();
1297	if (ops->id)
1298		ida_free(&net_generic_ids, *ops->id);
1299}
1300
1301/**
1302 *      register_pernet_subsys - register a network namespace subsystem
1303 *	@ops:  pernet operations structure for the subsystem
1304 *
1305 *	Register a subsystem which has init and exit functions
1306 *	that are called when network namespaces are created and
1307 *	destroyed respectively.
1308 *
1309 *	When registered, all network namespace init functions are
1310 *	called for every existing network namespace, allowing kernel
1311 *	modules to have a race-free view of the set of network namespaces.
1312 *
1313 *	When a new network namespace is created all of the init
1314 *	methods are called in the order in which they were registered.
1315 *
1316 *	When a network namespace is destroyed all of the exit methods
1317 *	are called in the reverse of the order with which they were
1318 *	registered.
1319 */
1320int register_pernet_subsys(struct pernet_operations *ops)
1321{
1322	int error;
1323	down_write(&pernet_ops_rwsem);
1324	error =  register_pernet_operations(first_device, ops);
1325	up_write(&pernet_ops_rwsem);
1326	return error;
1327}
1328EXPORT_SYMBOL_GPL(register_pernet_subsys);
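
/*
 * A minimal registration sketch: a subsystem typically pairs
 * register_pernet_subsys() with a pernet_operations block carrying an
 * id/size pair, so ops_init() above allocates a zeroed per-namespace blob
 * that is later reachable through net_generic().  All example_* names are
 * hypothetical.
 */
struct example_pernet_state {
	int counter;
};

static unsigned int example_pernet_id;

static int __net_init example_pernet_init(struct net *net)
{
	struct example_pernet_state *st = net_generic(net, example_pernet_id);

	st->counter = 0;	/* already zeroed by ops_init(), shown for clarity */
	return 0;
}

static void __net_exit example_pernet_exit(struct net *net)
{
	/* undo whatever example_pernet_init() set up for this namespace */
}

static struct pernet_operations example_pernet_ops = {
	.init = example_pernet_init,
	.exit = example_pernet_exit,
	.id   = &example_pernet_id,
	.size = sizeof(struct example_pernet_state),
};

static int __init example_subsys_init(void)
{
	return register_pernet_subsys(&example_pernet_ops);
}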
1329
1330/**
1331 *      unregister_pernet_subsys - unregister a network namespace subsystem
1332 *	@ops: pernet operations structure to manipulate
1333 *
1334 *	Remove the pernet operations structure from the list to be
1335 *	used when network namespaces are created or destroyed.  In
1336 *	addition run the exit method for all existing network
1337 *	namespaces.
1338 */
1339void unregister_pernet_subsys(struct pernet_operations *ops)
1340{
1341	down_write(&pernet_ops_rwsem);
1342	unregister_pernet_operations(ops);
1343	up_write(&pernet_ops_rwsem);
1344}
1345EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
1346
1347/**
1348 *      register_pernet_device - register a network namespace device
1349 *	@ops:  pernet operations structure for the subsystem
1350 *
1351 *	Register a device which has init and exit functions
1352 *	that are called when network namespaces are created and
1353 *	destroyed respectively.
1354 *
1355 *	When registered, all network namespace init functions are
1356 *	called for every existing network namespace, allowing kernel
1357 *	modules to have a race-free view of the set of network namespaces.
1358 *
1359 *	When a new network namespace is created all of the init
1360 *	methods are called in the order in which they were registered.
1361 *
1362 *	When a network namespace is destroyed all of the exit methods
1363 *	are called in the reverse of the order with which they were
1364 *	registered.
1365 */
1366int register_pernet_device(struct pernet_operations *ops)
1367{
1368	int error;
1369	down_write(&pernet_ops_rwsem);
1370	error = register_pernet_operations(&pernet_list, ops);
1371	if (!error && (first_device == &pernet_list))
1372		first_device = &ops->list;
1373	up_write(&pernet_ops_rwsem);
1374	return error;
1375}
1376EXPORT_SYMBOL_GPL(register_pernet_device);
1377
1378/**
1379 *      unregister_pernet_device - unregister a network namespace netdevice
1380 *	@ops: pernet operations structure to manipulate
1381 *
1382 *	Remove the pernet operations structure from the list to be
1383 *	used when network namespaces are created or destroyed.  In
1384 *	addition run the exit method for all existing network
1385 *	namespaces.
1386 */
1387void unregister_pernet_device(struct pernet_operations *ops)
1388{
1389	down_write(&pernet_ops_rwsem);
1390	if (&ops->list == first_device)
1391		first_device = first_device->next;
1392	unregister_pernet_operations(ops);
1393	up_write(&pernet_ops_rwsem);
1394}
1395EXPORT_SYMBOL_GPL(unregister_pernet_device);
1396
1397#ifdef CONFIG_NET_NS
1398static struct ns_common *netns_get(struct task_struct *task)
1399{
1400	struct net *net = NULL;
1401	struct nsproxy *nsproxy;
1402
1403	task_lock(task);
1404	nsproxy = task->nsproxy;
1405	if (nsproxy)
1406		net = get_net(nsproxy->net_ns);
1407	task_unlock(task);
1408
1409	return net ? &net->ns : NULL;
1410}
1411
1412static inline struct net *to_net_ns(struct ns_common *ns)
1413{
1414	return container_of(ns, struct net, ns);
1415}
1416
1417static void netns_put(struct ns_common *ns)
1418{
1419	put_net(to_net_ns(ns));
1420}
1421
1422static int netns_install(struct nsset *nsset, struct ns_common *ns)
1423{
1424	struct nsproxy *nsproxy = nsset->nsproxy;
1425	struct net *net = to_net_ns(ns);
1426
1427	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
1428	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
1429		return -EPERM;
1430
1431	put_net(nsproxy->net_ns);
1432	nsproxy->net_ns = get_net(net);
1433	return 0;
1434}
1435
1436static struct user_namespace *netns_owner(struct ns_common *ns)
1437{
1438	return to_net_ns(ns)->user_ns;
1439}
1440
1441const struct proc_ns_operations netns_operations = {
1442	.name		= "net",
1443	.type		= CLONE_NEWNET,
1444	.get		= netns_get,
1445	.put		= netns_put,
1446	.install	= netns_install,
1447	.owner		= netns_owner,
1448};
1449#endif
v4.10.11
 
   1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   2
   3#include <linux/workqueue.h>
   4#include <linux/rtnetlink.h>
   5#include <linux/cache.h>
   6#include <linux/slab.h>
   7#include <linux/list.h>
   8#include <linux/delay.h>
   9#include <linux/sched.h>
  10#include <linux/idr.h>
  11#include <linux/rculist.h>
  12#include <linux/nsproxy.h>
  13#include <linux/fs.h>
  14#include <linux/proc_ns.h>
  15#include <linux/file.h>
  16#include <linux/export.h>
  17#include <linux/user_namespace.h>
  18#include <linux/net_namespace.h>
  19#include <net/sock.h>
  20#include <net/netlink.h>
  21#include <net/net_namespace.h>
  22#include <net/netns/generic.h>
  23
  24/*
  25 *	Our network namespace constructor/destructor lists
  26 */
  27
  28static LIST_HEAD(pernet_list);
  29static struct list_head *first_device = &pernet_list;
  30DEFINE_MUTEX(net_mutex);
  31
  32LIST_HEAD(net_namespace_list);
  33EXPORT_SYMBOL_GPL(net_namespace_list);
  34
  35struct net init_net = {
  36	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
  37};
  38EXPORT_SYMBOL(init_net);
  39
  40static bool init_net_initialized;
  41
  42#define MIN_PERNET_OPS_ID	\
  43	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
  44
  45#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
  46
  47static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
  48
  49static struct net_generic *net_alloc_generic(void)
  50{
  51	struct net_generic *ng;
  52	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
  53
  54	ng = kzalloc(generic_size, GFP_KERNEL);
  55	if (ng)
  56		ng->s.len = max_gen_ptrs;
  57
  58	return ng;
  59}
  60
  61static int net_assign_generic(struct net *net, unsigned int id, void *data)
  62{
  63	struct net_generic *ng, *old_ng;
  64
  65	BUG_ON(!mutex_is_locked(&net_mutex));
  66	BUG_ON(id < MIN_PERNET_OPS_ID);
  67
  68	old_ng = rcu_dereference_protected(net->gen,
  69					   lockdep_is_held(&net_mutex));
  70	if (old_ng->s.len > id) {
  71		old_ng->ptr[id] = data;
  72		return 0;
  73	}
  74
  75	ng = net_alloc_generic();
  76	if (ng == NULL)
  77		return -ENOMEM;
  78
  79	/*
  80	 * Some synchronisation notes:
  81	 *
  82	 * The net_generic explores the net->gen array inside rcu
  83	 * read section. Besides once set the net->gen->ptr[x]
  84	 * pointer never changes (see rules in netns/generic.h).
  85	 *
  86	 * That said, we simply duplicate this array and schedule
  87	 * the old copy for kfree after a grace period.
  88	 */
  89
  90	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
  91	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
  92	ng->ptr[id] = data;
  93
  94	rcu_assign_pointer(net->gen, ng);
  95	kfree_rcu(old_ng, s.rcu);
  96	return 0;
  97}
  98
  99static int ops_init(const struct pernet_operations *ops, struct net *net)
 100{
 101	int err = -ENOMEM;
 102	void *data = NULL;
 103
 104	if (ops->id && ops->size) {
 105		data = kzalloc(ops->size, GFP_KERNEL);
 106		if (!data)
 107			goto out;
 108
 109		err = net_assign_generic(net, *ops->id, data);
 110		if (err)
 111			goto cleanup;
 112	}
 113	err = 0;
 114	if (ops->init)
 115		err = ops->init(net);
 116	if (!err)
 117		return 0;
 118
 119cleanup:
 120	kfree(data);
 121
 122out:
 123	return err;
 124}
 125
 126static void ops_free(const struct pernet_operations *ops, struct net *net)
 127{
 128	if (ops->id && ops->size) {
 129		kfree(net_generic(net, *ops->id));
 130	}
 131}
 132
 133static void ops_exit_list(const struct pernet_operations *ops,
 134			  struct list_head *net_exit_list)
 135{
 136	struct net *net;
 137	if (ops->exit) {
 138		list_for_each_entry(net, net_exit_list, exit_list)
 139			ops->exit(net);
 140	}
 141	if (ops->exit_batch)
 142		ops->exit_batch(net_exit_list);
 143}
 144
 145static void ops_free_list(const struct pernet_operations *ops,
 146			  struct list_head *net_exit_list)
 147{
 148	struct net *net;
 149	if (ops->size && ops->id) {
 150		list_for_each_entry(net, net_exit_list, exit_list)
 151			ops_free(ops, net);
 152	}
 153}
 154
 155/* should be called with nsid_lock held */
 156static int alloc_netid(struct net *net, struct net *peer, int reqid)
 157{
 158	int min = 0, max = 0;
 159
 160	if (reqid >= 0) {
 161		min = reqid;
 162		max = reqid + 1;
 163	}
 164
 165	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
 166}
 167
 168/* This function is used by idr_for_each(). If net is equal to peer, the
 169 * function returns the id so that idr_for_each() stops. Because we cannot
 170 * return the id 0 (idr_for_each() will not stop), we return the magic value
 171 * NET_ID_ZERO (-1) for it.
 172 */
 173#define NET_ID_ZERO -1
 174static int net_eq_idr(int id, void *net, void *peer)
 175{
 176	if (net_eq(net, peer))
 177		return id ? : NET_ID_ZERO;
 178	return 0;
 179}
 180
 181/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 182 * is set to true, thus the caller knows that the new id must be notified via
 183 * rtnl.
 184 */
 185static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
 186{
 187	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
 188	bool alloc_it = *alloc;
 189
 190	*alloc = false;
 191
 192	/* Magic value for id 0. */
 193	if (id == NET_ID_ZERO)
 194		return 0;
 195	if (id > 0)
 196		return id;
 197
 198	if (alloc_it) {
 199		id = alloc_netid(net, peer, -1);
 200		*alloc = true;
 201		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
 202	}
 203
 204	return NETNSA_NSID_NOT_ASSIGNED;
 205}
 206
 207/* should be called with nsid_lock held */
 208static int __peernet2id(struct net *net, struct net *peer)
 209{
 210	bool no = false;
 211
 212	return __peernet2id_alloc(net, peer, &no);
 213}
 214
 215static void rtnl_net_notifyid(struct net *net, int cmd, int id);
 216/* This function returns the id of a peer netns. If no id is assigned, one will
 217 * be allocated and returned.
 218 */
 219int peernet2id_alloc(struct net *net, struct net *peer)
 220{
 221	bool alloc;
 222	int id;
 223
 224	if (atomic_read(&net->count) == 0)
 225		return NETNSA_NSID_NOT_ASSIGNED;
 226	spin_lock_bh(&net->nsid_lock);
 227	alloc = atomic_read(&peer->count) == 0 ? false : true;
 228	id = __peernet2id_alloc(net, peer, &alloc);
 229	spin_unlock_bh(&net->nsid_lock);
 230	if (alloc && id >= 0)
 231		rtnl_net_notifyid(net, RTM_NEWNSID, id);
 232	return id;
 233}
 234
 235/* This function returns, if assigned, the id of a peer netns. */
 236int peernet2id(struct net *net, struct net *peer)
 237{
 238	int id;
 239
 240	spin_lock_bh(&net->nsid_lock);
 241	id = __peernet2id(net, peer);
 242	spin_unlock_bh(&net->nsid_lock);
 243	return id;
 244}
 245EXPORT_SYMBOL(peernet2id);
 246
 247/* This function returns true if the peer netns has an id assigned into the
 248 * current netns.
 249 */
 250bool peernet_has_id(struct net *net, struct net *peer)
 251{
 252	return peernet2id(net, peer) >= 0;
 253}
 254
 255struct net *get_net_ns_by_id(struct net *net, int id)
 256{
 257	struct net *peer;
 258
 259	if (id < 0)
 260		return NULL;
 261
 262	rcu_read_lock();
 263	spin_lock_bh(&net->nsid_lock);
 264	peer = idr_find(&net->netns_ids, id);
 265	if (peer)
 266		get_net(peer);
 267	spin_unlock_bh(&net->nsid_lock);
 268	rcu_read_unlock();
 269
 270	return peer;
 271}
 272
 273/*
 274 * setup_net runs the initializers for the network namespace object.
 275 */
 276static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 277{
 278	/* Must be called with net_mutex held */
 279	const struct pernet_operations *ops, *saved_ops;
 280	int error = 0;
 281	LIST_HEAD(net_exit_list);
 282
 283	atomic_set(&net->count, 1);
 284	atomic_set(&net->passive, 1);
 285	net->dev_base_seq = 1;
 286	net->user_ns = user_ns;
 287	idr_init(&net->netns_ids);
 288	spin_lock_init(&net->nsid_lock);
 289
 290	list_for_each_entry(ops, &pernet_list, list) {
 291		error = ops_init(ops, net);
 292		if (error < 0)
 293			goto out_undo;
 294	}
 295out:
 296	return error;
 297
 298out_undo:
 299	/* Walk through the list backwards calling the exit functions
 300	 * for the pernet modules whose init functions did not fail.
 301	 */
 302	list_add(&net->exit_list, &net_exit_list);
 303	saved_ops = ops;
 304	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
 305		ops_exit_list(ops, &net_exit_list);
 306
 307	ops = saved_ops;
 308	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
 309		ops_free_list(ops, &net_exit_list);
 310
 311	rcu_barrier();
 312	goto out;
 313}
 314
 315
 316#ifdef CONFIG_NET_NS
 317static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
 318{
 319	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
 320}
 321
 322static void dec_net_namespaces(struct ucounts *ucounts)
 323{
 324	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
 325}
 326
 327static struct kmem_cache *net_cachep;
 328static struct workqueue_struct *netns_wq;
 329
 330static struct net *net_alloc(void)
 331{
 332	struct net *net = NULL;
 333	struct net_generic *ng;
 334
 335	ng = net_alloc_generic();
 336	if (!ng)
 337		goto out;
 338
 339	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
 340	if (!net)
 341		goto out_free;
 342
 343	rcu_assign_pointer(net->gen, ng);
 344out:
 345	return net;
 346
 347out_free:
 348	kfree(ng);
 349	goto out;
 350}
 351
 352static void net_free(struct net *net)
 353{
 354	kfree(rcu_access_pointer(net->gen));
 355	kmem_cache_free(net_cachep, net);
 356}
 357
 358void net_drop_ns(void *p)
 359{
 360	struct net *ns = p;
 361	if (ns && atomic_dec_and_test(&ns->passive))
 362		net_free(ns);
 363}
 364
 365struct net *copy_net_ns(unsigned long flags,
 366			struct user_namespace *user_ns, struct net *old_net)
 367{
 368	struct ucounts *ucounts;
 369	struct net *net;
 370	int rv;
 371
 372	if (!(flags & CLONE_NEWNET))
 373		return get_net(old_net);
 374
 375	ucounts = inc_net_namespaces(user_ns);
 376	if (!ucounts)
 377		return ERR_PTR(-ENOSPC);
 378
 379	net = net_alloc();
 380	if (!net) {
 381		dec_net_namespaces(ucounts);
 382		return ERR_PTR(-ENOMEM);
 383	}
 384
 385	get_user_ns(user_ns);
 386
 387	rv = mutex_lock_killable(&net_mutex);
 388	if (rv < 0) {
 389		net_free(net);
 390		dec_net_namespaces(ucounts);
 391		put_user_ns(user_ns);
 392		return ERR_PTR(rv);
 393	}
 394
 395	net->ucounts = ucounts;
 396	rv = setup_net(net, user_ns);
 397	if (rv == 0) {
 398		rtnl_lock();
 399		list_add_tail_rcu(&net->list, &net_namespace_list);
 400		rtnl_unlock();
 401	}
 402	mutex_unlock(&net_mutex);
 403	if (rv < 0) {
 404		dec_net_namespaces(ucounts);
 405		put_user_ns(user_ns);
 406		net_drop_ns(net);
 407		return ERR_PTR(rv);
 408	}
 409	return net;
 410}
 411
 412static DEFINE_SPINLOCK(cleanup_list_lock);
 413static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
 414
 415static void cleanup_net(struct work_struct *work)
 416{
 417	const struct pernet_operations *ops;
 418	struct net *net, *tmp;
 419	struct list_head net_kill_list;
 420	LIST_HEAD(net_exit_list);
 421
 422	/* Atomically snapshot the list of namespaces to cleanup */
 423	spin_lock_irq(&cleanup_list_lock);
 424	list_replace_init(&cleanup_list, &net_kill_list);
 425	spin_unlock_irq(&cleanup_list_lock);
 426
 427	mutex_lock(&net_mutex);
 428
 429	/* Don't let anyone else find us. */
 430	rtnl_lock();
 431	list_for_each_entry(net, &net_kill_list, cleanup_list) {
 432		list_del_rcu(&net->list);
 433		list_add_tail(&net->exit_list, &net_exit_list);
 434		for_each_net(tmp) {
 435			int id;
 436
 437			spin_lock_bh(&tmp->nsid_lock);
 438			id = __peernet2id(tmp, net);
 439			if (id >= 0)
 440				idr_remove(&tmp->netns_ids, id);
 441			spin_unlock_bh(&tmp->nsid_lock);
 442			if (id >= 0)
 443				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
 444		}
 445		spin_lock_bh(&net->nsid_lock);
 446		idr_destroy(&net->netns_ids);
 447		spin_unlock_bh(&net->nsid_lock);
 448
 449	}
 450	rtnl_unlock();
 451
 452	/*
 453	 * Another CPU might be rcu-iterating the list, wait for it.
 454	 * This needs to be before calling the exit() notifiers, so
 455	 * the rcu_barrier() below isn't sufficient alone.
 456	 */
 457	synchronize_rcu();
 458
 459	/* Run all of the network namespace exit methods */
 460	list_for_each_entry_reverse(ops, &pernet_list, list)
 461		ops_exit_list(ops, &net_exit_list);
 462
 463	/* Free the net generic variables */
 464	list_for_each_entry_reverse(ops, &pernet_list, list)
 465		ops_free_list(ops, &net_exit_list);
 466
 467	mutex_unlock(&net_mutex);
 468
 469	/* Ensure there are no outstanding rcu callbacks using this
 470	 * network namespace.
 471	 */
 472	rcu_barrier();
 473
 474	/* Finally it is safe to free my network namespace structure */
 475	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
 476		list_del_init(&net->exit_list);
 477		dec_net_namespaces(net->ucounts);
 478		put_user_ns(net->user_ns);
 479		net_drop_ns(net);
 480	}
 481}
 482static DECLARE_WORK(net_cleanup_work, cleanup_net);
 483
 484void __put_net(struct net *net)
 485{
 486	/* Cleanup the network namespace in process context */
 487	unsigned long flags;
 488
 489	spin_lock_irqsave(&cleanup_list_lock, flags);
 490	list_add(&net->cleanup_list, &cleanup_list);
 491	spin_unlock_irqrestore(&cleanup_list_lock, flags);
 492
 493	queue_work(netns_wq, &net_cleanup_work);
 494}
 495EXPORT_SYMBOL_GPL(__put_net);
 496
 497struct net *get_net_ns_by_fd(int fd)
 498{
 499	struct file *file;
 500	struct ns_common *ns;
 501	struct net *net;
 502
 503	file = proc_ns_fget(fd);
 504	if (IS_ERR(file))
 505		return ERR_CAST(file);
 506
 507	ns = get_proc_ns(file_inode(file));
 508	if (ns->ops == &netns_operations)
 509		net = get_net(container_of(ns, struct net, ns));
 510	else
 511		net = ERR_PTR(-EINVAL);
 512
 513	fput(file);
 514	return net;
 515}
 516
 517#else
 518struct net *get_net_ns_by_fd(int fd)
 519{
 520	return ERR_PTR(-EINVAL);
 521}
 522#endif
 523EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
 524
 525struct net *get_net_ns_by_pid(pid_t pid)
 526{
 527	struct task_struct *tsk;
 528	struct net *net;
 529
 530	/* Lookup the network namespace */
 531	net = ERR_PTR(-ESRCH);
 532	rcu_read_lock();
 533	tsk = find_task_by_vpid(pid);
 534	if (tsk) {
 535		struct nsproxy *nsproxy;
 536		task_lock(tsk);
 537		nsproxy = tsk->nsproxy;
 538		if (nsproxy)
 539			net = get_net(nsproxy->net_ns);
 540		task_unlock(tsk);
 541	}
 542	rcu_read_unlock();
 543	return net;
 544}
 545EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
 546
 547static __net_init int net_ns_net_init(struct net *net)
 548{
 549#ifdef CONFIG_NET_NS
 550	net->ns.ops = &netns_operations;
 551#endif
 552	return ns_alloc_inum(&net->ns);
 553}
 554
 555static __net_exit void net_ns_net_exit(struct net *net)
 556{
 557	ns_free_inum(&net->ns);
 558}
 559
 560static struct pernet_operations __net_initdata net_ns_ops = {
 561	.init = net_ns_net_init,
 562	.exit = net_ns_net_exit,
 563};
 564
 565static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
 566	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
 567	[NETNSA_NSID]		= { .type = NLA_S32 },
 568	[NETNSA_PID]		= { .type = NLA_U32 },
 569	[NETNSA_FD]		= { .type = NLA_U32 },
 570};
 571
 572static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 573{
 574	struct net *net = sock_net(skb->sk);
 575	struct nlattr *tb[NETNSA_MAX + 1];
 576	struct net *peer;
 577	int nsid, err;
 578
 579	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
 580			  rtnl_net_policy);
 581	if (err < 0)
 582		return err;
 583	if (!tb[NETNSA_NSID])
 584		return -EINVAL;
 585	nsid = nla_get_s32(tb[NETNSA_NSID]);
 586
 587	if (tb[NETNSA_PID])
 588		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
 589	else if (tb[NETNSA_FD])
 590		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
 591	else
 592		return -EINVAL;
 593	if (IS_ERR(peer))
 594		return PTR_ERR(peer);
 595
 596	spin_lock_bh(&net->nsid_lock);
 597	if (__peernet2id(net, peer) >= 0) {
 598		spin_unlock_bh(&net->nsid_lock);
 599		err = -EEXIST;
 600		goto out;
 601	}
 602
 603	err = alloc_netid(net, peer, nsid);
 604	spin_unlock_bh(&net->nsid_lock);
 605	if (err >= 0) {
 606		rtnl_net_notifyid(net, RTM_NEWNSID, err);
 607		err = 0;
 608	}
 609out:
 610	put_net(peer);
 611	return err;
 612}
 613
 614static int rtnl_net_get_size(void)
 615{
 616	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
 617	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
 618	       ;
 619}
 620
 621static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
 622			 int cmd, struct net *net, int nsid)
 623{
 624	struct nlmsghdr *nlh;
 625	struct rtgenmsg *rth;
 626
 627	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
 628	if (!nlh)
 629		return -EMSGSIZE;
 630
 631	rth = nlmsg_data(nlh);
 632	rth->rtgen_family = AF_UNSPEC;
 633
 634	if (nla_put_s32(skb, NETNSA_NSID, nsid))
 635		goto nla_put_failure;
 636
 637	nlmsg_end(skb, nlh);
 638	return 0;
 639
 640nla_put_failure:
 641	nlmsg_cancel(skb, nlh);
 642	return -EMSGSIZE;
 643}
 644
 645static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
 646{
 647	struct net *net = sock_net(skb->sk);
 648	struct nlattr *tb[NETNSA_MAX + 1];
 649	struct sk_buff *msg;
 650	struct net *peer;
 651	int err, id;
 652
 653	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
 654			  rtnl_net_policy);
 655	if (err < 0)
 656		return err;
 657	if (tb[NETNSA_PID])
 658		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
 659	else if (tb[NETNSA_FD])
 660		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
 661	else
 662		return -EINVAL;
 663
 664	if (IS_ERR(peer))
 665		return PTR_ERR(peer);
 666
 667	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
 668	if (!msg) {
 669		err = -ENOMEM;
 670		goto out;
 671	}
 672
 673	id = peernet2id(net, peer);
 674	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
 675			    RTM_NEWNSID, net, id);
 676	if (err < 0)
 677		goto err_out;
 678
 679	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
 680	goto out;
 681
 682err_out:
 683	nlmsg_free(msg);
 684out:
 685	put_net(peer);
 686	return err;
 687}
 688
 689struct rtnl_net_dump_cb {
 690	struct net *net;
 691	struct sk_buff *skb;
 692	struct netlink_callback *cb;
 693	int idx;
 694	int s_idx;
 695};
 696
 697static int rtnl_net_dumpid_one(int id, void *peer, void *data)
 698{
 699	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
 700	int ret;
 701
 702	if (net_cb->idx < net_cb->s_idx)
 703		goto cont;
 704
 705	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
 706			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
 707			    RTM_NEWNSID, net_cb->net, id);
 708	if (ret < 0)
 709		return ret;
 710
 711cont:
 712	net_cb->idx++;
 713	return 0;
 714}
 715
 716static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
 717{
 718	struct net *net = sock_net(skb->sk);
 719	struct rtnl_net_dump_cb net_cb = {
 720		.net = net,
 721		.skb = skb,
 722		.cb = cb,
 723		.idx = 0,
 724		.s_idx = cb->args[0],
 725	};
 726
 727	spin_lock_bh(&net->nsid_lock);
 728	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
 729	spin_unlock_bh(&net->nsid_lock);
 730
 731	cb->args[0] = net_cb.idx;
 732	return skb->len;
 733}
 734
 735static void rtnl_net_notifyid(struct net *net, int cmd, int id)
 736{
 737	struct sk_buff *msg;
 738	int err = -ENOMEM;
 739
 740	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
 741	if (!msg)
 742		goto out;
 743
 744	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
 745	if (err < 0)
 746		goto err_out;
 747
 748	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
 749	return;
 750
 751err_out:
 752	nlmsg_free(msg);
 753out:
 754	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
 755}
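/*
 * Editorial note (not part of this file): an RTM_NEWNSID request handled
 * by rtnl_net_newid() must carry NETNSA_NSID plus exactly one of
 * NETNSA_PID or NETNSA_FD to identify the peer namespace; an RTM_GETNSID
 * request handled by rtnl_net_getid() takes NETNSA_PID or NETNSA_FD and
 * is answered with an RTM_NEWNSID message built by rtnl_net_fill().  In
 * practice these handlers are usually driven by iproute2, e.g. (exact
 * command names are an assumption, not taken from this source):
 *
 *	ip netns set <name> <nsid>	assign an id to a peer namespace
 *	ip netns list-id		dump ids via rtnl_net_dumpid()
 */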
 756
 757static int __init net_ns_init(void)
 758{
 759	struct net_generic *ng;
 760
 761#ifdef CONFIG_NET_NS
 762	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
 763					SMP_CACHE_BYTES,
 764					SLAB_PANIC, NULL);
 765
 766	/* Create workqueue for cleanup */
 767	netns_wq = create_singlethread_workqueue("netns");
 768	if (!netns_wq)
 769		panic("Could not create netns workq");
 770#endif
 771
 772	ng = net_alloc_generic();
 773	if (!ng)
 774		panic("Could not allocate generic netns");
 775
 776	rcu_assign_pointer(init_net.gen, ng);
 777
 778	mutex_lock(&net_mutex);
 779	if (setup_net(&init_net, &init_user_ns))
 780		panic("Could not setup the initial network namespace");
 781
 782	init_net_initialized = true;
 783
 784	rtnl_lock();
 785	list_add_tail_rcu(&init_net.list, &net_namespace_list);
 786	rtnl_unlock();
 787
 788	mutex_unlock(&net_mutex);
 789
 790	register_pernet_subsys(&net_ns_ops);
 791
 792	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
 793	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
 794		      NULL);
 795
 796	return 0;
 797}
 798
 799pure_initcall(net_ns_init);
 800
 801#ifdef CONFIG_NET_NS
 802static int __register_pernet_operations(struct list_head *list,
 803					struct pernet_operations *ops)
 804{
 805	struct net *net;
 806	int error;
 807	LIST_HEAD(net_exit_list);
 808
 809	list_add_tail(&ops->list, list);
 810	if (ops->init || (ops->id && ops->size)) {
 811		for_each_net(net) {
 812			error = ops_init(ops, net);
 813			if (error)
 814				goto out_undo;
 815			list_add_tail(&net->exit_list, &net_exit_list);
 816		}
 817	}
 818	return 0;
 819
 820out_undo:
  821	/* If I have an error, clean up all namespaces I initialized */
 822	list_del(&ops->list);
 823	ops_exit_list(ops, &net_exit_list);
 824	ops_free_list(ops, &net_exit_list);
 825	return error;
 826}
 827
 828static void __unregister_pernet_operations(struct pernet_operations *ops)
 829{
 830	struct net *net;
 831	LIST_HEAD(net_exit_list);
 832
 833	list_del(&ops->list);
 834	for_each_net(net)
 835		list_add_tail(&net->exit_list, &net_exit_list);
 836	ops_exit_list(ops, &net_exit_list);
 837	ops_free_list(ops, &net_exit_list);
 838}
 839
 840#else
 841
 842static int __register_pernet_operations(struct list_head *list,
 843					struct pernet_operations *ops)
 844{
 845	if (!init_net_initialized) {
 846		list_add_tail(&ops->list, list);
 847		return 0;
 848	}
 849
 850	return ops_init(ops, &init_net);
 851}
 852
 853static void __unregister_pernet_operations(struct pernet_operations *ops)
 854{
 855	if (!init_net_initialized) {
 856		list_del(&ops->list);
 857	} else {
 858		LIST_HEAD(net_exit_list);
 859		list_add(&init_net.exit_list, &net_exit_list);
 860		ops_exit_list(ops, &net_exit_list);
 861		ops_free_list(ops, &net_exit_list);
 862	}
 863}
 864
 865#endif /* CONFIG_NET_NS */
 866
 867static DEFINE_IDA(net_generic_ids);
 868
 869static int register_pernet_operations(struct list_head *list,
 870				      struct pernet_operations *ops)
 871{
 872	int error;
 873
 874	if (ops->id) {
 875again:
 876		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
 877		if (error < 0) {
 878			if (error == -EAGAIN) {
 879				ida_pre_get(&net_generic_ids, GFP_KERNEL);
 880				goto again;
 881			}
 882			return error;
 883		}
 884		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
 885	}
 886	error = __register_pernet_operations(list, ops);
 887	if (error) {
 888		rcu_barrier();
 889		if (ops->id)
 890			ida_remove(&net_generic_ids, *ops->id);
 891	}
 892
 893	return error;
 894}
 895
 896static void unregister_pernet_operations(struct pernet_operations *ops)
 897{
  898
 899	__unregister_pernet_operations(ops);
 900	rcu_barrier();
 901	if (ops->id)
 902		ida_remove(&net_generic_ids, *ops->id);
 903}
 904
 905/**
 906 *      register_pernet_subsys - register a network namespace subsystem
 907 *	@ops:  pernet operations structure for the subsystem
 908 *
 909 *	Register a subsystem which has init and exit functions
 910 *	that are called when network namespaces are created and
 911 *	destroyed respectively.
 912 *
  913 *	When registered, all network namespace init functions are
  914 *	called for every existing network namespace, allowing kernel
  915 *	modules to have a race-free view of the set of network namespaces.
 916 *
 917 *	When a new network namespace is created all of the init
 918 *	methods are called in the order in which they were registered.
 919 *
 920 *	When a network namespace is destroyed all of the exit methods
 921 *	are called in the reverse of the order with which they were
 922 *	registered.
 923 */
 924int register_pernet_subsys(struct pernet_operations *ops)
 925{
 926	int error;
 927	mutex_lock(&net_mutex);
 928	error =  register_pernet_operations(first_device, ops);
 929	mutex_unlock(&net_mutex);
 930	return error;
 931}
 932EXPORT_SYMBOL_GPL(register_pernet_subsys);
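/*
 * Illustrative sketch (editorial note, not part of this file): a typical
 * subsystem requests per-namespace storage by setting ->id and ->size,
 * lets ops_init() allocate and install it, and later looks it up with
 * net_generic().  All "foo" names below are hypothetical.
 *
 *	struct foo_pernet {
 *		int	counter;
 *	};
 *
 *	static unsigned int foo_pernet_id;
 *
 *	static int __net_init foo_pernet_init(struct net *net)
 *	{
 *		struct foo_pernet *fp = net_generic(net, foo_pernet_id);
 *
 *		fp->counter = 0;
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_pernet_exit(struct net *net)
 *	{
 *		... undo whatever foo_pernet_init() set up ...
 *	}
 *
 *	static struct pernet_operations foo_pernet_ops = {
 *		.init	= foo_pernet_init,
 *		.exit	= foo_pernet_exit,
 *		.id	= &foo_pernet_id,
 *		.size	= sizeof(struct foo_pernet),
 *	};
 *
 *	// from the module or subsystem init path:
 *	//	err = register_pernet_subsys(&foo_pernet_ops);
 */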
 933
 934/**
 935 *      unregister_pernet_subsys - unregister a network namespace subsystem
 936 *	@ops: pernet operations structure to manipulate
 937 *
 938 *	Remove the pernet operations structure from the list to be
 939 *	used when network namespaces are created or destroyed.  In
 940 *	addition run the exit method for all existing network
 941 *	namespaces.
 942 */
 943void unregister_pernet_subsys(struct pernet_operations *ops)
 944{
 945	mutex_lock(&net_mutex);
 946	unregister_pernet_operations(ops);
 947	mutex_unlock(&net_mutex);
 948}
 949EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
 950
 951/**
 952 *      register_pernet_device - register a network namespace device
 953 *	@ops:  pernet operations structure for the subsystem
 954 *
 955 *	Register a device which has init and exit functions
 956 *	that are called when network namespaces are created and
 957 *	destroyed respectively.
 958 *
  959 *	When registered, all network namespace init functions are
  960 *	called for every existing network namespace, allowing kernel
  961 *	modules to have a race-free view of the set of network namespaces.
 962 *
 963 *	When a new network namespace is created all of the init
 964 *	methods are called in the order in which they were registered.
 965 *
 966 *	When a network namespace is destroyed all of the exit methods
 967 *	are called in the reverse of the order with which they were
 968 *	registered.
 969 */
 970int register_pernet_device(struct pernet_operations *ops)
 971{
 972	int error;
 973	mutex_lock(&net_mutex);
 974	error = register_pernet_operations(&pernet_list, ops);
 975	if (!error && (first_device == &pernet_list))
 976		first_device = &ops->list;
 977	mutex_unlock(&net_mutex);
 978	return error;
 979}
 980EXPORT_SYMBOL_GPL(register_pernet_device);
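/*
 * Editorial note (not part of this file): register_pernet_device() adds
 * ops at the tail of pernet_list, while register_pernet_subsys() inserts
 * before first_device.  On namespace creation every subsystem ->init
 * therefore runs before any device ->init, and on teardown (which walks
 * the list in reverse) device ->exit methods run before subsystem ->exit
 * methods.  Apart from this ordering, registering pernet device ops
 * looks just like the subsystem sketch above, using
 * register_pernet_device()/unregister_pernet_device().
 */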
 981
 982/**
 983 *      unregister_pernet_device - unregister a network namespace netdevice
 984 *	@ops: pernet operations structure to manipulate
 985 *
 986 *	Remove the pernet operations structure from the list to be
 987 *	used when network namespaces are created or destroyed.  In
 988 *	addition run the exit method for all existing network
 989 *	namespaces.
 990 */
 991void unregister_pernet_device(struct pernet_operations *ops)
 992{
 993	mutex_lock(&net_mutex);
 994	if (&ops->list == first_device)
 995		first_device = first_device->next;
 996	unregister_pernet_operations(ops);
 997	mutex_unlock(&net_mutex);
 998}
 999EXPORT_SYMBOL_GPL(unregister_pernet_device);
1000
1001#ifdef CONFIG_NET_NS
1002static struct ns_common *netns_get(struct task_struct *task)
1003{
1004	struct net *net = NULL;
1005	struct nsproxy *nsproxy;
1006
1007	task_lock(task);
1008	nsproxy = task->nsproxy;
1009	if (nsproxy)
1010		net = get_net(nsproxy->net_ns);
1011	task_unlock(task);
1012
1013	return net ? &net->ns : NULL;
1014}
1015
1016static inline struct net *to_net_ns(struct ns_common *ns)
1017{
1018	return container_of(ns, struct net, ns);
1019}
1020
1021static void netns_put(struct ns_common *ns)
1022{
1023	put_net(to_net_ns(ns));
1024}
1025
1026static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
1027{
1028	struct net *net = to_net_ns(ns);
1029
1030	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
1031	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
1032		return -EPERM;
1033
1034	put_net(nsproxy->net_ns);
1035	nsproxy->net_ns = get_net(net);
1036	return 0;
1037}
1038
1039static struct user_namespace *netns_owner(struct ns_common *ns)
1040{
1041	return to_net_ns(ns)->user_ns;
1042}
1043
1044const struct proc_ns_operations netns_operations = {
1045	.name		= "net",
1046	.type		= CLONE_NEWNET,
1047	.get		= netns_get,
1048	.put		= netns_put,
1049	.install	= netns_install,
1050	.owner		= netns_owner,
1051};
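/*
 * Illustrative sketch (editorial note, not part of this file):
 * netns_install() above is what ultimately runs when a task calls
 * setns(2) on a network namespace fd.  A minimal userspace example,
 * assuming the usual /proc layout:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sched.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/1/ns/net", O_RDONLY);
 *	if (fd >= 0 && setns(fd, CLONE_NEWNET) == 0) {
 *		// this task's nsproxy->net_ns now references PID 1's
 *		// namespace, provided the caller has CAP_SYS_ADMIN in
 *		// both that namespace's owning user namespace and its own
 *	}
 *	if (fd >= 0)
 *		close(fd);
 */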
1052#endif