v3.15
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>

struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */

static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

static void proc_cleanup_work(struct work_struct *work)
{
	struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
	pid_ns_release_proc(ns);
}

/* MAX_PID_NS_LEVEL is needed to limit the size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32

static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	int i;
	int err;

	if (level > MAX_PID_NS_LEVEL) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	err = proc_alloc_inum(&ns->proc_inum);
	if (err)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->nr_hashed = PIDNS_HASH_ADDING;
	INIT_WORK(&ns->proc_work, proc_cleanup_work);

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(err);
}

static void delayed_free_pidns(struct rcu_head *p)
{
	kmem_cache_free(pid_ns_cachep,
			container_of(p, struct pid_namespace, rcu));
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	proc_free_inum(ns->proc_inum);
	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	put_user_ns(ns->user_ns);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

static void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	destroy_pid_namespace(ns);
}

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!kref_put(&ns->kref, free_pid_ns))
			break;
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/* Ignore SIGCHLD, causing any terminated children to autoreap */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal and wait for
	 * them to exit.
	 *
	 * Note:  This signals each thread in the namespace - even those that
	 *	  belong to the same thread group. To avoid this, we would have
	 *	  to walk the entire tasklist looking for processes in this
	 *	  namespace, but that could be unnecessarily expensive if the
	 *	  pid namespace has just a few processes. Or we would need to
	 *	  maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();

		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

		rcu_read_unlock();

		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/* First, reap any EXIT_ZOMBIE children we may have. */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * sys_wait4() above can't reap the TASK_DEAD children.
	 * Make sure they all go away, see free_pid().
	 */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (pid_ns->nr_hashed == init_pids)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;

	if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Writing directly to ns' last_pid field is OK, since this field
	 * is volatile in a living namespace anyway, and any code writing
	 * to it must synchronize its usage by external means.
	 */

	tmp.data = &pid_ns->last_pid;
	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}

extern int pid_max;
static int zero = 0;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = &zero,
		.extra2 = &pid_max,
	},
	{ }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	force_sig(SIGKILL, pid_ns->child_reaper);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}
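
What reboot_pid_ns() means one level up, sketched from userspace (an illustration, not part of this file; clone(CLONE_NEWPID) assumes CAP_SYS_ADMIN): reboot(2) issued by a pid namespace's init does not touch the host. The namespace is torn down, and the parent's waitpid() reports the init as killed by SIGHUP (restart) or SIGINT (halt/power-off), matching the pid_ns->reboot values set above.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/reboot.h>
#include <sys/wait.h>
#include <unistd.h>

static int ns_init(void *arg)
{
	/* PID 1 of the new namespace; RB_AUTOBOOT == LINUX_REBOOT_CMD_RESTART */
	reboot(RB_AUTOBOOT);		/* does not return on success */
	return 1;
}

int main(void)
{
	static char stack[64 * 1024];	/* stack grows down on common arches */
	int status;
	pid_t pid = clone(ns_init, stack + sizeof(stack),
			  CLONE_NEWPID | SIGCHLD, NULL);

	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))	/* expect SIGHUP here */
		printf("namespace init killed by signal %d\n", WTERMSIG(status));
	return 0;
}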

static void *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns;
}

static void pidns_put(void *ns)
{
	put_pid_ns(ns);
}

static int pidns_install(struct nsproxy *nsproxy, void *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = ns;

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}

static unsigned int pidns_inum(void *ns)
{
	struct pid_namespace *pid_ns = ns;
	return pid_ns->proc_inum;
}

const struct proc_ns_operations pidns_operations = {
	.name		= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.inum		= pidns_inum,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
	return 0;
}

__initcall(pid_namespaces_init);
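
For context before the v6.8 version, a minimal sketch of how create_pid_namespace() is reached from userspace (an illustration, not part of the kernel source; clone(CLONE_NEWPID) assumes CAP_SYS_ADMIN). The first task cloned into the new namespace becomes its PID 1 and child_reaper; its parent is invisible from inside, so getppid() there returns 0.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static int ns_init(void *arg)
{
	/* copy_pid_ns() gave us a fresh namespace: this task is PID 1 here. */
	printf("inside: pid=%d ppid=%d\n", getpid(), getppid());
	return 0;
}

int main(void)
{
	static char stack[64 * 1024];
	pid_t pid = clone(ns_init, stack + sizeof(stack),
			  CLONE_NEWPID | SIGCHLD, NULL);

	if (pid < 0) {
		perror("clone");
		return 1;
	}
	printf("outside: the same child is pid=%d\n", pid);
	return waitpid(pid, NULL, 0) < 0;
}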
v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include <linux/idr.h>
#include <uapi/linux/wait.h>
#include "pid_sysctl.h"

static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/* Write once array, filled from the beginning. */
static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL];

/*
 * creates the kmem cache to allocate pids from.
 * @level: pid namespace level
 */

static struct kmem_cache *create_pid_cachep(unsigned int level)
{
	/* Level 0 is init_pid_ns.pid_cachep */
	struct kmem_cache **pkc = &pid_cache[level - 1];
	struct kmem_cache *kc;
	char name[4 + 10 + 1];
	unsigned int len;

	kc = READ_ONCE(*pkc);
	if (kc)
		return kc;

	snprintf(name, sizeof(name), "pid_%u", level + 1);
	len = struct_size_t(struct pid, numbers, level + 1);
	mutex_lock(&pid_caches_mutex);
	/* A name collision forces us to do the allocation under the mutex. */
	if (!*pkc)
		*pkc = kmem_cache_create(name, len, 0,
					 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	mutex_unlock(&pid_caches_mutex);
	/* The current attempt can fail, but another one may have succeeded. */
	return READ_ONCE(*pkc);
}
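
create_pid_cachep() pairs a lock-free READ_ONCE() fast path with a slow path that creates the cache under the mutex, since two racing creators would otherwise collide on the cache name. A rough userspace analogue of the same double-checked pattern, with hypothetical names (get_cache, slot):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct cache { int unused; };

static _Atomic(struct cache *) slot;	/* analogue of pid_cache[level - 1] */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct cache *get_cache(void)
{
	/* Fast path: one acquire load, like READ_ONCE() on the write-once slot. */
	struct cache *c = atomic_load_explicit(&slot, memory_order_acquire);

	if (c)
		return c;

	/* Slow path: at most one thread creates the object. */
	pthread_mutex_lock(&lock);
	if (!atomic_load_explicit(&slot, memory_order_relaxed))
		atomic_store_explicit(&slot, calloc(1, sizeof(struct cache)),
				      memory_order_release);
	pthread_mutex_unlock(&lock);

	/* Our calloc() may have failed, but another thread may have succeeded. */
	return atomic_load_explicit(&slot, memory_order_acquire);
}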

static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	struct ucounts *ucounts;
	int err;

	err = -EINVAL;
	if (!in_userns(parent_pid_ns->user_ns, user_ns))
		goto out;

	err = -ENOSPC;
	if (level > MAX_PID_NS_LEVEL)
		goto out;
	ucounts = inc_pid_namespaces(user_ns);
	if (!ucounts)
		goto out;

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out_dec;

	idr_init(&ns->idr);

	ns->pid_cachep = create_pid_cachep(level);
	if (ns->pid_cachep == NULL)
		goto out_free_idr;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_idr;
	ns->ns.ops = &pidns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;
	ns->pid_allocated = PIDNS_ADDING;
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
	ns->memfd_noexec_scope = pidns_memfd_noexec_scope(parent_pid_ns);
#endif
	return ns;

out_free_idr:
	idr_destroy(&ns->idr);
	kmem_cache_free(pid_ns_cachep, ns);
out_dec:
	dec_pid_namespaces(ucounts);
out:
	return ERR_PTR(err);
}

static void delayed_free_pidns(struct rcu_head *p)
{
	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);

	dec_pid_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);

	kmem_cache_free(pid_ns_cachep, ns);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	ns_free_inum(&ns->ns);

	idr_destroy(&ns->idr);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!refcount_dec_and_test(&ns->ns.count))
			break;
		destroy_pid_namespace(ns);
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;
	struct pid *pid;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD, causing any terminated children to autoreap.
	 * This speeds up the namespace shutdown, plus see the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal and wait for
	 * them to exit.
	 *
	 * Note:  This signals each thread in the namespace - even those that
	 *	  belong to the same thread group. To avoid this, we would have
	 *	  to walk the entire tasklist looking for processes in this
	 *	  namespace, but that could be unnecessarily expensive if the
	 *	  pid namespace has just a few processes. Or we would need to
	 *	  maintain a tasklist for each pid namespace.
	 */
	rcu_read_lock();
	read_lock(&tasklist_lock);
	nr = 2;
	idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
		task = pid_task(pid, PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
	}
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * kernel_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = kernel_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * kernel_wait4() misses EXIT_DEAD children, and EXIT_ZOMBIE
	 * processes whose parents are outside of the pid namespace.
	 * Such processes are created with setns()+fork().
	 *
	 * If those EXIT_ZOMBIE processes are not reaped by their
	 * parents before their parents exit, they will be reparented
	 * to pid_ns->child_reaper.  Thus pidns->child_reaper needs to
	 * stay valid until they all go away.
	 *
	 * The code relies on the pid_ns->child_reaper ignoring
	 * SIGCHLD to cause those EXIT_ZOMBIE processes to be
	 * autoreaped if reparented.
	 *
	 * Semantically it is also desirable to wait for EXIT_ZOMBIE
	 * processes before allowing the child_reaper to be reaped, as
	 * that gives the invariant that when the init process of a
	 * pid namespace is reaped all of the processes in the pid
	 * namespace are gone.
	 *
	 * Once all of the other tasks are gone from the pid_namespace
	 * free_pid() will awaken this task.
	 */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pid_ns->pid_allocated == init_pids)
			break;
		/*
		 * Release tasks_rcu_exit_srcu to avoid following deadlock:
		 *
		 * 1) TASK A unshare(CLONE_NEWPID)
		 * 2) TASK A fork() twice -> TASK B (child reaper for new ns)
		 *    and TASK C
		 * 3) TASK B exits, kills TASK C, waits for TASK A to reap it
		 * 4) TASK A calls synchronize_rcu_tasks()
		 *                   -> synchronize_srcu(tasks_rcu_exit_srcu)
		 * 5) *DEADLOCK*
		 *
		 * It is considered safe to release tasks_rcu_exit_srcu here
		 * because we assume the current task can not be concurrently
		 * reaped at this point.
		 */
		exit_tasks_rcu_stop();
		schedule();
		exit_tasks_rcu_start();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}
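
A small userspace illustration of the shutdown semantics above (not part of this file; assumes CAP_SYS_ADMIN): once the namespace's init exits, zap_pid_ns_processes() SIGKILLs every remaining task, so an orphaned grandchild cannot outlive the namespace.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int ns_init(void *arg)
{
	if (fork() == 0) {	/* grandchild, PID 2 inside the namespace */
		pause();	/* never returns: SIGKILLed with the namespace */
		_exit(1);
	}
	_exit(0);		/* init exits -> zap_pid_ns_processes() */
}

int main(void)
{
	static char stack[64 * 1024];
	pid_t pid = clone(ns_init, stack + sizeof(stack),
			  CLONE_NEWPID | SIGCHLD, NULL);

	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	/* By now the grandchild has been killed along with the namespace. */
	return 0;
}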

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;
	int ret, next;

	if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns))
		return -EPERM;

	next = idr_get_cursor(&pid_ns->idr) - 1;

	tmp.data = &next;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (!ret && write)
		idr_set_cursor(&pid_ns->idr, next + 1);

	return ret;
}

extern int pid_max;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &pid_max,
	},
	{ }
};
#endif	/* CONFIG_CHECKPOINT_RESTORE */
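
The handler above backs /proc/sys/kernel/ns_last_pid. A restore-style sketch (an illustration only; writing requires CAP_CHECKPOINT_RESTORE or CAP_SYS_ADMIN over the pid namespace, and it inherently races with concurrent forks): writing N asks the allocator to hand out N + 1 next.

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/ns_last_pid", "w");
	pid_t pid;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fprintf(f, "%d", 9999) < 0 || fclose(f) == EOF)
		return 1;

	pid = fork();		/* should get pid 10000, barring races */
	if (pid == 0)
		_exit(0);
	printf("child pid = %d\n", pid);
	return waitpid(pid, NULL, 0) < 0;
}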

int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	send_sig(SIGKILL, pid_ns->child_reaper, 1);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

static struct ns_common *pidns_for_children_get(struct task_struct *task)
{
	struct pid_namespace *ns = NULL;

	task_lock(task);
	if (task->nsproxy) {
		ns = task->nsproxy->pid_ns_for_children;
		get_pid_ns(ns);
	}
	task_unlock(task);

	if (ns) {
		read_lock(&tasklist_lock);
		if (!ns->child_reaper) {
			put_pid_ns(ns);
			ns = NULL;
		}
		read_unlock(&tasklist_lock);
	}

	return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

static int pidns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}
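
A sketch of what pidns_install() gives userspace (an illustration, not part of this file; requires CAP_SYS_ADMIN over both the target namespace's user namespace and the caller's). Note that setns(2) with CLONE_NEWPID changes only pid_ns_for_children: the caller keeps its own pid, and only subsequently forked children enter the target namespace.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	pid_t child;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /proc/<pid>/ns/pid\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* pidns_install() runs here; only pid_ns_for_children is switched. */
	if (setns(fd, CLONE_NEWPID) < 0) {
		perror("setns");
		return 1;
	}
	printf("caller keeps its old pid: %d\n", getpid());

	child = fork();		/* first task we place into the target ns */
	if (child == 0) {
		printf("child sees itself as pid %d\n", getpid());
		_exit(0);
	}
	return waitpid(child, NULL, 0) < 0;
}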

static struct ns_common *pidns_get_parent(struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *pid_ns, *p;

	/* See if the parent is in the current namespace */
	pid_ns = p = to_pid_ns(ns)->parent;
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == active)
			break;
		p = p->parent;
	}

	return &get_pid_ns(pid_ns)->ns;
}

static struct user_namespace *pidns_owner(struct ns_common *ns)
{
	return to_pid_ns(ns)->user_ns;
}

const struct proc_ns_operations pidns_operations = {
	.name		= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

const struct proc_ns_operations pidns_for_children_operations = {
	.name		= "pid_for_children",
	.real_ns_name	= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_for_children_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC | SLAB_ACCOUNT);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_init("kernel", pid_ns_ctl_table);
#endif

	register_pid_ns_sysctl_table_vm();
	return 0;
}

__initcall(pid_namespaces_init);
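
Finally, pidns_get_parent() above is what the NS_GET_PARENT ioctl on a pid-namespace fd resolves to (dispatched through the nsfs code). A small sketch (an illustration; from the initial pid namespace the call fails with EPERM because no reachable parent exists):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nsfs.h>

int main(void)
{
	int fd = open("/proc/self/ns/pid", O_RDONLY);
	int parent;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	parent = ioctl(fd, NS_GET_PARENT);	/* -> pidns_get_parent() */
	if (parent < 0)
		perror("ioctl(NS_GET_PARENT)");
	else
		printf("fd %d refers to the parent pid namespace\n", parent);
	return 0;
}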