// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 * Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include <linux/idr.h>

static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/* MAX_PID_NS_LEVEL is needed for limiting the size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32
/* Write-once array, filled from the beginning. */
static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL];

/*
 * Creates the kmem cache to allocate pids from.
 * @level: pid namespace level
 */

static struct kmem_cache *create_pid_cachep(unsigned int level)
{
	/* Level 0 is init_pid_ns.pid_cachep */
	struct kmem_cache **pkc = &pid_cache[level - 1];
	struct kmem_cache *kc;
	char name[4 + 10 + 1];
	unsigned int len;

	kc = READ_ONCE(*pkc);
	if (kc)
		return kc;

	snprintf(name, sizeof(name), "pid_%u", level + 1);
	len = sizeof(struct pid) + level * sizeof(struct upid);
	mutex_lock(&pid_caches_mutex);
	/* A name collision forces us to do the allocation under the mutex. */
	if (!*pkc)
		*pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0);
	mutex_unlock(&pid_caches_mutex);
	/* The create above can fail, but a concurrent caller may have succeeded. */
	return READ_ONCE(*pkc);
}
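
/*
 * Sizing example (illustrative, derived from the code above): a pid
 * living in a level-2 namespace is visible at levels 0..2, so its
 * struct pid embeds three struct upid entries and comes from the kmem
 * cache named "pid_3":
 *
 *	create_pid_cachep(2) -> cache "pid_3",
 *	object size = sizeof(struct pid) + 2 * sizeof(struct upid)
 */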

static void proc_cleanup_work(struct work_struct *work)
{
	struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
	pid_ns_release_proc(ns);
}

static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	struct ucounts *ucounts;
	int err;

	err = -EINVAL;
	if (!in_userns(parent_pid_ns->user_ns, user_ns))
		goto out;

	err = -ENOSPC;
	if (level > MAX_PID_NS_LEVEL)
		goto out;
	ucounts = inc_pid_namespaces(user_ns);
	if (!ucounts)
		goto out;

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out_dec;

	idr_init(&ns->idr);

	ns->pid_cachep = create_pid_cachep(level);
	if (ns->pid_cachep == NULL)
		goto out_free_idr;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_idr;
	ns->ns.ops = &pidns_operations;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;
	ns->pid_allocated = PIDNS_ADDING;
	INIT_WORK(&ns->proc_work, proc_cleanup_work);

	return ns;

out_free_idr:
	idr_destroy(&ns->idr);
	kmem_cache_free(pid_ns_cachep, ns);
out_dec:
	dec_pid_namespaces(ucounts);
out:
	return ERR_PTR(err);
}

static void delayed_free_pidns(struct rcu_head *p)
{
	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);

	dec_pid_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);

	kmem_cache_free(pid_ns_cachep, ns);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	ns_free_inum(&ns->ns);

	idr_destroy(&ns->idr);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

static void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	destroy_pid_namespace(ns);
}

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!kref_put(&ns->kref, free_pid_ns))
			break;
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);
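
/*
 * Note on the loop above: when kref_put() frees a namespace, the loop
 * then drops the reference that namespace held on its parent (taken via
 * get_pid_ns(parent_pid_ns) in create_pid_namespace()), so destruction
 * walks up the hierarchy. init_pid_ns is never destroyed.
 */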

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;
	struct pid *pid;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD so that any terminated children autoreap.
	 * This speeds up the namespace shutdown; also see the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: this signals each thread in the namespace - even those
	 * that belong to the same thread group. To avoid that, we would
	 * have to walk the entire tasklist looking for processes in this
	 * namespace, but that could be unnecessarily expensive if the
	 * pid namespace has just a few processes. Alternatively, we would
	 * need to maintain a tasklist for each pid namespace.
	 */
	rcu_read_lock();
	read_lock(&tasklist_lock);
	nr = 2;
	idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
		task = pid_task(pid, PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
	}
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * kernel_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = kernel_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * kernel_wait4() above can't reap the EXIT_DEAD children, but we
	 * do not really care: we could reparent them to the global init.
	 * We could exit and reap ->child_reaper even if it is not the last
	 * thread in this pid_ns, since free_pid(pid_allocated == 0) calls
	 * proc_cleanup_work() and pid_ns can not go away until
	 * proc_kill_sb() drops the reference.
	 *
	 * But this ns can also have other tasks injected by setns()+fork().
	 * Again, ignoring the user-visible semantics we do not really need
	 * to wait until they are all reaped, but they can be reparented to
	 * us and thus we need to ensure that pid_ns->child_reaper stays
	 * valid until they all go away. See free_pid()->wake_up_process().
	 *
	 * We rely on the ignored SIGCHLD; an injected zombie must be
	 * autoreaped if reparented.
	 */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pid_ns->pid_allocated == init_pids)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}
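
/*
 * Context note (assumption based on the exit path of this kernel era):
 * zap_pid_ns_processes() is invoked from the exit code - see
 * find_child_reaper() in kernel/exit.c - when the last thread of the
 * namespace's init process exits, so "me"/current above is the
 * namespace's child reaper itself.
 */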

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;
	int ret, next;

	if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Writing directly to the ns' last_pid field is OK, since this
	 * field is volatile in a living namespace anyway, and code
	 * writing to it should synchronize its usage by external means.
	 */

	next = idr_get_cursor(&pid_ns->idr) - 1;

	tmp.data = &next;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (!ret && write)
		idr_set_cursor(&pid_ns->idr, next + 1);

	return ret;
}
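
/*
 * Usage sketch (from userspace, assuming CAP_SYS_ADMIN over the pid
 * namespace's user namespace): writing N to /proc/sys/kernel/ns_last_pid
 * records N as the last allocated pid, so the next task created in this
 * namespace gets pid N + 1 if that pid is still free, e.g.
 *
 *	echo 9999 > /proc/sys/kernel/ns_last_pid
 *	fork()	(child pid is 10000, barring races)
 *
 * Checkpoint/restore tools such as CRIU use this to recreate tasks with
 * their original pids.
 */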

extern int pid_max;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &pid_max,
	},
	{ }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	send_sig(SIGKILL, pid_ns->child_reaper, 1);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}
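
/*
 * Effect as seen from outside: reboot(2) in a non-initial pid namespace
 * never reboots the host. The namespace's init is SIGKILLed here, and
 * zap_pid_ns_processes() later copies pid_ns->reboot into
 * group_exit_code, so the process waiting on the container init sees it
 * terminated by SIGHUP (restart) or SIGINT (halt/power off).
 */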

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

static struct ns_common *pidns_for_children_get(struct task_struct *task)
{
	struct pid_namespace *ns = NULL;

	task_lock(task);
	if (task->nsproxy) {
		ns = task->nsproxy->pid_ns_for_children;
		get_pid_ns(ns);
	}
	task_unlock(task);

	if (ns) {
		read_lock(&tasklist_lock);
		if (!ns->child_reaper) {
			put_pid_ns(ns);
			ns = NULL;
		}
		read_unlock(&tasklist_lock);
	}

	return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}
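
/*
 * Userspace sketch (illustrative, error handling omitted; the pid 123
 * is hypothetical): setns() on a pid namespace fd only changes which
 * namespace future children are born into - the caller keeps its own
 * pid and namespace:
 *
 *	int fd = open("/proc/123/ns/pid", O_RDONLY);
 *	setns(fd, CLONE_NEWPID);	(installs pid_ns_for_children)
 *	if (fork() == 0)
 *		...	(the child gets a pid in the target namespace)
 */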

static struct ns_common *pidns_get_parent(struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *pid_ns, *p;

	/* See if the parent is in the current namespace */
	pid_ns = p = to_pid_ns(ns)->parent;
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == active)
			break;
		p = p->parent;
	}

	return &get_pid_ns(pid_ns)->ns;
}
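
/*
 * This backs the NS_GET_PARENT ioctl from <linux/nsfs.h>:
 * ioctl(ns_fd, NS_GET_PARENT) returns a new fd for the parent pid
 * namespace. Per the walk above, the parent is only handed out if it is
 * the caller's own active namespace or a descendant of it; otherwise
 * the caller gets EPERM.
 */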

static struct user_namespace *pidns_owner(struct ns_common *ns)
{
	return to_pid_ns(ns)->user_ns;
}

const struct proc_ns_operations pidns_operations = {
	.name = "pid",
	.type = CLONE_NEWPID,
	.get = pidns_get,
	.put = pidns_put,
	.install = pidns_install,
	.owner = pidns_owner,
	.get_parent = pidns_get_parent,
};

const struct proc_ns_operations pidns_for_children_operations = {
	.name = "pid_for_children",
	.real_ns_name = "pid",
	.type = CLONE_NEWPID,
	.get = pidns_for_children_get,
	.put = pidns_put,
	.install = pidns_install,
	.owner = pidns_owner,
	.get_parent = pidns_get_parent,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
	return 0;
}

__initcall(pid_namespaces_init);