Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Generic pidhash and scalable, time-bounded PID allocator
4 *
5 * (C) 2002-2003 Nadia Yvette Chambers, IBM
6 * (C) 2004 Nadia Yvette Chambers, Oracle
7 * (C) 2002-2004 Ingo Molnar, Red Hat
8 *
9 * pid-structures are backing objects for tasks sharing a given ID to chain
10 * against. There is very little to them aside from hashing them and
11 * parking tasks using given ID's on a list.
12 *
13 * The hash is always changed with the tasklist_lock write-acquired,
14 * and the hash is only accessed with the tasklist_lock at least
15 * read-acquired, so there's no additional SMP locking needed here.
16 *
17 * We have a list of bitmap pages, which bitmaps represent the PID space.
18 * Allocating and freeing PIDs is completely lockless. The worst-case
19 * allocation scenario when all but one out of 1 million PIDs possible are
20 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
21 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
22 *
23 * Pid namespaces:
24 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
25 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
26 * Many thanks to Oleg Nesterov for comments and help
27 *
28 */
29
30#include <linux/mm.h>
31#include <linux/export.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <linux/rculist.h>
35#include <linux/memblock.h>
36#include <linux/pid_namespace.h>
37#include <linux/init_task.h>
38#include <linux/syscalls.h>
39#include <linux/proc_ns.h>
40#include <linux/refcount.h>
41#include <linux/anon_inodes.h>
42#include <linux/sched/signal.h>
43#include <linux/sched/task.h>
44#include <linux/idr.h>
45#include <linux/pidfs.h>
46#include <net/sock.h>
47#include <uapi/linux/pidfd.h>
48
49struct pid init_struct_pid = {
50 .count = REFCOUNT_INIT(1),
51 .tasks = {
52 { .first = NULL },
53 { .first = NULL },
54 { .first = NULL },
55 },
56 .level = 0,
57 .numbers = { {
58 .nr = 0,
59 .ns = &init_pid_ns,
60 }, }
61};
62
63int pid_max = PID_MAX_DEFAULT;
64
65int pid_max_min = RESERVED_PIDS + 1;
66int pid_max_max = PID_MAX_LIMIT;
67/*
68 * Pseudo filesystems start inode numbering after one. We use Reserved
69 * PIDs as a natural offset.
70 */
71static u64 pidfs_ino = RESERVED_PIDS;
72
73/*
74 * PID-map pages start out as NULL, they get allocated upon
75 * first use and are never deallocated. This way a low pid_max
76 * value does not cause lots of bitmaps to be allocated, but
77 * the scheme scales to up to 4 million PIDs, runtime.
78 */
79struct pid_namespace init_pid_ns = {
80 .ns.count = REFCOUNT_INIT(2),
81 .idr = IDR_INIT(init_pid_ns.idr),
82 .pid_allocated = PIDNS_ADDING,
83 .level = 0,
84 .child_reaper = &init_task,
85 .user_ns = &init_user_ns,
86 .ns.inum = PROC_PID_INIT_INO,
87#ifdef CONFIG_PID_NS
88 .ns.ops = &pidns_operations,
89#endif
90#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
91 .memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
92#endif
93};
94EXPORT_SYMBOL_GPL(init_pid_ns);
95
96/*
97 * Note: disable interrupts while the pidmap_lock is held as an
98 * interrupt might come in and do read_lock(&tasklist_lock).
99 *
100 * If we don't disable interrupts there is a nasty deadlock between
101 * detach_pid()->free_pid() and another cpu that does
102 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
103 * read_lock(&tasklist_lock);
104 *
105 * After we clean up the tasklist_lock and know there are no
106 * irq handlers that take it we can leave the interrupts enabled.
107 * For now it is easier to be safe than to prove it can't happen.
108 */
109
110static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
111
112void put_pid(struct pid *pid)
113{
114 struct pid_namespace *ns;
115
116 if (!pid)
117 return;
118
119 ns = pid->numbers[pid->level].ns;
120 if (refcount_dec_and_test(&pid->count)) {
121 kmem_cache_free(ns->pid_cachep, pid);
122 put_pid_ns(ns);
123 }
124}
125EXPORT_SYMBOL_GPL(put_pid);
126
127static void delayed_put_pid(struct rcu_head *rhp)
128{
129 struct pid *pid = container_of(rhp, struct pid, rcu);
130 put_pid(pid);
131}
132
133void free_pid(struct pid *pid)
134{
135 /* We can be called with write_lock_irq(&tasklist_lock) held */
136 int i;
137 unsigned long flags;
138
139 spin_lock_irqsave(&pidmap_lock, flags);
140 for (i = 0; i <= pid->level; i++) {
141 struct upid *upid = pid->numbers + i;
142 struct pid_namespace *ns = upid->ns;
143 switch (--ns->pid_allocated) {
144 case 2:
145 case 1:
146 /* When all that is left in the pid namespace
147 * is the reaper wake up the reaper. The reaper
148 * may be sleeping in zap_pid_ns_processes().
149 */
150 wake_up_process(ns->child_reaper);
151 break;
152 case PIDNS_ADDING:
153 /* Handle a fork failure of the first process */
154 WARN_ON(ns->child_reaper);
155 ns->pid_allocated = 0;
156 break;
157 }
158
159 idr_remove(&ns->idr, upid->nr);
160 }
161 spin_unlock_irqrestore(&pidmap_lock, flags);
162
163 call_rcu(&pid->rcu, delayed_put_pid);
164}
165
166struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
167 size_t set_tid_size)
168{
169 struct pid *pid;
170 enum pid_type type;
171 int i, nr;
172 struct pid_namespace *tmp;
173 struct upid *upid;
174 int retval = -ENOMEM;
175
176 /*
177 * set_tid_size contains the size of the set_tid array. Starting at
178 * the most nested currently active PID namespace it tells alloc_pid()
179 * which PID to set for a process in that most nested PID namespace
180 * up to set_tid_size PID namespaces. It does not have to set the PID
181 * for a process in all nested PID namespaces but set_tid_size must
182 * never be greater than the current ns->level + 1.
183 */
184 if (set_tid_size > ns->level + 1)
185 return ERR_PTR(-EINVAL);
186
187 pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
188 if (!pid)
189 return ERR_PTR(retval);
190
191 tmp = ns;
192 pid->level = ns->level;
193
194 for (i = ns->level; i >= 0; i--) {
195 int tid = 0;
196
197 if (set_tid_size) {
198 tid = set_tid[ns->level - i];
199
200 retval = -EINVAL;
201 if (tid < 1 || tid >= pid_max)
202 goto out_free;
203 /*
204 * Also fail if a PID != 1 is requested and
205 * no PID 1 exists.
206 */
207 if (tid != 1 && !tmp->child_reaper)
208 goto out_free;
209 retval = -EPERM;
210 if (!checkpoint_restore_ns_capable(tmp->user_ns))
211 goto out_free;
212 set_tid_size--;
213 }
214
215 idr_preload(GFP_KERNEL);
216 spin_lock_irq(&pidmap_lock);
217
218 if (tid) {
219 nr = idr_alloc(&tmp->idr, NULL, tid,
220 tid + 1, GFP_ATOMIC);
221 /*
222 * If ENOSPC is returned it means that the PID is
223 * alreay in use. Return EEXIST in that case.
224 */
225 if (nr == -ENOSPC)
226 nr = -EEXIST;
227 } else {
228 int pid_min = 1;
229 /*
230 * init really needs pid 1, but after reaching the
231 * maximum wrap back to RESERVED_PIDS
232 */
233 if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
234 pid_min = RESERVED_PIDS;
235
236 /*
237 * Store a null pointer so find_pid_ns does not find
238 * a partially initialized PID (see below).
239 */
240 nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
241 pid_max, GFP_ATOMIC);
242 }
243 spin_unlock_irq(&pidmap_lock);
244 idr_preload_end();
245
246 if (nr < 0) {
247 retval = (nr == -ENOSPC) ? -EAGAIN : nr;
248 goto out_free;
249 }
250
251 pid->numbers[i].nr = nr;
252 pid->numbers[i].ns = tmp;
253 tmp = tmp->parent;
254 }
255
256 /*
257 * ENOMEM is not the most obvious choice especially for the case
258 * where the child subreaper has already exited and the pid
259 * namespace denies the creation of any new processes. But ENOMEM
260 * is what we have exposed to userspace for a long time and it is
261 * documented behavior for pid namespaces. So we can't easily
262 * change it even if there were an error code better suited.
263 */
264 retval = -ENOMEM;
265
266 get_pid_ns(ns);
267 refcount_set(&pid->count, 1);
268 spin_lock_init(&pid->lock);
269 for (type = 0; type < PIDTYPE_MAX; ++type)
270 INIT_HLIST_HEAD(&pid->tasks[type]);
271
272 init_waitqueue_head(&pid->wait_pidfd);
273 INIT_HLIST_HEAD(&pid->inodes);
274
275 upid = pid->numbers + ns->level;
276 spin_lock_irq(&pidmap_lock);
277 if (!(ns->pid_allocated & PIDNS_ADDING))
278 goto out_unlock;
279 pid->stashed = NULL;
280 pid->ino = ++pidfs_ino;
281 for ( ; upid >= pid->numbers; --upid) {
282 /* Make the PID visible to find_pid_ns. */
283 idr_replace(&upid->ns->idr, pid, upid->nr);
284 upid->ns->pid_allocated++;
285 }
286 spin_unlock_irq(&pidmap_lock);
287
288 return pid;
289
290out_unlock:
291 spin_unlock_irq(&pidmap_lock);
292 put_pid_ns(ns);
293
294out_free:
295 spin_lock_irq(&pidmap_lock);
296 while (++i <= ns->level) {
297 upid = pid->numbers + i;
298 idr_remove(&upid->ns->idr, upid->nr);
299 }
300
301 /* On failure to allocate the first pid, reset the state */
302 if (ns->pid_allocated == PIDNS_ADDING)
303 idr_set_cursor(&ns->idr, 0);
304
305 spin_unlock_irq(&pidmap_lock);
306
307 kmem_cache_free(ns->pid_cachep, pid);
308 return ERR_PTR(retval);
309}
310
311void disable_pid_allocation(struct pid_namespace *ns)
312{
313 spin_lock_irq(&pidmap_lock);
314 ns->pid_allocated &= ~PIDNS_ADDING;
315 spin_unlock_irq(&pidmap_lock);
316}
317
318struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
319{
320 return idr_find(&ns->idr, nr);
321}
322EXPORT_SYMBOL_GPL(find_pid_ns);
323
324struct pid *find_vpid(int nr)
325{
326 return find_pid_ns(nr, task_active_pid_ns(current));
327}
328EXPORT_SYMBOL_GPL(find_vpid);
329
330static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
331{
332 return (type == PIDTYPE_PID) ?
333 &task->thread_pid :
334 &task->signal->pids[type];
335}
336
337/*
338 * attach_pid() must be called with the tasklist_lock write-held.
339 */
340void attach_pid(struct task_struct *task, enum pid_type type)
341{
342 struct pid *pid = *task_pid_ptr(task, type);
343 hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
344}
345
346static void __change_pid(struct task_struct *task, enum pid_type type,
347 struct pid *new)
348{
349 struct pid **pid_ptr = task_pid_ptr(task, type);
350 struct pid *pid;
351 int tmp;
352
353 pid = *pid_ptr;
354
355 hlist_del_rcu(&task->pid_links[type]);
356 *pid_ptr = new;
357
358 if (type == PIDTYPE_PID) {
359 WARN_ON_ONCE(pid_has_task(pid, PIDTYPE_PID));
360 wake_up_all(&pid->wait_pidfd);
361 }
362
363 for (tmp = PIDTYPE_MAX; --tmp >= 0; )
364 if (pid_has_task(pid, tmp))
365 return;
366
367 free_pid(pid);
368}
369
370void detach_pid(struct task_struct *task, enum pid_type type)
371{
372 __change_pid(task, type, NULL);
373}
374
375void change_pid(struct task_struct *task, enum pid_type type,
376 struct pid *pid)
377{
378 __change_pid(task, type, pid);
379 attach_pid(task, type);
380}
381
382void exchange_tids(struct task_struct *left, struct task_struct *right)
383{
384 struct pid *pid1 = left->thread_pid;
385 struct pid *pid2 = right->thread_pid;
386 struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
387 struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];
388
389 /* Swap the single entry tid lists */
390 hlists_swap_heads_rcu(head1, head2);
391
392 /* Swap the per task_struct pid */
393 rcu_assign_pointer(left->thread_pid, pid2);
394 rcu_assign_pointer(right->thread_pid, pid1);
395
396 /* Swap the cached value */
397 WRITE_ONCE(left->pid, pid_nr(pid2));
398 WRITE_ONCE(right->pid, pid_nr(pid1));
399}
400
401/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
402void transfer_pid(struct task_struct *old, struct task_struct *new,
403 enum pid_type type)
404{
405 WARN_ON_ONCE(type == PIDTYPE_PID);
406 hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
407}
408
409struct task_struct *pid_task(struct pid *pid, enum pid_type type)
410{
411 struct task_struct *result = NULL;
412 if (pid) {
413 struct hlist_node *first;
414 first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
415 lockdep_tasklist_lock_is_held());
416 if (first)
417 result = hlist_entry(first, struct task_struct, pid_links[(type)]);
418 }
419 return result;
420}
421EXPORT_SYMBOL(pid_task);
422
423/*
424 * Must be called under rcu_read_lock().
425 */
426struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
427{
428 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
429 "find_task_by_pid_ns() needs rcu_read_lock() protection");
430 return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
431}
432
433struct task_struct *find_task_by_vpid(pid_t vnr)
434{
435 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
436}
437
438struct task_struct *find_get_task_by_vpid(pid_t nr)
439{
440 struct task_struct *task;
441
442 rcu_read_lock();
443 task = find_task_by_vpid(nr);
444 if (task)
445 get_task_struct(task);
446 rcu_read_unlock();
447
448 return task;
449}
450
451struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
452{
453 struct pid *pid;
454 rcu_read_lock();
455 pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
456 rcu_read_unlock();
457 return pid;
458}
459EXPORT_SYMBOL_GPL(get_task_pid);
460
461struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
462{
463 struct task_struct *result;
464 rcu_read_lock();
465 result = pid_task(pid, type);
466 if (result)
467 get_task_struct(result);
468 rcu_read_unlock();
469 return result;
470}
471EXPORT_SYMBOL_GPL(get_pid_task);
472
473struct pid *find_get_pid(pid_t nr)
474{
475 struct pid *pid;
476
477 rcu_read_lock();
478 pid = get_pid(find_vpid(nr));
479 rcu_read_unlock();
480
481 return pid;
482}
483EXPORT_SYMBOL_GPL(find_get_pid);
484
485pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
486{
487 struct upid *upid;
488 pid_t nr = 0;
489
490 if (pid && ns->level <= pid->level) {
491 upid = &pid->numbers[ns->level];
492 if (upid->ns == ns)
493 nr = upid->nr;
494 }
495 return nr;
496}
497EXPORT_SYMBOL_GPL(pid_nr_ns);
498
499pid_t pid_vnr(struct pid *pid)
500{
501 return pid_nr_ns(pid, task_active_pid_ns(current));
502}
503EXPORT_SYMBOL_GPL(pid_vnr);
504
505pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
506 struct pid_namespace *ns)
507{
508 pid_t nr = 0;
509
510 rcu_read_lock();
511 if (!ns)
512 ns = task_active_pid_ns(current);
513 nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
514 rcu_read_unlock();
515
516 return nr;
517}
518EXPORT_SYMBOL(__task_pid_nr_ns);
519
520struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
521{
522 return ns_of_pid(task_pid(tsk));
523}
524EXPORT_SYMBOL_GPL(task_active_pid_ns);
525
526/*
527 * Used by proc to find the first pid that is greater than or equal to nr.
528 *
529 * If there is a pid at nr this function is exactly the same as find_pid_ns.
530 */
531struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
532{
533 return idr_get_next(&ns->idr, &nr);
534}
535EXPORT_SYMBOL_GPL(find_ge_pid);
536
537struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
538{
539 CLASS(fd, f)(fd);
540 struct pid *pid;
541
542 if (fd_empty(f))
543 return ERR_PTR(-EBADF);
544
545 pid = pidfd_pid(fd_file(f));
546 if (!IS_ERR(pid)) {
547 get_pid(pid);
548 *flags = fd_file(f)->f_flags;
549 }
550 return pid;
551}
552
553/**
554 * pidfd_get_task() - Get the task associated with a pidfd
555 *
556 * @pidfd: pidfd for which to get the task
557 * @flags: flags associated with this pidfd
558 *
559 * Return the task associated with @pidfd. The function takes a reference on
560 * the returned task. The caller is responsible for releasing that reference.
561 *
562 * Return: On success, the task_struct associated with the pidfd.
563 * On error, a negative errno number will be returned.
564 */
565struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
566{
567 unsigned int f_flags;
568 struct pid *pid;
569 struct task_struct *task;
570
571 pid = pidfd_get_pid(pidfd, &f_flags);
572 if (IS_ERR(pid))
573 return ERR_CAST(pid);
574
575 task = get_pid_task(pid, PIDTYPE_TGID);
576 put_pid(pid);
577 if (!task)
578 return ERR_PTR(-ESRCH);
579
580 *flags = f_flags;
581 return task;
582}
583
584/**
585 * pidfd_create() - Create a new pid file descriptor.
586 *
587 * @pid: struct pid that the pidfd will reference
588 * @flags: flags to pass
589 *
590 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
591 *
592 * Note, that this function can only be called after the fd table has
593 * been unshared to avoid leaking the pidfd to the new process.
594 *
595 * This symbol should not be explicitly exported to loadable modules.
596 *
597 * Return: On success, a cloexec pidfd is returned.
598 * On error, a negative errno number will be returned.
599 */
600static int pidfd_create(struct pid *pid, unsigned int flags)
601{
602 int pidfd;
603 struct file *pidfd_file;
604
605 pidfd = pidfd_prepare(pid, flags, &pidfd_file);
606 if (pidfd < 0)
607 return pidfd;
608
609 fd_install(pidfd, pidfd_file);
610 return pidfd;
611}
612
613/**
614 * sys_pidfd_open() - Open new pid file descriptor.
615 *
616 * @pid: pid for which to retrieve a pidfd
617 * @flags: flags to pass
618 *
619 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
620 * the task identified by @pid. Without PIDFD_THREAD flag the target task
621 * must be a thread-group leader.
622 *
623 * Return: On success, a cloexec pidfd is returned.
624 * On error, a negative errno number will be returned.
625 */
626SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
627{
628 int fd;
629 struct pid *p;
630
631 if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD))
632 return -EINVAL;
633
634 if (pid <= 0)
635 return -EINVAL;
636
637 p = find_get_pid(pid);
638 if (!p)
639 return -ESRCH;
640
641 fd = pidfd_create(p, flags);
642
643 put_pid(p);
644 return fd;
645}
646
647void __init pid_idr_init(void)
648{
649 /* Verify no one has done anything silly: */
650 BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);
651
652 /* bump default and minimum pid_max based on number of cpus */
653 pid_max = min(pid_max_max, max_t(int, pid_max,
654 PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
655 pid_max_min = max_t(int, pid_max_min,
656 PIDS_PER_CPU_MIN * num_possible_cpus());
657 pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
658
659 idr_init(&init_pid_ns.idr);
660
661 init_pid_ns.pid_cachep = kmem_cache_create("pid",
662 struct_size_t(struct pid, numbers, 1),
663 __alignof__(struct pid),
664 SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
665 NULL);
666}
667
668static struct file *__pidfd_fget(struct task_struct *task, int fd)
669{
670 struct file *file;
671 int ret;
672
673 ret = down_read_killable(&task->signal->exec_update_lock);
674 if (ret)
675 return ERR_PTR(ret);
676
677 if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
678 file = fget_task(task, fd);
679 else
680 file = ERR_PTR(-EPERM);
681
682 up_read(&task->signal->exec_update_lock);
683
684 if (!file) {
685 /*
686 * It is possible that the target thread is exiting; it can be
687 * either:
688 * 1. before exit_signals(), which gives a real fd
689 * 2. before exit_files() takes the task_lock() gives a real fd
690 * 3. after exit_files() releases task_lock(), ->files is NULL;
691 * this has PF_EXITING, since it was set in exit_signals(),
692 * __pidfd_fget() returns EBADF.
693 * In case 3 we get EBADF, but that really means ESRCH, since
694 * the task is currently exiting and has freed its files
695 * struct, so we fix it up.
696 */
697 if (task->flags & PF_EXITING)
698 file = ERR_PTR(-ESRCH);
699 else
700 file = ERR_PTR(-EBADF);
701 }
702
703 return file;
704}
705
706static int pidfd_getfd(struct pid *pid, int fd)
707{
708 struct task_struct *task;
709 struct file *file;
710 int ret;
711
712 task = get_pid_task(pid, PIDTYPE_PID);
713 if (!task)
714 return -ESRCH;
715
716 file = __pidfd_fget(task, fd);
717 put_task_struct(task);
718 if (IS_ERR(file))
719 return PTR_ERR(file);
720
721 ret = receive_fd(file, NULL, O_CLOEXEC);
722 fput(file);
723
724 return ret;
725}
726
727/**
728 * sys_pidfd_getfd() - Get a file descriptor from another process
729 *
730 * @pidfd: the pidfd file descriptor of the process
731 * @fd: the file descriptor number to get
732 * @flags: flags on how to get the fd (reserved)
733 *
734 * This syscall gets a copy of a file descriptor from another process
735 * based on the pidfd, and file descriptor number. It requires that
736 * the calling process has the ability to ptrace the process represented
737 * by the pidfd. The process which is having its file descriptor copied
738 * is otherwise unaffected.
739 *
740 * Return: On success, a cloexec file descriptor is returned.
741 * On error, a negative errno number will be returned.
742 */
743SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
744 unsigned int, flags)
745{
746 struct pid *pid;
747
748 /* flags is currently unused - make sure it's unset */
749 if (flags)
750 return -EINVAL;
751
752 CLASS(fd, f)(pidfd);
753 if (fd_empty(f))
754 return -EBADF;
755
756 pid = pidfd_pid(fd_file(f));
757 if (IS_ERR(pid))
758 return PTR_ERR(pid);
759
760 return pidfd_getfd(pid, fd);
761}
1/*
2 * Generic pidhash and scalable, time-bounded PID allocator
3 *
4 * (C) 2002-2003 William Irwin, IBM
5 * (C) 2004 William Irwin, Oracle
6 * (C) 2002-2004 Ingo Molnar, Red Hat
7 *
8 * pid-structures are backing objects for tasks sharing a given ID to chain
9 * against. There is very little to them aside from hashing them and
10 * parking tasks using given ID's on a list.
11 *
12 * The hash is always changed with the tasklist_lock write-acquired,
13 * and the hash is only accessed with the tasklist_lock at least
14 * read-acquired, so there's no additional SMP locking needed here.
15 *
16 * We have a list of bitmap pages, which bitmaps represent the PID space.
17 * Allocating and freeing PIDs is completely lockless. The worst-case
18 * allocation scenario when all but one out of 1 million PIDs possible are
19 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
20 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
21 *
22 * Pid namespaces:
23 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
24 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
25 * Many thanks to Oleg Nesterov for comments and help
26 *
27 */
28
29#include <linux/mm.h>
30#include <linux/export.h>
31#include <linux/slab.h>
32#include <linux/init.h>
33#include <linux/rculist.h>
34#include <linux/bootmem.h>
35#include <linux/hash.h>
36#include <linux/pid_namespace.h>
37#include <linux/init_task.h>
38#include <linux/syscalls.h>
39
40#define pid_hashfn(nr, ns) \
41 hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
42static struct hlist_head *pid_hash;
43static unsigned int pidhash_shift = 4;
44struct pid init_struct_pid = INIT_STRUCT_PID;
45
46int pid_max = PID_MAX_DEFAULT;
47
48#define RESERVED_PIDS 300
49
50int pid_max_min = RESERVED_PIDS + 1;
51int pid_max_max = PID_MAX_LIMIT;
52
53#define BITS_PER_PAGE (PAGE_SIZE*8)
54#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
55
56static inline int mk_pid(struct pid_namespace *pid_ns,
57 struct pidmap *map, int off)
58{
59 return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
60}
61
62#define find_next_offset(map, off) \
63 find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
64
65/*
66 * PID-map pages start out as NULL, they get allocated upon
67 * first use and are never deallocated. This way a low pid_max
68 * value does not cause lots of bitmaps to be allocated, but
69 * the scheme scales to up to 4 million PIDs, runtime.
70 */
71struct pid_namespace init_pid_ns = {
72 .kref = {
73 .refcount = ATOMIC_INIT(2),
74 },
75 .pidmap = {
76 [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
77 },
78 .last_pid = 0,
79 .level = 0,
80 .child_reaper = &init_task,
81};
82EXPORT_SYMBOL_GPL(init_pid_ns);
83
84int is_container_init(struct task_struct *tsk)
85{
86 int ret = 0;
87 struct pid *pid;
88
89 rcu_read_lock();
90 pid = task_pid(tsk);
91 if (pid != NULL && pid->numbers[pid->level].nr == 1)
92 ret = 1;
93 rcu_read_unlock();
94
95 return ret;
96}
97EXPORT_SYMBOL(is_container_init);
98
99/*
100 * Note: disable interrupts while the pidmap_lock is held as an
101 * interrupt might come in and do read_lock(&tasklist_lock).
102 *
103 * If we don't disable interrupts there is a nasty deadlock between
104 * detach_pid()->free_pid() and another cpu that does
105 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
106 * read_lock(&tasklist_lock);
107 *
108 * After we clean up the tasklist_lock and know there are no
109 * irq handlers that take it we can leave the interrupts enabled.
110 * For now it is easier to be safe than to prove it can't happen.
111 */
112
113static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
114
115static void free_pidmap(struct upid *upid)
116{
117 int nr = upid->nr;
118 struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
119 int offset = nr & BITS_PER_PAGE_MASK;
120
121 clear_bit(offset, map->page);
122 atomic_inc(&map->nr_free);
123}
124
125/*
126 * If we started walking pids at 'base', is 'a' seen before 'b'?
127 */
128static int pid_before(int base, int a, int b)
129{
130 /*
131 * This is the same as saying
132 *
133 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
134 * and that mapping orders 'a' and 'b' with respect to 'base'.
135 */
136 return (unsigned)(a - base) < (unsigned)(b - base);
137}
138
139/*
140 * We might be racing with someone else trying to set pid_ns->last_pid
141 * at the pid allocation time (there's also a sysctl for this, but racing
142 * with this one is OK, see comment in kernel/pid_namespace.c about it).
143 * We want the winner to have the "later" value, because if the
144 * "earlier" value prevails, then a pid may get reused immediately.
145 *
146 * Since pids rollover, it is not sufficient to just pick the bigger
147 * value. We have to consider where we started counting from.
148 *
149 * 'base' is the value of pid_ns->last_pid that we observed when
150 * we started looking for a pid.
151 *
152 * 'pid' is the pid that we eventually found.
153 */
154static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
155{
156 int prev;
157 int last_write = base;
158 do {
159 prev = last_write;
160 last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
161 } while ((prev != last_write) && (pid_before(base, last_write, pid)));
162}
163
164static int alloc_pidmap(struct pid_namespace *pid_ns)
165{
166 int i, offset, max_scan, pid, last = pid_ns->last_pid;
167 struct pidmap *map;
168
169 pid = last + 1;
170 if (pid >= pid_max)
171 pid = RESERVED_PIDS;
172 offset = pid & BITS_PER_PAGE_MASK;
173 map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
174 /*
175 * If last_pid points into the middle of the map->page we
176 * want to scan this bitmap block twice, the second time
177 * we start with offset == 0 (or RESERVED_PIDS).
178 */
179 max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
180 for (i = 0; i <= max_scan; ++i) {
181 if (unlikely(!map->page)) {
182 void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
183 /*
184 * Free the page if someone raced with us
185 * installing it:
186 */
187 spin_lock_irq(&pidmap_lock);
188 if (!map->page) {
189 map->page = page;
190 page = NULL;
191 }
192 spin_unlock_irq(&pidmap_lock);
193 kfree(page);
194 if (unlikely(!map->page))
195 break;
196 }
197 if (likely(atomic_read(&map->nr_free))) {
198 do {
199 if (!test_and_set_bit(offset, map->page)) {
200 atomic_dec(&map->nr_free);
201 set_last_pid(pid_ns, last, pid);
202 return pid;
203 }
204 offset = find_next_offset(map, offset);
205 pid = mk_pid(pid_ns, map, offset);
206 } while (offset < BITS_PER_PAGE && pid < pid_max);
207 }
208 if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
209 ++map;
210 offset = 0;
211 } else {
212 map = &pid_ns->pidmap[0];
213 offset = RESERVED_PIDS;
214 if (unlikely(last == offset))
215 break;
216 }
217 pid = mk_pid(pid_ns, map, offset);
218 }
219 return -1;
220}
221
222int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
223{
224 int offset;
225 struct pidmap *map, *end;
226
227 if (last >= PID_MAX_LIMIT)
228 return -1;
229
230 offset = (last + 1) & BITS_PER_PAGE_MASK;
231 map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
232 end = &pid_ns->pidmap[PIDMAP_ENTRIES];
233 for (; map < end; map++, offset = 0) {
234 if (unlikely(!map->page))
235 continue;
236 offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
237 if (offset < BITS_PER_PAGE)
238 return mk_pid(pid_ns, map, offset);
239 }
240 return -1;
241}
242
243void put_pid(struct pid *pid)
244{
245 struct pid_namespace *ns;
246
247 if (!pid)
248 return;
249
250 ns = pid->numbers[pid->level].ns;
251 if ((atomic_read(&pid->count) == 1) ||
252 atomic_dec_and_test(&pid->count)) {
253 kmem_cache_free(ns->pid_cachep, pid);
254 put_pid_ns(ns);
255 }
256}
257EXPORT_SYMBOL_GPL(put_pid);
258
259static void delayed_put_pid(struct rcu_head *rhp)
260{
261 struct pid *pid = container_of(rhp, struct pid, rcu);
262 put_pid(pid);
263}
264
265void free_pid(struct pid *pid)
266{
267 /* We can be called with write_lock_irq(&tasklist_lock) held */
268 int i;
269 unsigned long flags;
270
271 spin_lock_irqsave(&pidmap_lock, flags);
272 for (i = 0; i <= pid->level; i++)
273 hlist_del_rcu(&pid->numbers[i].pid_chain);
274 spin_unlock_irqrestore(&pidmap_lock, flags);
275
276 for (i = 0; i <= pid->level; i++)
277 free_pidmap(pid->numbers + i);
278
279 call_rcu(&pid->rcu, delayed_put_pid);
280}
281
282struct pid *alloc_pid(struct pid_namespace *ns)
283{
284 struct pid *pid;
285 enum pid_type type;
286 int i, nr;
287 struct pid_namespace *tmp;
288 struct upid *upid;
289
290 pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
291 if (!pid)
292 goto out;
293
294 tmp = ns;
295 for (i = ns->level; i >= 0; i--) {
296 nr = alloc_pidmap(tmp);
297 if (nr < 0)
298 goto out_free;
299
300 pid->numbers[i].nr = nr;
301 pid->numbers[i].ns = tmp;
302 tmp = tmp->parent;
303 }
304
305 get_pid_ns(ns);
306 pid->level = ns->level;
307 atomic_set(&pid->count, 1);
308 for (type = 0; type < PIDTYPE_MAX; ++type)
309 INIT_HLIST_HEAD(&pid->tasks[type]);
310
311 upid = pid->numbers + ns->level;
312 spin_lock_irq(&pidmap_lock);
313 for ( ; upid >= pid->numbers; --upid)
314 hlist_add_head_rcu(&upid->pid_chain,
315 &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
316 spin_unlock_irq(&pidmap_lock);
317
318out:
319 return pid;
320
321out_free:
322 while (++i <= ns->level)
323 free_pidmap(pid->numbers + i);
324
325 kmem_cache_free(ns->pid_cachep, pid);
326 pid = NULL;
327 goto out;
328}
329
330struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
331{
332 struct hlist_node *elem;
333 struct upid *pnr;
334
335 hlist_for_each_entry_rcu(pnr, elem,
336 &pid_hash[pid_hashfn(nr, ns)], pid_chain)
337 if (pnr->nr == nr && pnr->ns == ns)
338 return container_of(pnr, struct pid,
339 numbers[ns->level]);
340
341 return NULL;
342}
343EXPORT_SYMBOL_GPL(find_pid_ns);
344
345struct pid *find_vpid(int nr)
346{
347 return find_pid_ns(nr, current->nsproxy->pid_ns);
348}
349EXPORT_SYMBOL_GPL(find_vpid);
350
351/*
352 * attach_pid() must be called with the tasklist_lock write-held.
353 */
354void attach_pid(struct task_struct *task, enum pid_type type,
355 struct pid *pid)
356{
357 struct pid_link *link;
358
359 link = &task->pids[type];
360 link->pid = pid;
361 hlist_add_head_rcu(&link->node, &pid->tasks[type]);
362}
363
364static void __change_pid(struct task_struct *task, enum pid_type type,
365 struct pid *new)
366{
367 struct pid_link *link;
368 struct pid *pid;
369 int tmp;
370
371 link = &task->pids[type];
372 pid = link->pid;
373
374 hlist_del_rcu(&link->node);
375 link->pid = new;
376
377 for (tmp = PIDTYPE_MAX; --tmp >= 0; )
378 if (!hlist_empty(&pid->tasks[tmp]))
379 return;
380
381 free_pid(pid);
382}
383
384void detach_pid(struct task_struct *task, enum pid_type type)
385{
386 __change_pid(task, type, NULL);
387}
388
389void change_pid(struct task_struct *task, enum pid_type type,
390 struct pid *pid)
391{
392 __change_pid(task, type, pid);
393 attach_pid(task, type, pid);
394}
395
396/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
397void transfer_pid(struct task_struct *old, struct task_struct *new,
398 enum pid_type type)
399{
400 new->pids[type].pid = old->pids[type].pid;
401 hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
402}
403
404struct task_struct *pid_task(struct pid *pid, enum pid_type type)
405{
406 struct task_struct *result = NULL;
407 if (pid) {
408 struct hlist_node *first;
409 first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
410 lockdep_tasklist_lock_is_held());
411 if (first)
412 result = hlist_entry(first, struct task_struct, pids[(type)].node);
413 }
414 return result;
415}
416EXPORT_SYMBOL(pid_task);
417
418/*
419 * Must be called under rcu_read_lock().
420 */
421struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
422{
423 rcu_lockdep_assert(rcu_read_lock_held(),
424 "find_task_by_pid_ns() needs rcu_read_lock()"
425 " protection");
426 return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
427}
428
429struct task_struct *find_task_by_vpid(pid_t vnr)
430{
431 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
432}
433
434struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
435{
436 struct pid *pid;
437 rcu_read_lock();
438 if (type != PIDTYPE_PID)
439 task = task->group_leader;
440 pid = get_pid(task->pids[type].pid);
441 rcu_read_unlock();
442 return pid;
443}
444EXPORT_SYMBOL_GPL(get_task_pid);
445
446struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
447{
448 struct task_struct *result;
449 rcu_read_lock();
450 result = pid_task(pid, type);
451 if (result)
452 get_task_struct(result);
453 rcu_read_unlock();
454 return result;
455}
456EXPORT_SYMBOL_GPL(get_pid_task);
457
458struct pid *find_get_pid(pid_t nr)
459{
460 struct pid *pid;
461
462 rcu_read_lock();
463 pid = get_pid(find_vpid(nr));
464 rcu_read_unlock();
465
466 return pid;
467}
468EXPORT_SYMBOL_GPL(find_get_pid);
469
470pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
471{
472 struct upid *upid;
473 pid_t nr = 0;
474
475 if (pid && ns->level <= pid->level) {
476 upid = &pid->numbers[ns->level];
477 if (upid->ns == ns)
478 nr = upid->nr;
479 }
480 return nr;
481}
482
483pid_t pid_vnr(struct pid *pid)
484{
485 return pid_nr_ns(pid, current->nsproxy->pid_ns);
486}
487EXPORT_SYMBOL_GPL(pid_vnr);
488
489pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
490 struct pid_namespace *ns)
491{
492 pid_t nr = 0;
493
494 rcu_read_lock();
495 if (!ns)
496 ns = current->nsproxy->pid_ns;
497 if (likely(pid_alive(task))) {
498 if (type != PIDTYPE_PID)
499 task = task->group_leader;
500 nr = pid_nr_ns(task->pids[type].pid, ns);
501 }
502 rcu_read_unlock();
503
504 return nr;
505}
506EXPORT_SYMBOL(__task_pid_nr_ns);
507
508pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
509{
510 return pid_nr_ns(task_tgid(tsk), ns);
511}
512EXPORT_SYMBOL(task_tgid_nr_ns);
513
514struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
515{
516 return ns_of_pid(task_pid(tsk));
517}
518EXPORT_SYMBOL_GPL(task_active_pid_ns);
519
520/*
521 * Used by proc to find the first pid that is greater than or equal to nr.
522 *
523 * If there is a pid at nr this function is exactly the same as find_pid_ns.
524 */
525struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
526{
527 struct pid *pid;
528
529 do {
530 pid = find_pid_ns(nr, ns);
531 if (pid)
532 break;
533 nr = next_pidmap(ns, nr);
534 } while (nr > 0);
535
536 return pid;
537}
538
539/*
540 * The pid hash table is scaled according to the amount of memory in the
541 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
542 * more.
543 */
544void __init pidhash_init(void)
545{
546 unsigned int i, pidhash_size;
547
548 pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
549 HASH_EARLY | HASH_SMALL,
550 &pidhash_shift, NULL,
551 0, 4096);
552 pidhash_size = 1U << pidhash_shift;
553
554 for (i = 0; i < pidhash_size; i++)
555 INIT_HLIST_HEAD(&pid_hash[i]);
556}
557
558void __init pidmap_init(void)
559{
560 /* bump default and minimum pid_max based on number of cpus */
561 pid_max = min(pid_max_max, max_t(int, pid_max,
562 PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
563 pid_max_min = max_t(int, pid_max_min,
564 PIDS_PER_CPU_MIN * num_possible_cpus());
565 pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
566
567 init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
568 /* Reserve PID 0. We never call free_pidmap(0) */
569 set_bit(0, init_pid_ns.pidmap[0].page);
570 atomic_dec(&init_pid_ns.pidmap[0].nr_free);
571
572 init_pid_ns.pid_cachep = KMEM_CACHE(pid,
573 SLAB_HWCACHE_ALIGN | SLAB_PANIC);
574}