kernel/pid.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Generic pidhash and scalable, time-bounded PID allocator
  4 *
  5 * (C) 2002-2003 Nadia Yvette Chambers, IBM
  6 * (C) 2004 Nadia Yvette Chambers, Oracle
  7 * (C) 2002-2004 Ingo Molnar, Red Hat
  8 *
  9 * pid-structures are backing objects for tasks sharing a given ID to chain
 10 * against. There is very little to them aside from hashing them and
 11 * parking tasks using given IDs on a list.
 12 *
 13 * The hash is always changed with the tasklist_lock write-acquired,
 14 * and the hash is only accessed with the tasklist_lock at least
 15 * read-acquired, so there's no additional SMP locking needed here.
 16 *
 17 * We have a list of bitmap pages, which bitmaps represent the PID space.
 18 * Allocating and freeing PIDs is completely lockless. The worst-case
 19 * allocation scenario when all but one out of 1 million PIDs possible are
 20 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 21 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 22 *
 23 * Pid namespaces:
 24 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 25 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 26 *     Many thanks to Oleg Nesterov for comments and help
 27 *
 28 */
 29
 30#include <linux/mm.h>
 31#include <linux/export.h>
 32#include <linux/slab.h>
 33#include <linux/init.h>
 34#include <linux/rculist.h>
 35#include <linux/memblock.h>
 36#include <linux/pid_namespace.h>
 37#include <linux/init_task.h>
 38#include <linux/syscalls.h>
 39#include <linux/proc_ns.h>
 40#include <linux/refcount.h>
 41#include <linux/anon_inodes.h>
 42#include <linux/sched/signal.h>
 43#include <linux/sched/task.h>
 44#include <linux/idr.h>
 45
 46struct pid init_struct_pid = {
 47	.count		= REFCOUNT_INIT(1),
 48	.tasks		= {
 49		{ .first = NULL },
 50		{ .first = NULL },
 51		{ .first = NULL },
 52	},
 53	.level		= 0,
 54	.numbers	= { {
 55		.nr		= 0,
 56		.ns		= &init_pid_ns,
 57	}, }
 58};
 59
 60int pid_max = PID_MAX_DEFAULT;
 61
 62#define RESERVED_PIDS		300
 63
 64int pid_max_min = RESERVED_PIDS + 1;
 65int pid_max_max = PID_MAX_LIMIT;
 66
 67/*
 68 * PID-map pages start out as NULL, they get allocated upon
 69 * first use and are never deallocated. This way a low pid_max
 70 * value does not cause lots of bitmaps to be allocated, but
 71 * the scheme scales to up to 4 million PIDs, runtime.
 72 */
 73struct pid_namespace init_pid_ns = {
 74	.kref = KREF_INIT(2),
 75	.idr = IDR_INIT(init_pid_ns.idr),
 76	.pid_allocated = PIDNS_ADDING,
 77	.level = 0,
 78	.child_reaper = &init_task,
 79	.user_ns = &init_user_ns,
 80	.ns.inum = PROC_PID_INIT_INO,
 81#ifdef CONFIG_PID_NS
 82	.ns.ops = &pidns_operations,
 83#endif
 84};
 85EXPORT_SYMBOL_GPL(init_pid_ns);
 86
 87/*
 88 * Note: disable interrupts while the pidmap_lock is held as an
 89 * interrupt might come in and do read_lock(&tasklist_lock).
 90 *
 91 * If we don't disable interrupts there is a nasty deadlock between
 92 * detach_pid()->free_pid() and another cpu that does
 93 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 94 * read_lock(&tasklist_lock);
 95 *
 96 * After we clean up the tasklist_lock and know there are no
 97 * irq handlers that take it we can leave the interrupts enabled.
 98 * For now it is easier to be safe than to prove it can't happen.
 99 */
100
101static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
102
103void put_pid(struct pid *pid)
104{
105	struct pid_namespace *ns;
106
107	if (!pid)
108		return;
109
110	ns = pid->numbers[pid->level].ns;
111	if (refcount_dec_and_test(&pid->count)) {
112		kmem_cache_free(ns->pid_cachep, pid);
113		put_pid_ns(ns);
114	}
115}
116EXPORT_SYMBOL_GPL(put_pid);
117
118static void delayed_put_pid(struct rcu_head *rhp)
119{
120	struct pid *pid = container_of(rhp, struct pid, rcu);
121	put_pid(pid);
122}
123
124void free_pid(struct pid *pid)
125{
126	/* We can be called with write_lock_irq(&tasklist_lock) held */
127	int i;
128	unsigned long flags;
129
130	spin_lock_irqsave(&pidmap_lock, flags);
131	for (i = 0; i <= pid->level; i++) {
132		struct upid *upid = pid->numbers + i;
133		struct pid_namespace *ns = upid->ns;
134		switch (--ns->pid_allocated) {
135		case 2:
136		case 1:
137			/* When all that is left in the pid namespace
138			 * is the reaper, wake up the reaper.  The reaper
139			 * may be sleeping in zap_pid_ns_processes().
140			 */
141			wake_up_process(ns->child_reaper);
142			break;
143		case PIDNS_ADDING:
144			/* Handle a fork failure of the first process */
145			WARN_ON(ns->child_reaper);
146			ns->pid_allocated = 0;
147			/* fall through */
148		case 0:
149			schedule_work(&ns->proc_work);
150			break;
151		}
152
153		idr_remove(&ns->idr, upid->nr);
154	}
155	spin_unlock_irqrestore(&pidmap_lock, flags);
156
157	call_rcu(&pid->rcu, delayed_put_pid);
158}
159
160struct pid *alloc_pid(struct pid_namespace *ns)
161{
162	struct pid *pid;
163	enum pid_type type;
164	int i, nr;
165	struct pid_namespace *tmp;
166	struct upid *upid;
167	int retval = -ENOMEM;
168
169	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
170	if (!pid)
171		return ERR_PTR(retval);
172
173	tmp = ns;
174	pid->level = ns->level;
175
176	for (i = ns->level; i >= 0; i--) {
177		int pid_min = 1;
178
179		idr_preload(GFP_KERNEL);
180		spin_lock_irq(&pidmap_lock);
181
182		/*
183		 * init really needs pid 1, but after reaching the maximum
184		 * wrap back to RESERVED_PIDS
185		 */
186		if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
187			pid_min = RESERVED_PIDS;
188
189		/*
190		 * Store a null pointer so find_pid_ns does not find
191		 * a partially initialized PID (see below).
192		 */
193		nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
194				      pid_max, GFP_ATOMIC);
195		spin_unlock_irq(&pidmap_lock);
196		idr_preload_end();
197
198		if (nr < 0) {
199			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
200			goto out_free;
201		}
202
203		pid->numbers[i].nr = nr;
204		pid->numbers[i].ns = tmp;
205		tmp = tmp->parent;
206	}
207
208	if (unlikely(is_child_reaper(pid))) {
209		if (pid_ns_prepare_proc(ns))
210			goto out_free;
211	}
212
213	get_pid_ns(ns);
214	refcount_set(&pid->count, 1);
215	for (type = 0; type < PIDTYPE_MAX; ++type)
216		INIT_HLIST_HEAD(&pid->tasks[type]);
217
218	init_waitqueue_head(&pid->wait_pidfd);
219
220	upid = pid->numbers + ns->level;
221	spin_lock_irq(&pidmap_lock);
222	if (!(ns->pid_allocated & PIDNS_ADDING))
223		goto out_unlock;
224	for ( ; upid >= pid->numbers; --upid) {
225		/* Make the PID visible to find_pid_ns. */
226		idr_replace(&upid->ns->idr, pid, upid->nr);
227		upid->ns->pid_allocated++;
228	}
229	spin_unlock_irq(&pidmap_lock);
230
231	return pid;
232
233out_unlock:
234	spin_unlock_irq(&pidmap_lock);
235	put_pid_ns(ns);
236
237out_free:
238	spin_lock_irq(&pidmap_lock);
239	while (++i <= ns->level) {
240		upid = pid->numbers + i;
241		idr_remove(&upid->ns->idr, upid->nr);
242	}
243
244	/* On failure to allocate the first pid, reset the state */
245	if (ns->pid_allocated == PIDNS_ADDING)
246		idr_set_cursor(&ns->idr, 0);
247
248	spin_unlock_irq(&pidmap_lock);
249
250	kmem_cache_free(ns->pid_cachep, pid);
251	return ERR_PTR(retval);
252}
253
254void disable_pid_allocation(struct pid_namespace *ns)
255{
256	spin_lock_irq(&pidmap_lock);
257	ns->pid_allocated &= ~PIDNS_ADDING;
258	spin_unlock_irq(&pidmap_lock);
259}
260
261struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
262{
263	return idr_find(&ns->idr, nr);
264}
265EXPORT_SYMBOL_GPL(find_pid_ns);
266
267struct pid *find_vpid(int nr)
268{
269	return find_pid_ns(nr, task_active_pid_ns(current));
270}
271EXPORT_SYMBOL_GPL(find_vpid);
272
273static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
274{
275	return (type == PIDTYPE_PID) ?
276		&task->thread_pid :
277		&task->signal->pids[type];
278}
279
280/*
281 * attach_pid() must be called with the tasklist_lock write-held.
282 */
283void attach_pid(struct task_struct *task, enum pid_type type)
284{
285	struct pid *pid = *task_pid_ptr(task, type);
286	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
287}
288
289static void __change_pid(struct task_struct *task, enum pid_type type,
290			struct pid *new)
291{
292	struct pid **pid_ptr = task_pid_ptr(task, type);
293	struct pid *pid;
294	int tmp;
295
296	pid = *pid_ptr;
297
298	hlist_del_rcu(&task->pid_links[type]);
299	*pid_ptr = new;
300
301	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
302		if (!hlist_empty(&pid->tasks[tmp]))
303			return;
304
305	free_pid(pid);
306}
307
308void detach_pid(struct task_struct *task, enum pid_type type)
309{
310	__change_pid(task, type, NULL);
311}
312
313void change_pid(struct task_struct *task, enum pid_type type,
314		struct pid *pid)
315{
316	__change_pid(task, type, pid);
317	attach_pid(task, type);
318}
319
320/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
321void transfer_pid(struct task_struct *old, struct task_struct *new,
322			   enum pid_type type)
323{
324	if (type == PIDTYPE_PID)
325		new->thread_pid = old->thread_pid;
326	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
327}
328
329struct task_struct *pid_task(struct pid *pid, enum pid_type type)
330{
331	struct task_struct *result = NULL;
332	if (pid) {
333		struct hlist_node *first;
334		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
335					      lockdep_tasklist_lock_is_held());
336		if (first)
337			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
338	}
339	return result;
340}
341EXPORT_SYMBOL(pid_task);
342
343/*
344 * Must be called under rcu_read_lock().
345 */
346struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
347{
348	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
349			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
350	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
351}
352
353struct task_struct *find_task_by_vpid(pid_t vnr)
354{
355	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
356}
357
358struct task_struct *find_get_task_by_vpid(pid_t nr)
359{
360	struct task_struct *task;
361
362	rcu_read_lock();
363	task = find_task_by_vpid(nr);
364	if (task)
365		get_task_struct(task);
366	rcu_read_unlock();
367
368	return task;
369}
370
371struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
372{
373	struct pid *pid;
374	rcu_read_lock();
375	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
376	rcu_read_unlock();
377	return pid;
378}
379EXPORT_SYMBOL_GPL(get_task_pid);
380
381struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
382{
383	struct task_struct *result;
384	rcu_read_lock();
385	result = pid_task(pid, type);
386	if (result)
387		get_task_struct(result);
388	rcu_read_unlock();
389	return result;
390}
391EXPORT_SYMBOL_GPL(get_pid_task);
392
393struct pid *find_get_pid(pid_t nr)
394{
395	struct pid *pid;
396
397	rcu_read_lock();
398	pid = get_pid(find_vpid(nr));
399	rcu_read_unlock();
400
401	return pid;
402}
403EXPORT_SYMBOL_GPL(find_get_pid);
404
405pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
406{
407	struct upid *upid;
408	pid_t nr = 0;
409
410	if (pid && ns->level <= pid->level) {
411		upid = &pid->numbers[ns->level];
412		if (upid->ns == ns)
413			nr = upid->nr;
414	}
415	return nr;
416}
417EXPORT_SYMBOL_GPL(pid_nr_ns);
418
419pid_t pid_vnr(struct pid *pid)
420{
421	return pid_nr_ns(pid, task_active_pid_ns(current));
422}
423EXPORT_SYMBOL_GPL(pid_vnr);
424
425pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
426			struct pid_namespace *ns)
427{
428	pid_t nr = 0;
429
430	rcu_read_lock();
431	if (!ns)
432		ns = task_active_pid_ns(current);
433	if (likely(pid_alive(task)))
434		nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
435	rcu_read_unlock();
436
437	return nr;
438}
439EXPORT_SYMBOL(__task_pid_nr_ns);
440
441struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
442{
443	return ns_of_pid(task_pid(tsk));
444}
445EXPORT_SYMBOL_GPL(task_active_pid_ns);
446
447/*
448 * Used by proc to find the first pid that is greater than or equal to nr.
449 *
450 * If there is a pid at nr this function is exactly the same as find_pid_ns.
451 */
452struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
453{
454	return idr_get_next(&ns->idr, &nr);
455}
456
457/**
458 * pidfd_create() - Create a new pid file descriptor.
459 *
460 * @pid:  struct pid that the pidfd will reference
461 *
462 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
463 *
464 * Note that this function can only be called after the fd table has
465 * been unshared to avoid leaking the pidfd to the new process.
466 *
467 * Return: On success, a cloexec pidfd is returned.
468 *         On error, a negative errno number will be returned.
469 */
470static int pidfd_create(struct pid *pid)
471{
472	int fd;
473
474	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
475			      O_RDWR | O_CLOEXEC);
476	if (fd < 0)
477		put_pid(pid);
478
479	return fd;
480}
481
482/**
483 * pidfd_open() - Open new pid file descriptor.
484 *
485 * @pid:   pid for which to retrieve a pidfd
486 * @flags: flags to pass
487 *
488 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
489 * the process identified by @pid. Currently, the process identified by
490 * @pid must be a thread-group leader. This restriction currently exists
491 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
492 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
493 * leaders).
494 *
495 * Return: On success, a cloexec pidfd is returned.
496 *         On error, a negative errno number will be returned.
497 */
498SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
499{
500	int fd, ret;
501	struct pid *p;
502
503	if (flags)
504		return -EINVAL;
505
506	if (pid <= 0)
507		return -EINVAL;
508
509	p = find_get_pid(pid);
510	if (!p)
511		return -ESRCH;
512
513	ret = 0;
514	rcu_read_lock();
515	if (!pid_task(p, PIDTYPE_TGID))
516		ret = -EINVAL;
517	rcu_read_unlock();
518
519	fd = ret ?: pidfd_create(p);
520	put_pid(p);
521	return fd;
522}
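
The pidfd returned by pidfd_open() supports polling: pidfd_poll() (added in Linux 5.3) makes the descriptor readable once the process exits. A minimal userspace sketch of that usage, not part of pid.c, assuming the asm-generic syscall number 434 for pidfd_open:

/*
 * Wait for an arbitrary process to exit by polling its pidfd.
 * Build: cc -o pidfd_wait pidfd_wait.c   (hypothetical file name)
 */
#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434	/* asm-generic; verify for your arch */
#endif

int main(int argc, char *argv[])
{
	struct pollfd pfd;
	int pidfd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}

	pidfd = syscall(__NR_pidfd_open, (pid_t)atoi(argv[1]), 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	pfd.fd = pidfd;
	pfd.events = POLLIN;	/* becomes readable when the task exits */
	if (poll(&pfd, 1, -1) < 0) {
		perror("poll");
		return 1;
	}
	printf("process %s exited\n", argv[1]);
	close(pidfd);
	return 0;
}
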
523
524void __init pid_idr_init(void)
525{
526	/* Verify no one has done anything silly: */
527	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);
528
529	/* bump default and minimum pid_max based on number of cpus */
530	pid_max = min(pid_max_max, max_t(int, pid_max,
531				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
532	pid_max_min = max_t(int, pid_max_min,
533				PIDS_PER_CPU_MIN * num_possible_cpus());
534	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
535
536	idr_init(&init_pid_ns.idr);
537
538	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
539			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
540}
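
For illustration, the CPU scaling that pid_idr_init() applies to pid_max can be reproduced in plain C. This is a sketch using the 64-bit default constants from include/linux/threads.h (PID_MAX_DEFAULT = 32768, PID_MAX_LIMIT = 4194304, PIDS_PER_CPU_DEFAULT = 1024, PIDS_PER_CPU_MIN = 8):

#include <stdio.h>

/* 64-bit defaults from include/linux/threads.h and this file. */
#define PID_MAX_DEFAULT		32768		/* 0x8000 */
#define PID_MAX_LIMIT		(4 * 1024 * 1024)
#define PIDS_PER_CPU_DEFAULT	1024
#define PIDS_PER_CPU_MIN	8
#define RESERVED_PIDS		300

static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int cpus[] = { 4, 64, 4096 };	/* example CPU counts */

	for (int i = 0; i < 3; i++) {
		int n = cpus[i];
		/* Same arithmetic as pid_idr_init(). */
		int pid_max = min_int(PID_MAX_LIMIT,
				      max_int(PID_MAX_DEFAULT,
					      PIDS_PER_CPU_DEFAULT * n));
		int pid_max_min = max_int(RESERVED_PIDS + 1,
					  PIDS_PER_CPU_MIN * n);
		printf("%4d cpus: pid_max=%d pid_max_min=%d\n",
		       n, pid_max, pid_max_min);
	}
	/* Prints 32768/301, 65536/512 and 4194304/32768 respectively. */
	return 0;
}
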
kernel/pid.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Generic pidhash and scalable, time-bounded PID allocator
  4 *
  5 * (C) 2002-2003 Nadia Yvette Chambers, IBM
  6 * (C) 2004 Nadia Yvette Chambers, Oracle
  7 * (C) 2002-2004 Ingo Molnar, Red Hat
  8 *
  9 * pid-structures are backing objects for tasks sharing a given ID to chain
 10 * against. There is very little to them aside from hashing them and
 11 * parking tasks using given IDs on a list.
 12 *
 13 * The hash is always changed with the tasklist_lock write-acquired,
 14 * and the hash is only accessed with the tasklist_lock at least
 15 * read-acquired, so there's no additional SMP locking needed here.
 16 *
 17 * We have a list of bitmap pages, which bitmaps represent the PID space.
 18 * Allocating and freeing PIDs is completely lockless. The worst-case
 19 * allocation scenario when all but one out of 1 million PIDs possible are
 20 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 21 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 22 *
 23 * Pid namespaces:
 24 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 25 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 26 *     Many thanks to Oleg Nesterov for comments and help
 27 *
 28 */
 29
 30#include <linux/mm.h>
 31#include <linux/export.h>
 32#include <linux/slab.h>
 33#include <linux/init.h>
 34#include <linux/rculist.h>
 35#include <linux/memblock.h>
 36#include <linux/pid_namespace.h>
 37#include <linux/init_task.h>
 38#include <linux/syscalls.h>
 39#include <linux/proc_ns.h>
 40#include <linux/refcount.h>
 41#include <linux/anon_inodes.h>
 42#include <linux/sched/signal.h>
 43#include <linux/sched/task.h>
 44#include <linux/idr.h>
 45#include <net/sock.h>
 46#include <uapi/linux/pidfd.h>
 47
 48struct pid init_struct_pid = {
 49	.count		= REFCOUNT_INIT(1),
 50	.tasks		= {
 51		{ .first = NULL },
 52		{ .first = NULL },
 53		{ .first = NULL },
 54	},
 55	.level		= 0,
 56	.numbers	= { {
 57		.nr		= 0,
 58		.ns		= &init_pid_ns,
 59	}, }
 60};
 61
 62int pid_max = PID_MAX_DEFAULT;
 63
 64#define RESERVED_PIDS		300
 65
 66int pid_max_min = RESERVED_PIDS + 1;
 67int pid_max_max = PID_MAX_LIMIT;
 68
 69/*
 70 * PID-map pages start out as NULL, they get allocated upon
 71 * first use and are never deallocated. This way a low pid_max
 72 * value does not cause lots of bitmaps to be allocated, but
 73 * the scheme scales to up to 4 million PIDs, runtime.
 74 */
 75struct pid_namespace init_pid_ns = {
 76	.ns.count = REFCOUNT_INIT(2),
 77	.idr = IDR_INIT(init_pid_ns.idr),
 78	.pid_allocated = PIDNS_ADDING,
 79	.level = 0,
 80	.child_reaper = &init_task,
 81	.user_ns = &init_user_ns,
 82	.ns.inum = PROC_PID_INIT_INO,
 83#ifdef CONFIG_PID_NS
 84	.ns.ops = &pidns_operations,
 85#endif
 86};
 87EXPORT_SYMBOL_GPL(init_pid_ns);
 88
 89/*
 90 * Note: disable interrupts while the pidmap_lock is held as an
 91 * interrupt might come in and do read_lock(&tasklist_lock).
 92 *
 93 * If we don't disable interrupts there is a nasty deadlock between
 94 * detach_pid()->free_pid() and another cpu that does
 95 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 96 * read_lock(&tasklist_lock);
 97 *
 98 * After we clean up the tasklist_lock and know there are no
 99 * irq handlers that take it we can leave the interrupts enabled.
100 * For now it is easier to be safe than to prove it can't happen.
101 */
102
103static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
104
105void put_pid(struct pid *pid)
106{
107	struct pid_namespace *ns;
108
109	if (!pid)
110		return;
111
112	ns = pid->numbers[pid->level].ns;
113	if (refcount_dec_and_test(&pid->count)) {
114		kmem_cache_free(ns->pid_cachep, pid);
115		put_pid_ns(ns);
116	}
117}
118EXPORT_SYMBOL_GPL(put_pid);
119
120static void delayed_put_pid(struct rcu_head *rhp)
121{
122	struct pid *pid = container_of(rhp, struct pid, rcu);
123	put_pid(pid);
124}
125
126void free_pid(struct pid *pid)
127{
128	/* We can be called with write_lock_irq(&tasklist_lock) held */
129	int i;
130	unsigned long flags;
131
132	spin_lock_irqsave(&pidmap_lock, flags);
133	for (i = 0; i <= pid->level; i++) {
134		struct upid *upid = pid->numbers + i;
135		struct pid_namespace *ns = upid->ns;
136		switch (--ns->pid_allocated) {
137		case 2:
138		case 1:
139			/* When all that is left in the pid namespace
140			 * is the reaper, wake up the reaper.  The reaper
141			 * may be sleeping in zap_pid_ns_processes().
142			 */
143			wake_up_process(ns->child_reaper);
144			break;
145		case PIDNS_ADDING:
146			/* Handle a fork failure of the first process */
147			WARN_ON(ns->child_reaper);
148			ns->pid_allocated = 0;
149			break;
150		}
151
152		idr_remove(&ns->idr, upid->nr);
153	}
154	spin_unlock_irqrestore(&pidmap_lock, flags);
155
156	call_rcu(&pid->rcu, delayed_put_pid);
157}
158
159struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
160		      size_t set_tid_size)
161{
162	struct pid *pid;
163	enum pid_type type;
164	int i, nr;
165	struct pid_namespace *tmp;
166	struct upid *upid;
167	int retval = -ENOMEM;
168
169	/*
170	 * set_tid_size contains the size of the set_tid array. Starting at
171	 * the most nested currently active PID namespace it tells alloc_pid()
172	 * which PID to set for a process in that most nested PID namespace
173	 * up to set_tid_size PID namespaces. It does not have to set the PID
174	 * for a process in all nested PID namespaces but set_tid_size must
175	 * never be greater than the current ns->level + 1.
176	 */
177	if (set_tid_size > ns->level + 1)
178		return ERR_PTR(-EINVAL);
179
180	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
181	if (!pid)
182		return ERR_PTR(retval);
183
184	tmp = ns;
185	pid->level = ns->level;
186
187	for (i = ns->level; i >= 0; i--) {
188		int tid = 0;
189
190		if (set_tid_size) {
191			tid = set_tid[ns->level - i];
192
193			retval = -EINVAL;
194			if (tid < 1 || tid >= pid_max)
195				goto out_free;
196			/*
197			 * Also fail if a PID != 1 is requested and
198			 * no PID 1 exists.
199			 */
200			if (tid != 1 && !tmp->child_reaper)
201				goto out_free;
202			retval = -EPERM;
203			if (!checkpoint_restore_ns_capable(tmp->user_ns))
204				goto out_free;
205			set_tid_size--;
206		}
207
208		idr_preload(GFP_KERNEL);
209		spin_lock_irq(&pidmap_lock);
210
211		if (tid) {
212			nr = idr_alloc(&tmp->idr, NULL, tid,
213				       tid + 1, GFP_ATOMIC);
214			/*
215			 * If ENOSPC is returned it means that the PID is
216			 * already in use. Return EEXIST in that case.
217			 */
218			if (nr == -ENOSPC)
219				nr = -EEXIST;
220		} else {
221			int pid_min = 1;
222			/*
223			 * init really needs pid 1, but after reaching the
224			 * maximum wrap back to RESERVED_PIDS
225			 */
226			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
227				pid_min = RESERVED_PIDS;
228
229			/*
230			 * Store a null pointer so find_pid_ns does not find
231			 * a partially initialized PID (see below).
232			 */
233			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
234					      pid_max, GFP_ATOMIC);
235		}
236		spin_unlock_irq(&pidmap_lock);
237		idr_preload_end();
238
239		if (nr < 0) {
240			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
241			goto out_free;
242		}
243
244		pid->numbers[i].nr = nr;
245		pid->numbers[i].ns = tmp;
246		tmp = tmp->parent;
247	}
248
249	/*
250	 * ENOMEM is not the most obvious choice especially for the case
251	 * where the child subreaper has already exited and the pid
252	 * namespace denies the creation of any new processes. But ENOMEM
253	 * is what we have exposed to userspace for a long time and it is
254	 * documented behavior for pid namespaces. So we can't easily
255	 * change it even if there were an error code better suited.
256	 */
257	retval = -ENOMEM;
258
259	get_pid_ns(ns);
260	refcount_set(&pid->count, 1);
261	spin_lock_init(&pid->lock);
262	for (type = 0; type < PIDTYPE_MAX; ++type)
263		INIT_HLIST_HEAD(&pid->tasks[type]);
264
265	init_waitqueue_head(&pid->wait_pidfd);
266	INIT_HLIST_HEAD(&pid->inodes);
267
268	upid = pid->numbers + ns->level;
269	spin_lock_irq(&pidmap_lock);
270	if (!(ns->pid_allocated & PIDNS_ADDING))
271		goto out_unlock;
272	for ( ; upid >= pid->numbers; --upid) {
273		/* Make the PID visible to find_pid_ns. */
274		idr_replace(&upid->ns->idr, pid, upid->nr);
275		upid->ns->pid_allocated++;
276	}
277	spin_unlock_irq(&pidmap_lock);
278
279	return pid;
280
281out_unlock:
282	spin_unlock_irq(&pidmap_lock);
283	put_pid_ns(ns);
284
285out_free:
286	spin_lock_irq(&pidmap_lock);
287	while (++i <= ns->level) {
288		upid = pid->numbers + i;
289		idr_remove(&upid->ns->idr, upid->nr);
290	}
291
292	/* On failure to allocate the first pid, reset the state */
293	if (ns->pid_allocated == PIDNS_ADDING)
294		idr_set_cursor(&ns->idr, 0);
295
296	spin_unlock_irq(&pidmap_lock);
297
298	kmem_cache_free(ns->pid_cachep, pid);
299	return ERR_PTR(retval);
300}
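
The set_tid handling above is what backs the set_tid/set_tid_size fields of clone3(2), used by checkpoint/restore tools such as CRIU to recreate processes under their original PIDs. A hedged userspace sketch, assuming UAPI headers from Linux 5.5 or later (for struct clone_args with set_tid) and CAP_CHECKPOINT_RESTORE (or CAP_SYS_ADMIN on kernels before 5.9):

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <linux/types.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef __NR_clone3
#define __NR_clone3 435		/* asm-generic; verify for your arch */
#endif

int main(void)
{
	pid_t tid = 1234;	/* PID requested in the innermost pid ns */
	struct clone_args args;
	pid_t pid;

	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;
	args.set_tid = (__u64)(uintptr_t)&tid;
	args.set_tid_size = 1;	/* only set the PID at one namespace level */

	pid = syscall(__NR_clone3, &args, sizeof(args));
	if (pid < 0) {
		/* EPERM without the capability, EEXIST if 1234 is taken. */
		perror("clone3");
		return 1;
	}
	if (pid == 0) {
		printf("child runs as pid %d\n", getpid());	/* 1234 */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return 0;
}
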
301
302void disable_pid_allocation(struct pid_namespace *ns)
303{
304	spin_lock_irq(&pidmap_lock);
305	ns->pid_allocated &= ~PIDNS_ADDING;
306	spin_unlock_irq(&pidmap_lock);
307}
308
309struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
310{
311	return idr_find(&ns->idr, nr);
312}
313EXPORT_SYMBOL_GPL(find_pid_ns);
314
315struct pid *find_vpid(int nr)
316{
317	return find_pid_ns(nr, task_active_pid_ns(current));
318}
319EXPORT_SYMBOL_GPL(find_vpid);
320
321static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
322{
323	return (type == PIDTYPE_PID) ?
324		&task->thread_pid :
325		&task->signal->pids[type];
326}
327
328/*
329 * attach_pid() must be called with the tasklist_lock write-held.
330 */
331void attach_pid(struct task_struct *task, enum pid_type type)
332{
333	struct pid *pid = *task_pid_ptr(task, type);
334	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
335}
336
337static void __change_pid(struct task_struct *task, enum pid_type type,
338			struct pid *new)
339{
340	struct pid **pid_ptr = task_pid_ptr(task, type);
341	struct pid *pid;
342	int tmp;
343
344	pid = *pid_ptr;
345
346	hlist_del_rcu(&task->pid_links[type]);
347	*pid_ptr = new;
348
349	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
350		if (pid_has_task(pid, tmp))
351			return;
352
353	free_pid(pid);
354}
355
356void detach_pid(struct task_struct *task, enum pid_type type)
357{
358	__change_pid(task, type, NULL);
359}
360
361void change_pid(struct task_struct *task, enum pid_type type,
362		struct pid *pid)
363{
364	__change_pid(task, type, pid);
365	attach_pid(task, type);
366}
367
368void exchange_tids(struct task_struct *left, struct task_struct *right)
369{
370	struct pid *pid1 = left->thread_pid;
371	struct pid *pid2 = right->thread_pid;
372	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
373	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];
374
375	/* Swap the single entry tid lists */
376	hlists_swap_heads_rcu(head1, head2);
377
378	/* Swap the per task_struct pid */
379	rcu_assign_pointer(left->thread_pid, pid2);
380	rcu_assign_pointer(right->thread_pid, pid1);
381
382	/* Swap the cached value */
383	WRITE_ONCE(left->pid, pid_nr(pid2));
384	WRITE_ONCE(right->pid, pid_nr(pid1));
385}
386
387/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
388void transfer_pid(struct task_struct *old, struct task_struct *new,
389			   enum pid_type type)
390{
391	if (type == PIDTYPE_PID)
392		new->thread_pid = old->thread_pid;
393	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
394}
395
396struct task_struct *pid_task(struct pid *pid, enum pid_type type)
397{
398	struct task_struct *result = NULL;
399	if (pid) {
400		struct hlist_node *first;
401		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
402					      lockdep_tasklist_lock_is_held());
403		if (first)
404			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
405	}
406	return result;
407}
408EXPORT_SYMBOL(pid_task);
409
410/*
411 * Must be called under rcu_read_lock().
412 */
413struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
414{
415	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
416			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
417	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
418}
419
420struct task_struct *find_task_by_vpid(pid_t vnr)
421{
422	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
423}
424
425struct task_struct *find_get_task_by_vpid(pid_t nr)
426{
427	struct task_struct *task;
428
429	rcu_read_lock();
430	task = find_task_by_vpid(nr);
431	if (task)
432		get_task_struct(task);
433	rcu_read_unlock();
434
435	return task;
436}
437
438struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
439{
440	struct pid *pid;
441	rcu_read_lock();
442	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
443	rcu_read_unlock();
444	return pid;
445}
446EXPORT_SYMBOL_GPL(get_task_pid);
447
448struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
449{
450	struct task_struct *result;
451	rcu_read_lock();
452	result = pid_task(pid, type);
453	if (result)
454		get_task_struct(result);
455	rcu_read_unlock();
456	return result;
457}
458EXPORT_SYMBOL_GPL(get_pid_task);
459
460struct pid *find_get_pid(pid_t nr)
461{
462	struct pid *pid;
463
464	rcu_read_lock();
465	pid = get_pid(find_vpid(nr));
466	rcu_read_unlock();
467
468	return pid;
469}
470EXPORT_SYMBOL_GPL(find_get_pid);
471
472pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
473{
474	struct upid *upid;
475	pid_t nr = 0;
476
477	if (pid && ns->level <= pid->level) {
478		upid = &pid->numbers[ns->level];
479		if (upid->ns == ns)
480			nr = upid->nr;
481	}
482	return nr;
483}
484EXPORT_SYMBOL_GPL(pid_nr_ns);
485
486pid_t pid_vnr(struct pid *pid)
487{
488	return pid_nr_ns(pid, task_active_pid_ns(current));
489}
490EXPORT_SYMBOL_GPL(pid_vnr);
491
492pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
493			struct pid_namespace *ns)
494{
495	pid_t nr = 0;
496
497	rcu_read_lock();
498	if (!ns)
499		ns = task_active_pid_ns(current);
500	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
501	rcu_read_unlock();
502
503	return nr;
504}
505EXPORT_SYMBOL(__task_pid_nr_ns);
506
507struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
508{
509	return ns_of_pid(task_pid(tsk));
510}
511EXPORT_SYMBOL_GPL(task_active_pid_ns);
512
513/*
514 * Used by proc to find the first pid that is greater than or equal to nr.
515 *
516 * If there is a pid at nr this function is exactly the same as find_pid_ns.
517 */
518struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
519{
520	return idr_get_next(&ns->idr, &nr);
521}
522
523struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
524{
525	struct fd f;
526	struct pid *pid;
527
528	f = fdget(fd);
529	if (!f.file)
530		return ERR_PTR(-EBADF);
531
532	pid = pidfd_pid(f.file);
533	if (!IS_ERR(pid)) {
534		get_pid(pid);
535		*flags = f.file->f_flags;
536	}
537
538	fdput(f);
539	return pid;
540}
541
542/**
543 * pidfd_create() - Create a new pid file descriptor.
544 *
545 * @pid:   struct pid that the pidfd will reference
546 * @flags: flags to pass
547 *
548 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
549 *
550 * Note that this function can only be called after the fd table has
551 * been unshared to avoid leaking the pidfd to the new process.
552 *
553 * Return: On success, a cloexec pidfd is returned.
554 *         On error, a negative errno number will be returned.
555 */
556static int pidfd_create(struct pid *pid, unsigned int flags)
557{
558	int fd;
559
560	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
561			      flags | O_RDWR | O_CLOEXEC);
562	if (fd < 0)
563		put_pid(pid);
564
565	return fd;
566}
567
568/**
569 * pidfd_open() - Open new pid file descriptor.
570 *
571 * @pid:   pid for which to retrieve a pidfd
572 * @flags: flags to pass
573 *
574 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
575 * the process identified by @pid. Currently, the process identified by
576 * @pid must be a thread-group leader. This restriction currently exists
577 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
578 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
579 * leaders).
580 *
581 * Return: On success, a cloexec pidfd is returned.
582 *         On error, a negative errno number will be returned.
583 */
584SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
585{
586	int fd;
587	struct pid *p;
588
589	if (flags & ~PIDFD_NONBLOCK)
590		return -EINVAL;
591
592	if (pid <= 0)
593		return -EINVAL;
594
595	p = find_get_pid(pid);
596	if (!p)
597		return -ESRCH;
598
599	if (pid_has_task(p, PIDTYPE_TGID))
600		fd = pidfd_create(p, flags);
601	else
602		fd = -EINVAL;
603
604	put_pid(p);
605	return fd;
606}
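
PIDFD_NONBLOCK, the only flag pidfd_open() accepts here (added in Linux 5.10), makes a later waitid(P_PIDFD, ...) fail with EAGAIN instead of blocking while the child is still running. A minimal sketch, with fallback defines in case the libc headers predate these constants:

#define _GNU_SOURCE
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434
#endif
#ifndef PIDFD_NONBLOCK
#define PIDFD_NONBLOCK 04000	/* == O_NONBLOCK on most architectures */
#endif
#ifndef P_PIDFD
#define P_PIDFD 3		/* waitid() idtype for pidfds, Linux 5.4+ */
#endif

int main(void)
{
	siginfo_t info = { 0 };
	pid_t child;
	int pidfd;

	child = fork();
	if (child < 0) {
		perror("fork");
		return 1;
	}
	if (child == 0) {
		sleep(1);
		_exit(42);
	}

	pidfd = syscall(__NR_pidfd_open, child, PIDFD_NONBLOCK);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* Nonblocking pidfd: waitid() returns EAGAIN until the child exits. */
	while (waitid(P_PIDFD, pidfd, &info, WEXITED) < 0) {
		if (errno != EAGAIN) {
			perror("waitid");
			return 1;
		}
		usleep(100 * 1000);	/* real code would sleep in poll(2) */
	}
	printf("child exited with status %d\n", info.si_status);
	close(pidfd);
	return 0;
}
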
607
608void __init pid_idr_init(void)
609{
610	/* Verify no one has done anything silly: */
611	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);
612
613	/* bump default and minimum pid_max based on number of cpus */
614	pid_max = min(pid_max_max, max_t(int, pid_max,
615				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
616	pid_max_min = max_t(int, pid_max_min,
617				PIDS_PER_CPU_MIN * num_possible_cpus());
618	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
619
620	idr_init(&init_pid_ns.idr);
621
622	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
623			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
624}
625
626static struct file *__pidfd_fget(struct task_struct *task, int fd)
627{
628	struct file *file;
629	int ret;
630
631	ret = down_read_killable(&task->signal->exec_update_lock);
632	if (ret)
633		return ERR_PTR(ret);
634
635	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
636		file = fget_task(task, fd);
637	else
638		file = ERR_PTR(-EPERM);
639
640	up_read(&task->signal->exec_update_lock);
641
642	return file ?: ERR_PTR(-EBADF);
643}
644
645static int pidfd_getfd(struct pid *pid, int fd)
646{
647	struct task_struct *task;
648	struct file *file;
649	int ret;
650
651	task = get_pid_task(pid, PIDTYPE_PID);
652	if (!task)
653		return -ESRCH;
654
655	file = __pidfd_fget(task, fd);
656	put_task_struct(task);
657	if (IS_ERR(file))
658		return PTR_ERR(file);
659
660	ret = receive_fd(file, O_CLOEXEC);
661	fput(file);
662
663	return ret;
664}
665
666/**
667 * sys_pidfd_getfd() - Get a file descriptor from another process
668 *
669 * @pidfd:	the pidfd file descriptor of the process
670 * @fd:		the file descriptor number to get
671 * @flags:	flags on how to get the fd (reserved)
672 *
673 * This syscall gets a copy of a file descriptor from another process
674 * based on the pidfd and the file descriptor number. It requires that
675 * the calling process has the ability to ptrace the process represented
676 * by the pidfd. The process which is having its file descriptor copied
677 * is otherwise unaffected.
678 *
679 * Return: On success, a cloexec file descriptor is returned.
680 *         On error, a negative errno number will be returned.
681 */
682SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
683		unsigned int, flags)
684{
685	struct pid *pid;
686	struct fd f;
687	int ret;
688
689	/* flags is currently unused - make sure it's unset */
690	if (flags)
691		return -EINVAL;
692
693	f = fdget(pidfd);
694	if (!f.file)
695		return -EBADF;
696
697	pid = pidfd_pid(f.file);
698	if (IS_ERR(pid))
699		ret = PTR_ERR(pid);
700	else
701		ret = pidfd_getfd(pid, fd);
702
703	fdput(f);
704	return ret;
705}
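
A userspace sketch of the syscall above, assuming the asm-generic syscall numbers (434 for pidfd_open, 438 for pidfd_getfd). The caller needs ptrace permission (PTRACE_MODE_ATTACH_REALCREDS) over the target, and the target's own descriptor table is left untouched:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434
#endif
#ifndef __NR_pidfd_getfd
#define __NR_pidfd_getfd 438
#endif

int main(int argc, char *argv[])
{
	int pidfd, fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <fd>\n", argv[0]);
		return 1;
	}

	pidfd = syscall(__NR_pidfd_open, (pid_t)atoi(argv[1]), 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* Copy the target's descriptor into our table (O_CLOEXEC is set). */
	fd = syscall(__NR_pidfd_getfd, pidfd, atoi(argv[2]), 0);
	if (fd < 0) {
		perror("pidfd_getfd");
		return 1;
	}
	printf("fd %s of pid %s duplicated as local fd %d\n",
	       argv[2], argv[1], fd);
	close(fd);
	close(pidfd);
	return 0;
}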