v3.15
 
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, the cost is scanning 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
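
/*
 * Editor's note, not part of the original source: a worked example of the
 * pidmap arithmetic, assuming 4 KiB pages so that BITS_PER_PAGE == 32768.
 * PID 40000 then lives in pidmap[1] at bit offset 7232, and
 * mk_pid(ns, &ns->pidmap[1], 7232) reconstructs 1 * 32768 + 7232 == 40000.
 */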

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.nr_hashed = PIDNS_HASH_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.proc_inum = PROC_PID_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
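
/*
 * Editor's sketch, not part of the original source: pid_before() orders
 * candidates by their unsigned distance from 'base', which makes the
 * comparison safe across PID wraparound. A minimal self-check:
 */
#if 0	/* illustrative only */
	/* Walking up from 100, 150 is reached before 200 ... */
	BUG_ON(pid_before(100, 150, 200) != 1);
	/* ... while 50 lies beyond the wrap point, so it comes after 200. */
	BUG_ON(pid_before(100, 50, 200) != 0);
#endif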

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value.  We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			for ( ; ; ) {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				if (offset >= BITS_PER_PAGE)
					break;
				pid = mk_pid(pid_ns, map, offset);
				if (pid >= pid_max)
					break;
			}
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch (--ns->nr_hashed) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_HASH_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->nr_hashed = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	rcu_lockdep_assert(rcu_read_lock_held(),
			   "find_task_by_pid_ns() needs rcu_read_lock()"
			   " protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
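
/*
 * Editor's sketch, not part of the original source: how a caller such as
 * /proc's readdir can walk every pid in a namespace on top of
 * find_ge_pid(); 'ns' and the loop body are assumed context.
 */
#if 0	/* illustrative only */
	struct pid *iter;
	int nr = 1;

	rcu_read_lock();
	while ((iter = find_ge_pid(nr, ns)) != NULL) {
		/* ... use 'iter' under RCU ... */
		nr = pid_nr_ns(iter, ns) + 1;
	}
	rcu_read_unlock();
#endif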

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* Verify no one has done anything silly */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, the cost is scanning 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <linux/pidfs.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;
/*
 * Pseudo filesystems start inode numbering after one. We use Reserved
 * PIDs as a natural offset.
 */
static u64 pidfs_ino = RESERVED_PIDS;

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.ns.count = REFCOUNT_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
	.memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
		      size_t set_tid_size)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	/*
	 * set_tid_size contains the size of the set_tid array. Starting at
	 * the most nested currently active PID namespace it tells alloc_pid()
	 * which PID to set for a process in that most nested PID namespace
	 * up to set_tid_size PID namespaces. It does not have to set the PID
	 * for a process in all nested PID namespaces but set_tid_size must
	 * never be greater than the current ns->level + 1.
	 */
	if (set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int tid = 0;

		if (set_tid_size) {
			tid = set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max)
				goto out_free;
			/*
			 * Also fail if a PID != 1 is requested and
			 * no PID 1 exists.
			 */
			if (tid != 1 && !tmp->child_reaper)
				goto out_free;
			retval = -EPERM;
			if (!checkpoint_restore_ns_capable(tmp->user_ns))
				goto out_free;
			set_tid_size--;
		}

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum wrap back to RESERVED_PIDS
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max, GFP_ATOMIC);
		}
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	/*
	 * ENOMEM is not the most obvious choice especially for the case
	 * where the child subreaper has already exited and the pid
	 * namespace denies the creation of any new processes. But ENOMEM
	 * is what we have exposed to userspace for a long time and it is
	 * documented behavior for pid namespaces. So we can't easily
	 * change it even if there were an error code better suited.
	 */
	retval = -ENOMEM;

	get_pid_ns(ns);
	refcount_set(&pid->count, 1);
	spin_lock_init(&pid->lock);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	init_waitqueue_head(&pid->wait_pidfd);
	INIT_HLIST_HEAD(&pid->inodes);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	pid->stashed = NULL;
	pid->ino = ++pidfs_ino;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
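
/*
 * Editor's sketch, not part of the original source: the set_tid path above
 * is what backs clone3()'s set_tid/set_tid_size fields, used by
 * checkpoint/restore to re-create a task with specific PIDs. A minimal
 * userspace caller, assuming a clone3-capable kernel and
 * CAP_CHECKPOINT_RESTORE in the relevant user namespace:
 */
#if 0	/* illustrative only */
#include <linux/sched.h>	/* struct clone_args */
#include <signal.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static pid_t clone3_with_pid(pid_t want)
{
	pid_t tids[1] = { want };	/* one entry: the current pid ns */
	struct clone_args args = {
		.exit_signal	= SIGCHLD,
		.set_tid	= (__u64)(uintptr_t)tids,
		.set_tid_size	= 1,
	};

	return syscall(__NR_clone3, &args, sizeof(args));
}
#endif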

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	if (type == PIDTYPE_PID) {
		WARN_ON_ONCE(pid_has_task(pid, PIDTYPE_PID));
		wake_up_all(&pid->wait_pidfd);
	}

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
	struct pid *pid1 = left->thread_pid;
	struct pid *pid2 = right->thread_pid;
	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

	/* Swap the single entry tid lists */
	hlists_swap_heads_rcu(head1, head2);

	/* Swap the per task_struct pid */
	rcu_assign_pointer(left->thread_pid, pid2);
	rcu_assign_pointer(right->thread_pid, pid1);

	/* Swap the cached value */
	WRITE_ONCE(left->pid, pid_nr(pid2));
	WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	WARN_ON_ONCE(type == PIDTYPE_PID);
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
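
/*
 * Editor's sketch, not part of the original source: how the numbers[]
 * array resolves per-namespace views for a pid created two levels deep.
 * The namespace names below (mid_ns, leaf_ns, other_ns) are hypothetical.
 */
#if 0	/* illustrative only */
	/* numbers[0] = { .nr = 12345, .ns = &init_pid_ns } */
	/* numbers[1] = { .nr =    17, .ns = mid_ns       } */
	/* numbers[2] = { .nr =     4, .ns = leaf_ns      } */
	pid_nr_ns(pid, &init_pid_ns);	/* -> 12345 */
	pid_nr_ns(pid, leaf_ns);	/* -> 4 */
	pid_nr_ns(pid, other_ns);	/* -> 0: not an ancestor */
#endif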

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
EXPORT_SYMBOL_GPL(find_ge_pid);

struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
	CLASS(fd, f)(fd);
	struct pid *pid;

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(fd_file(f));
	if (!IS_ERR(pid)) {
		get_pid(pid);
		*flags = fd_file(f)->f_flags;
	}
	return pid;
}

/**
 * pidfd_get_task() - Get the task associated with a pidfd
 *
 * @pidfd: pidfd for which to get the task
 * @flags: flags associated with this pidfd
 *
 * Return the task associated with @pidfd. The function takes a reference on
 * the returned task. The caller is responsible for releasing that reference.
 *
 * Return: On success, the task_struct associated with the pidfd.
 *	   On error, a negative errno number will be returned.
 */
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
	unsigned int f_flags;
	struct pid *pid;
	struct task_struct *task;

	pid = pidfd_get_pid(pidfd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	task = get_pid_task(pid, PIDTYPE_TGID);
	put_pid(pid);
	if (!task)
		return ERR_PTR(-ESRCH);

	*flags = f_flags;
	return task;
}

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * This symbol should not be explicitly exported to loadable modules.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid, unsigned int flags)
{
	int pidfd;
	struct file *pidfd_file;

	pidfd = pidfd_prepare(pid, flags, &pidfd_file);
	if (pidfd < 0)
		return pidfd;

	fd_install(pidfd, pidfd_file);
	return pidfd;
}

/**
 * sys_pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the task identified by @pid. Without the PIDFD_THREAD flag, the target
 * task must be a thread-group leader.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD))
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	fd = pidfd_create(p, flags);

	put_pid(p);
	return fd;
}
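
/*
 * Editor's sketch, not part of the original source: a typical userspace
 * consumer opens a pidfd and polls it; the fd becomes readable once the
 * target exits. Raw syscall(2) is used in case libc lacks a wrapper.
 */
#if 0	/* illustrative only */
#include <poll.h>
#include <sys/syscall.h>
#include <unistd.h>

static int wait_for_exit(pid_t pid)
{
	struct pollfd pfd = { .events = POLLIN };

	pfd.fd = syscall(SYS_pidfd_open, pid, 0);
	if (pfd.fd < 0)
		return -1;
	poll(&pfd, 1, -1);	/* blocks until the target exits */
	close(pfd.fd);
	return 0;
}
#endif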

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = kmem_cache_create("pid",
			struct_size_t(struct pid, numbers, 1),
			__alignof__(struct pid),
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
			NULL);
}

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
	struct file *file;
	int ret;

	ret = down_read_killable(&task->signal->exec_update_lock);
	if (ret)
		return ERR_PTR(ret);

	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
		file = fget_task(task, fd);
	else
		file = ERR_PTR(-EPERM);

	up_read(&task->signal->exec_update_lock);

	if (!file) {
		/*
		 * It is possible that the target thread is exiting; it can be
		 * either:
		 * 1. before exit_signals(), which gives a real fd
		 * 2. before exit_files() takes the task_lock(), which gives
		 *    a real fd
		 * 3. after exit_files() releases task_lock(), ->files is NULL;
		 *    this has PF_EXITING, since it was set in exit_signals(),
		 *    __pidfd_fget() returns EBADF.
		 * In case 3 we get EBADF, but that really means ESRCH, since
		 * the task is currently exiting and has freed its files
		 * struct, so we fix it up.
		 */
		if (task->flags & PF_EXITING)
			file = ERR_PTR(-ESRCH);
		else
			file = ERR_PTR(-EBADF);
	}

	return file;
}

static int pidfd_getfd(struct pid *pid, int fd)
{
	struct task_struct *task;
	struct file *file;
	int ret;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	file = __pidfd_fget(task, fd);
	put_task_struct(task);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = receive_fd(file, NULL, O_CLOEXEC);
	fput(file);

	return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd:	the pidfd file descriptor of the process
 * @fd:		the file descriptor number to get
 * @flags:	flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd and the file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
		unsigned int, flags)
{
	struct pid *pid;

	/* flags is currently unused - make sure it's unset */
	if (flags)
		return -EINVAL;

	CLASS(fd, f)(pidfd);
	if (fd_empty(f))
		return -EBADF;

	pid = pidfd_pid(fd_file(f));
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	return pidfd_getfd(pid, fd);
}
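
/*
 * Editor's sketch, not part of the original source: a userspace caller
 * duplicating fd 'targetfd' out of the process behind 'pidfd'; the caller
 * must be able to ptrace the target (PTRACE_MODE_ATTACH_REALCREDS above).
 */
#if 0	/* illustrative only */
#include <sys/syscall.h>
#include <unistd.h>

static int grab_fd(int pidfd, int targetfd)
{
	/* Returns a new O_CLOEXEC fd in the caller, or -1 with errno set. */
	return syscall(SYS_pidfd_getfd, pidfd, targetfd, 0);
}
#endif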