/* fs/proc/base.c — Linux v4.6 source listing (web-browser navigation chrome removed) */
   1/*
   2 *  linux/fs/proc/base.c
   3 *
   4 *  Copyright (C) 1991, 1992 Linus Torvalds
   5 *
   6 *  proc base directory handling functions
   7 *
   8 *  1999, Al Viro. Rewritten. Now it covers the whole per-process part.
   9 *  Instead of using magical inumbers to determine the kind of object
  10 *  we allocate and fill in-core inodes upon lookup. They don't even
  11 *  go into icache. We cache the reference to task_struct upon lookup too.
  12 *  Eventually it should become a filesystem in its own. We don't use the
  13 *  rest of procfs anymore.
  14 *
  15 *
  16 *  Changelog:
  17 *  17-Jan-2005
  18 *  Allan Bezerra
  19 *  Bruna Moreira <bruna.moreira@indt.org.br>
  20 *  Edjard Mota <edjard.mota@indt.org.br>
  21 *  Ilias Biris <ilias.biris@indt.org.br>
  22 *  Mauricio Lin <mauricio.lin@indt.org.br>
  23 *
  24 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
  25 *
  26 *  A new process specific entry (smaps) included in /proc. It shows the
  27 *  size of rss for each memory area. The maps entry lacks information
  28 *  about physical memory size (rss) for each mapped file, i.e.,
  29 *  rss information for executables and library files.
  30 *  This additional information is useful for any tools that need to know
  31 *  about physical memory consumption for a process specific library.
  32 *
  33 *  Changelog:
  34 *  21-Feb-2005
  35 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
  36 *  Pud inclusion in the page table walking.
  37 *
  38 *  ChangeLog:
  39 *  10-Mar-2005
  40 *  10LE Instituto Nokia de Tecnologia - INdT:
   41 *  A better way to walk through the page table as suggested by Hugh Dickins.
  42 *
  43 *  Simo Piiroinen <simo.piiroinen@nokia.com>:
  44 *  Smaps information related to shared, private, clean and dirty pages.
  45 *
  46 *  Paul Mundt <paul.mundt@nokia.com>:
  47 *  Overall revision about smaps.
  48 */
  49
  50#include <asm/uaccess.h>
  51
  52#include <linux/errno.h>
  53#include <linux/time.h>
  54#include <linux/proc_fs.h>
  55#include <linux/stat.h>
  56#include <linux/task_io_accounting_ops.h>
  57#include <linux/init.h>
  58#include <linux/capability.h>
  59#include <linux/file.h>
  60#include <linux/fdtable.h>
  61#include <linux/string.h>
  62#include <linux/seq_file.h>
  63#include <linux/namei.h>
  64#include <linux/mnt_namespace.h>
  65#include <linux/mm.h>
  66#include <linux/swap.h>
  67#include <linux/rcupdate.h>
  68#include <linux/kallsyms.h>
  69#include <linux/stacktrace.h>
  70#include <linux/resource.h>
  71#include <linux/module.h>
  72#include <linux/mount.h>
  73#include <linux/security.h>
  74#include <linux/ptrace.h>
  75#include <linux/tracehook.h>
  76#include <linux/printk.h>
  77#include <linux/cgroup.h>
  78#include <linux/cpuset.h>
  79#include <linux/audit.h>
  80#include <linux/poll.h>
  81#include <linux/nsproxy.h>
  82#include <linux/oom.h>
  83#include <linux/elf.h>
  84#include <linux/pid_namespace.h>
  85#include <linux/user_namespace.h>
  86#include <linux/fs_struct.h>
  87#include <linux/slab.h>
  88#include <linux/flex_array.h>
  89#include <linux/posix-timers.h>
  90#ifdef CONFIG_HARDWALL
  91#include <asm/hardwall.h>
  92#endif
  93#include <trace/events/oom.h>
  94#include "internal.h"
  95#include "fd.h"
  96
  97/* NOTE:
  98 *	Implementing inode permission operations in /proc is almost
  99 *	certainly an error.  Permission checks need to happen during
 100 *	each system call not at open time.  The reason is that most of
 101 *	what we wish to check for permissions in /proc varies at runtime.
 102 *
 103 *	The classic example of a problem is opening file descriptors
 104 *	in /proc for a task before it execs a suid executable.
 105 */
 106
/*
 * One row of a /proc/<pid> directory table: the entry name plus the
 * inode/file operations used to instantiate the entry on lookup.
 */
struct pid_entry {
	const char *name;			/* entry name */
	int len;				/* strlen(name), precomputed by NOD() */
	umode_t mode;				/* file type and permission bits */
	const struct inode_operations *iop;
	const struct file_operations *fop;
	union proc_op op;			/* per-entry payload (show fn, link fn, ...) */
};

/* Build one pid_entry; len comes from the string literal at compile time. */
#define NOD(NAME, MODE, IOP, FOP, OP) {			\
	.name = (NAME),					\
	.len  = sizeof(NAME) - 1,			\
	.mode = MODE,					\
	.iop  = IOP,					\
	.fop  = FOP,					\
	.op   = OP,					\
}

/* Directory entry. */
#define DIR(NAME, MODE, iops, fops)	\
	NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
/* Symlink entry resolved through get_link(). */
#define LNK(NAME, get_link)					\
	NOD(NAME, (S_IFLNK|S_IRWXUGO),				\
		&proc_pid_link_inode_operations, NULL,		\
		{ .proc_get_link = get_link } )
/* Regular file with explicit file_operations. */
#define REG(NAME, MODE, fops)				\
	NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
/* Regular file rendered by a single seq_file show() callback. */
#define ONE(NAME, MODE, show)				\
	NOD(NAME, (S_IFREG|(MODE)), 			\
		NULL, &proc_single_file_operations,	\
		{ .proc_show = show } )
 137
 
 
 138/*
 139 * Count the number of hardlinks for the pid_entry table, excluding the .
 140 * and .. links.
 141 */
 142static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
 143	unsigned int n)
 144{
 145	unsigned int i;
 146	unsigned int count;
 147
 148	count = 0;
 149	for (i = 0; i < n; ++i) {
 150		if (S_ISDIR(entries[i].mode))
 151			++count;
 152	}
 153
 154	return count;
 155}
 156
 157static int get_task_root(struct task_struct *task, struct path *root)
 158{
 159	int result = -ENOENT;
 160
 161	task_lock(task);
 162	if (task->fs) {
 163		get_fs_root(task->fs, root);
 164		result = 0;
 165	}
 166	task_unlock(task);
 167	return result;
 168}
 169
 170static int proc_cwd_link(struct dentry *dentry, struct path *path)
 171{
 172	struct task_struct *task = get_proc_task(d_inode(dentry));
 173	int result = -ENOENT;
 174
 175	if (task) {
 176		task_lock(task);
 177		if (task->fs) {
 178			get_fs_pwd(task->fs, path);
 179			result = 0;
 180		}
 181		task_unlock(task);
 182		put_task_struct(task);
 183	}
 184	return result;
 185}
 186
 187static int proc_root_link(struct dentry *dentry, struct path *path)
 188{
 189	struct task_struct *task = get_proc_task(d_inode(dentry));
 190	int result = -ENOENT;
 191
 192	if (task) {
 193		result = get_task_root(task, path);
 194		put_task_struct(task);
 195	}
 196	return result;
 197}
 198
/*
 * Read /proc/<pid>/cmdline.
 *
 * The command line lives in the target's user address space between
 * arg_start and arg_end; setproctitle()-style rewrites may make a single
 * string spill past arg_end into the environment area, which is why the
 * ENVP range is consulted as well.  All remote reads are inherently racy
 * against the target mutating its own memory.
 *
 * Returns the number of bytes copied to @buf, 0 at EOF (or before the
 * process has a command line), or a negative errno.
 */
static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
				     size_t _count, loff_t *pos)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	char *page;
	unsigned long count = _count;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long len1, len2, len;
	unsigned long p;
	char c;
	ssize_t rv;

	BUG_ON(*pos < 0);

	tsk = get_proc_task(file_inode(file));
	if (!tsk)
		return -ESRCH;
	mm = get_task_mm(tsk);
	put_task_struct(tsk);
	if (!mm)
		return 0;
	/* Check if process spawned far enough to have cmdline. */
	if (!mm->env_end) {
		rv = 0;
		goto out_mmput;
	}

	/* Bounce page for copying out of the remote address space. */
	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page) {
		rv = -ENOMEM;
		goto out_mmput;
	}

	/* Snapshot the arg/env boundaries under mmap_sem. */
	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	BUG_ON(arg_start > arg_end);
	BUG_ON(env_start > env_end);

	len1 = arg_end - arg_start;	/* size of ARGV area */
	len2 = env_end - env_start;	/* size of ENVP area */

	/* Empty ARGV. */
	if (len1 == 0) {
		rv = 0;
		goto out_free_page;
	}
	/*
	 * Inherently racy -- command line shares address space
	 * with code and data.
	 */
	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
	if (rv <= 0)
		goto out_free_page;

	rv = 0;

	/* The last ARGV byte tells us which layout we are dealing with. */
	if (c == '\0') {
		/* Command line (set of strings) occupies whole ARGV. */
		if (len1 <= *pos)
			goto out_free_page;

		p = arg_start + *pos;
		len = len1 - *pos;
		while (count > 0 && len > 0) {
			unsigned int _count;
			int nr_read;

			_count = min3(count, len, PAGE_SIZE);
			nr_read = access_remote_vm(mm, p, page, _count, 0);
			if (nr_read < 0)
				rv = nr_read;
			if (nr_read <= 0)
				goto out_free_page;

			if (copy_to_user(buf, page, nr_read)) {
				rv = -EFAULT;
				goto out_free_page;
			}

			p	+= nr_read;
			len	-= nr_read;
			buf	+= nr_read;
			count	-= nr_read;
			rv	+= nr_read;
		}
	} else {
		/*
		 * Command line (1 string) occupies ARGV and maybe
		 * extends into ENVP.
		 */
		if (len1 + len2 <= *pos)
			goto skip_argv_envp;
		if (len1 <= *pos)
			goto skip_argv;

		p = arg_start + *pos;
		len = len1 - *pos;
		while (count > 0 && len > 0) {
			unsigned int _count, l;
			int nr_read;
			bool final;

			_count = min3(count, len, PAGE_SIZE);
			nr_read = access_remote_vm(mm, p, page, _count, 0);
			if (nr_read < 0)
				rv = nr_read;
			if (nr_read <= 0)
				goto out_free_page;

			/*
			 * Command line can be shorter than whole ARGV
			 * even if last "marker" byte says it is not.
			 */
			final = false;
			l = strnlen(page, nr_read);
			if (l < nr_read) {
				nr_read = l;	/* stop at embedded NUL */
				final = true;
			}

			if (copy_to_user(buf, page, nr_read)) {
				rv = -EFAULT;
				goto out_free_page;
			}

			p	+= nr_read;
			len	-= nr_read;
			buf	+= nr_read;
			count	-= nr_read;
			rv	+= nr_read;

			if (final)
				goto out_free_page;
		}
skip_argv:
		/*
		 * Command line (1 string) occupies ARGV and
		 * extends into ENVP.
		 */
		if (len1 <= *pos) {
			/* Resuming inside the ENVP spill-over. */
			p = env_start + *pos - len1;
			len = len1 + len2 - *pos;
		} else {
			p = env_start;
			len = len2;
		}
		while (count > 0 && len > 0) {
			unsigned int _count, l;
			int nr_read;
			bool final;

			_count = min3(count, len, PAGE_SIZE);
			nr_read = access_remote_vm(mm, p, page, _count, 0);
			if (nr_read < 0)
				rv = nr_read;
			if (nr_read <= 0)
				goto out_free_page;

			/* Find EOS. */
			final = false;
			l = strnlen(page, nr_read);
			if (l < nr_read) {
				nr_read = l;
				final = true;
			}

			if (copy_to_user(buf, page, nr_read)) {
				rv = -EFAULT;
				goto out_free_page;
			}

			p	+= nr_read;
			len	-= nr_read;
			buf	+= nr_read;
			count	-= nr_read;
			rv	+= nr_read;

			if (final)
				goto out_free_page;
		}
skip_argv_envp:
		;
	}

out_free_page:
	free_page((unsigned long)page);
out_mmput:
	mmput(mm);
	if (rv > 0)
		*pos += rv;
	return rv;
}
 397
/* File operations for /proc/<pid>/cmdline. */
static const struct file_operations proc_pid_cmdline_ops = {
	.read	= proc_pid_cmdline_read,
	.llseek	= generic_file_llseek,
};
 402
/*
 * /proc/<pid>/auxv: dump the ELF auxiliary vector saved in the mm at
 * exec time, as raw (type, value) word pairs up to and including the
 * AT_NULL terminator.
 */
static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
			 struct pid *pid, struct task_struct *task)
{
	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (mm && !IS_ERR(mm)) {
		unsigned int nwords = 0;
		do {
			nwords += 2;	/* one (type, value) pair per iteration */
		} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
		seq_write(m, mm->saved_auxv, nwords * sizeof(mm->saved_auxv[0]));
		mmput(mm);
		return 0;
	} else
		/*
		 * NOTE(review): when mm == NULL (no address space),
		 * PTR_ERR(NULL) is 0, so the file reads as empty rather
		 * than failing — presumably intentional; confirm.
		 */
		return PTR_ERR(mm);
}
 418
 419
#ifdef CONFIG_KALLSYMS
/*
 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
 * Returns the resolved symbol.  If that fails, simply return the address.
 */
static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
			  struct pid *pid, struct task_struct *task)
{
	unsigned long wchan;
	char symname[KSYM_NAME_LEN];

	wchan = get_wchan(task);

	/*
	 * Only reveal the symbol if the caller could ptrace the task;
	 * otherwise (or if the task isn't blocked) emit "0".
	 */
	if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
			&& !lookup_symbol_name(wchan, symname))
		seq_printf(m, "%s", symname);
	else
		seq_putc(m, '0');

	return 0;
}
#endif /* CONFIG_KALLSYMS */
 442
 443static int lock_trace(struct task_struct *task)
 444{
 445	int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
 446	if (err)
 447		return err;
 448	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
 449		mutex_unlock(&task->signal->cred_guard_mutex);
 450		return -EPERM;
 451	}
 452	return 0;
 453}
 454
/* Release the cred_guard_mutex taken by lock_trace(). */
static void unlock_trace(struct task_struct *task)
{
	mutex_unlock(&task->signal->cred_guard_mutex);
}
 459
#ifdef CONFIG_STACKTRACE

/* Maximum number of frames captured for /proc/<pid>/stack. */
#define MAX_STACK_TRACE_DEPTH	64

/*
 * /proc/<pid>/stack: save and print the task's kernel stack trace, one
 * "[<address>] symbol" line per frame.  Requires ATTACH-level ptrace
 * access (enforced by lock_trace()).
 */
static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
			  struct pid *pid, struct task_struct *task)
{
	struct stack_trace trace;
	unsigned long *entries;
	int err;
	int i;

	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	trace.nr_entries	= 0;
	trace.max_entries	= MAX_STACK_TRACE_DEPTH;
	trace.entries		= entries;
	trace.skip		= 0;

	/* Hold cred_guard_mutex across the capture + print. */
	err = lock_trace(task);
	if (!err) {
		save_stack_trace_tsk(task, &trace);

		for (i = 0; i < trace.nr_entries; i++) {
			/* %pK hides the raw address from unprivileged readers. */
			seq_printf(m, "[<%pK>] %pS\n",
				   (void *)entries[i], (void *)entries[i]);
		}
		unlock_trace(task);
	}
	kfree(entries);

	return err;
}
#endif
 496
 497#ifdef CONFIG_SCHED_INFO
 498/*
 499 * Provides /proc/PID/schedstat
 500 */
 501static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
 502			      struct pid *pid, struct task_struct *task)
 503{
 504	if (unlikely(!sched_info_on()))
 505		seq_printf(m, "0 0 0\n");
 506	else
 507		seq_printf(m, "%llu %llu %lu\n",
 508		   (unsigned long long)task->se.sum_exec_runtime,
 509		   (unsigned long long)task->sched_info.run_delay,
 510		   task->sched_info.pcount);
 511
 512	return 0;
 513}
 514#endif
 515
#ifdef CONFIG_LATENCYTOP
/*
 * Show /proc/<pid>/latency: one line per populated latency_record slot,
 * "count time max" followed by the recorded backtrace symbols.
 */
static int lstats_show_proc(struct seq_file *m, void *v)
{
	int i;
	struct inode *inode = m->private;
	struct task_struct *task = get_proc_task(inode);

	if (!task)
		return -ESRCH;
	seq_puts(m, "Latency Top version : v0.1\n");
	/* NOTE(review): 32 presumably matches the latency_record[] slot count — confirm. */
	for (i = 0; i < 32; i++) {
		struct latency_record *lr = &task->latency_record[i];
		if (lr->backtrace[0]) {	/* slot in use? */
			int q;
			seq_printf(m, "%i %li %li",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];
				if (!bt)
					break;
				/* NOTE(review): ULONG_MAX looks like an end-of-trace marker — confirm. */
				if (bt == ULONG_MAX)
					break;
				seq_printf(m, " %ps", (void *)bt);
			}
			seq_putc(m, '\n');
		}

	}
	put_task_struct(task);
	return 0;
}
 547
/* Open /proc/<pid>/latency as a single_open seq_file; inode is the private cookie. */
static int lstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, lstats_show_proc, inode);
}
 552
 553static ssize_t lstats_write(struct file *file, const char __user *buf,
 554			    size_t count, loff_t *offs)
 555{
 556	struct task_struct *task = get_proc_task(file_inode(file));
 557
 558	if (!task)
 559		return -ESRCH;
 560	clear_all_latency_tracing(task);
 561	put_task_struct(task);
 562
 563	return count;
 564}
 565
/* File operations for /proc/<pid>/latency (CONFIG_LATENCYTOP). */
static const struct file_operations proc_lstats_operations = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
 573
 574#endif
 575
/*
 * /proc/<pid>/oom_score: the task's current OOM badness normalized to
 * the 0..1000 range used by the OOM killer heuristics.
 */
static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
			  struct pid *pid, struct task_struct *task)
{
	unsigned long totalpages = totalram_pages + total_swap_pages;
	unsigned long points = 0;

	/* tasklist_lock keeps the task from being released while scored. */
	read_lock(&tasklist_lock);
	if (pid_alive(task))
		points = oom_badness(task, NULL, NULL, totalpages) *
						1000 / totalpages;
	read_unlock(&tasklist_lock);
	seq_printf(m, "%lu\n", points);

	return 0;
}
 591
/* Human-readable label and unit for one rlimit row in /proc/<pid>/limits. */
struct limit_names {
	const char *name;	/* row label */
	const char *unit;	/* unit string, or NULL for unitless limits */
};

/* Table indexed by RLIMIT_* constants; order must match <uapi/asm-generic/resource.h>. */
static const struct limit_names lnames[RLIM_NLIMITS] = {
	[RLIMIT_CPU] = {"Max cpu time", "seconds"},
	[RLIMIT_FSIZE] = {"Max file size", "bytes"},
	[RLIMIT_DATA] = {"Max data size", "bytes"},
	[RLIMIT_STACK] = {"Max stack size", "bytes"},
	[RLIMIT_CORE] = {"Max core file size", "bytes"},
	[RLIMIT_RSS] = {"Max resident set", "bytes"},
	[RLIMIT_NPROC] = {"Max processes", "processes"},
	[RLIMIT_NOFILE] = {"Max open files", "files"},
	[RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"},
	[RLIMIT_AS] = {"Max address space", "bytes"},
	[RLIMIT_LOCKS] = {"Max file locks", "locks"},
	[RLIMIT_SIGPENDING] = {"Max pending signals", "signals"},
	[RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"},
	[RLIMIT_NICE] = {"Max nice priority", NULL},
	[RLIMIT_RTPRIO] = {"Max realtime priority", NULL},
	[RLIMIT_RTTIME] = {"Max realtime timeout", "us"},
};
 615
 616/* Display limits for a process */
 617static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
 618			   struct pid *pid, struct task_struct *task)
 619{
 620	unsigned int i;
 
 621	unsigned long flags;
 
 622
 623	struct rlimit rlim[RLIM_NLIMITS];
 624
 625	if (!lock_task_sighand(task, &flags))
 626		return 0;
 627	memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
 628	unlock_task_sighand(task, &flags);
 629
 630	/*
 631	 * print the file header
 632	 */
 633       seq_printf(m, "%-25s %-20s %-20s %-10s\n",
 634		  "Limit", "Soft Limit", "Hard Limit", "Units");
 635
 636	for (i = 0; i < RLIM_NLIMITS; i++) {
 637		if (rlim[i].rlim_cur == RLIM_INFINITY)
 638			seq_printf(m, "%-25s %-20s ",
 639				   lnames[i].name, "unlimited");
 640		else
 641			seq_printf(m, "%-25s %-20lu ",
 642				   lnames[i].name, rlim[i].rlim_cur);
 643
 644		if (rlim[i].rlim_max == RLIM_INFINITY)
 645			seq_printf(m, "%-20s ", "unlimited");
 646		else
 647			seq_printf(m, "%-20lu ", rlim[i].rlim_max);
 
 648
 649		if (lnames[i].unit)
 650			seq_printf(m, "%-10s\n", lnames[i].unit);
 
 651		else
 652			seq_putc(m, '\n');
 653	}
 654
 655	return 0;
 656}
 657
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
/*
 * /proc/<pid>/syscall: report the syscall the task is blocked in.
 * Output is "running", "<nr> sp pc" for a non-syscall stop (nr < 0),
 * or "<nr> arg0..arg5 sp pc".  Requires ATTACH-level ptrace access.
 */
static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *task)
{
	long nr;
	unsigned long args[6], sp, pc;
	int res;

	res = lock_trace(task);
	if (res)
		return res;

	if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
		seq_puts(m, "running\n");
	else if (nr < 0)
		seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
	else
		seq_printf(m,
		       "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
		       nr,
		       args[0], args[1], args[2], args[3], args[4], args[5],
		       sp, pc);
	unlock_trace(task);

	return 0;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
 685
 686/************************************************************************/
 687/*                       Here the fs part begins                        */
 688/************************************************************************/
 689
 690/* permission checks */
 691static int proc_fd_access_allowed(struct inode *inode)
 692{
 693	struct task_struct *task;
 694	int allowed = 0;
 695	/* Allow access to a task's file descriptors if it is us or we
 696	 * may use ptrace attach to the process and find out that
 697	 * information.
 698	 */
 699	task = get_proc_task(inode);
 700	if (task) {
 701		allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
 702		put_task_struct(task);
 703	}
 704	return allowed;
 705}
 706
 707int proc_setattr(struct dentry *dentry, struct iattr *attr)
 708{
 709	int error;
 710	struct inode *inode = d_inode(dentry);
 711
 712	if (attr->ia_valid & ATTR_MODE)
 713		return -EPERM;
 714
 715	error = inode_change_ok(inode, attr);
 716	if (error)
 717		return error;
 718
 
 
 
 
 
 
 
 719	setattr_copy(inode, attr);
 720	mark_inode_dirty(inode);
 721	return 0;
 722}
 723
 724/*
 725 * May current process learn task's sched/cmdline info (for hide_pid_min=1)
 726 * or euid/egid (for hide_pid_min=2)?
 727 */
 728static bool has_pid_permissions(struct pid_namespace *pid,
 729				 struct task_struct *task,
 730				 int hide_pid_min)
 731{
 732	if (pid->hide_pid < hide_pid_min)
 733		return true;
 734	if (in_group_p(pid->pid_gid))
 735		return true;
 736	return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
 737}
 738
 739
 740static int proc_pid_permission(struct inode *inode, int mask)
 741{
 742	struct pid_namespace *pid = inode->i_sb->s_fs_info;
 743	struct task_struct *task;
 744	bool has_perms;
 745
 746	task = get_proc_task(inode);
 747	if (!task)
 748		return -ESRCH;
 749	has_perms = has_pid_permissions(pid, task, 1);
 750	put_task_struct(task);
 751
 752	if (!has_perms) {
 753		if (pid->hide_pid == 2) {
 754			/*
 755			 * Let's make getdents(), stat(), and open()
 756			 * consistent with each other.  If a process
 757			 * may not stat() a file, it shouldn't be seen
 758			 * in procfs at all.
 759			 */
 760			return -ENOENT;
 761		}
 762
 763		return -EPERM;
 764	}
 765	return generic_permission(inode, mask);
 766}
 767
 768
 769
/* Default inode ops for /proc/<pid> entries: only setattr (which rejects chmod). */
static const struct inode_operations proc_def_inode_operations = {
	.setattr	= proc_setattr,
};
 773
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 774static int proc_single_show(struct seq_file *m, void *v)
 775{
 776	struct inode *inode = m->private;
 777	struct pid_namespace *ns;
 778	struct pid *pid;
 779	struct task_struct *task;
 780	int ret;
 781
 782	ns = inode->i_sb->s_fs_info;
 783	pid = proc_pid(inode);
 784	task = get_pid_task(pid, PIDTYPE_PID);
 785	if (!task)
 786		return -ESRCH;
 787
 788	ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);
 789
 790	put_task_struct(task);
 791	return ret;
 792}
 793
/* Open helper for ONE() entries; the inode is passed as seq_file private data. */
static int proc_single_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, proc_single_show, inode);
}
 798
/* File operations shared by all ONE() entries. */
static const struct file_operations proc_single_file_operations = {
	.open		= proc_single_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
 805
 806
/*
 * Resolve the target task's mm with the given ptrace access @mode.
 * On success the mm_struct is pinned via mm_count (the struct cannot be
 * freed) but its address space is deliberately NOT pinned (mm_users is
 * dropped again), so users must re-take mm_users before touching memory
 * (see mem_rw()).  Returns an ERR_PTR on failure; may also return NULL
 * from mm_access() — callers check for that.
 */
struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
{
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = ERR_PTR(-ESRCH);

	if (task) {
		mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
		put_task_struct(task);

		if (!IS_ERR_OR_NULL(mm)) {
			/* ensure this mm_struct can't be freed */
			atomic_inc(&mm->mm_count);
			/* but do not pin its memory */
			mmput(mm);
		}
	}

	return mm;
}
 826
 827static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
 828{
 829	struct mm_struct *mm = proc_mem_open(inode, mode);
 830
 831	if (IS_ERR(mm))
 832		return PTR_ERR(mm);
 833
 
 
 
 
 
 
 
 
 
 834	file->private_data = mm;
 
 835	return 0;
 836}
 837
 838static int mem_open(struct inode *inode, struct file *file)
 839{
 840	int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
 841
 842	/* OK to pass negative loff_t, we can catch out-of-range */
 843	file->f_mode |= FMODE_UNSIGNED_OFFSET;
 844
 845	return ret;
 846}
 847
/*
 * Common engine for /proc/<pid>/mem reads and writes: move up to @count
 * bytes between @buf and the target address space at *@ppos, a page at
 * a time through a bounce page.  @write selects the direction.
 * Returns bytes transferred, or a negative errno if nothing moved.
 */
static ssize_t mem_rw(struct file *file, char __user *buf,
			size_t count, loff_t *ppos, int write)
{
	struct mm_struct *mm = file->private_data;
	unsigned long addr = *ppos;
	ssize_t copied;
	char *page;

	if (!mm)
		return 0;	/* target had no mm at open time */

	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		return -ENOMEM;

	copied = 0;
	/* Only mm_count is pinned; bail out if the mm has no users left. */
	if (!atomic_inc_not_zero(&mm->mm_users))
		goto free;

	while (count > 0) {
		int this_len = min_t(int, count, PAGE_SIZE);

		if (write && copy_from_user(page, buf, this_len)) {
			copied = -EFAULT;
			break;
		}

		this_len = access_remote_vm(mm, addr, page, this_len, write);
		if (!this_len) {
			/* Report -EIO only if nothing was transferred yet. */
			if (!copied)
				copied = -EIO;
			break;
		}

		if (!write && copy_to_user(buf, page, this_len)) {
			copied = -EFAULT;
			break;
		}

		buf += this_len;
		addr += this_len;
		copied += this_len;
		count -= this_len;
	}
	*ppos = addr;

	mmput(mm);
free:
	free_page((unsigned long) page);
	return copied;
}
 899
/* Read side of /proc/<pid>/mem; thin wrapper over mem_rw(). */
static ssize_t mem_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	return mem_rw(file, buf, count, ppos, 0);
}
 905
/* Write side of /proc/<pid>/mem; the cast drops const for the shared mem_rw(). */
static ssize_t mem_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	return mem_rw(file, (char __user*)buf, count, ppos, 1);
}
 911
 912loff_t mem_lseek(struct file *file, loff_t offset, int orig)
 913{
 914	switch (orig) {
 915	case 0:
 916		file->f_pos = offset;
 917		break;
 918	case 1:
 919		file->f_pos += offset;
 920		break;
 921	default:
 922		return -EINVAL;
 923	}
 924	force_successful_syscall_return();
 925	return file->f_pos;
 926}
 927
 928static int mem_release(struct inode *inode, struct file *file)
 929{
 930	struct mm_struct *mm = file->private_data;
 931	if (mm)
 932		mmdrop(mm);
 933	return 0;
 934}
 935
/* File operations for /proc/<pid>/mem. */
static const struct file_operations proc_mem_operations = {
	.llseek		= mem_lseek,
	.read		= mem_read,
	.write		= mem_write,
	.open		= mem_open,
	.release	= mem_release,
};
 943
/* Open /proc/<pid>/environ; READ-level ptrace access is sufficient. */
static int environ_open(struct inode *inode, struct file *file)
{
	return __mem_open(inode, file, PTRACE_MODE_READ);
}
 948
/*
 * Read /proc/<pid>/environ: copy bytes out of the target's
 * [env_start, env_end) range, a page at a time through a bounce page.
 * *@ppos is the offset into the environment area.
 */
static ssize_t environ_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	char *page;
	unsigned long src = *ppos;
	int ret = 0;
	struct mm_struct *mm = file->private_data;
	unsigned long env_start, env_end;

	/* Ensure the process spawned far enough to have an environment. */
	if (!mm || !mm->env_end)
		return 0;

	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		return -ENOMEM;

	ret = 0;
	/* mm is only pinned by mm_count; re-take mm_users for the copy. */
	if (!atomic_inc_not_zero(&mm->mm_users))
		goto free;

	/* Snapshot the environment boundaries under mmap_sem. */
	down_read(&mm->mmap_sem);
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	while (count > 0) {
		size_t this_len, max_len;
		int retval;

		if (src >= (env_end - env_start))
			break;	/* past the end of the environment */

		this_len = env_end - (env_start + src);

		max_len = min_t(size_t, PAGE_SIZE, count);
		this_len = min(max_len, this_len);

		retval = access_remote_vm(mm, (env_start + src),
			page, this_len, 0);

		if (retval <= 0) {
			ret = retval;
			break;
		}

		if (copy_to_user(buf, page, retval)) {
			ret = -EFAULT;
			break;
		}

		ret += retval;
		src += retval;
		buf += retval;
		count -= retval;
	}
	*ppos = src;
	mmput(mm);

free:
	free_page((unsigned long) page);
	return ret;
}
1012
/* File operations for /proc/<pid>/environ. */
static const struct file_operations proc_environ_operations = {
	.open		= environ_open,
	.read		= environ_read,
	.llseek		= generic_file_llseek,
	.release	= mem_release,
};
1019
/*
 * Read /proc/<pid>/oom_adj: reconstruct the legacy oom_adj value by
 * mapping the task's oom_score_adj back onto the old exponential scale.
 */
static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	char buffer[PROC_NUMBUF];
	int oom_adj = OOM_ADJUST_MIN;
	size_t len;
	unsigned long flags;

	if (!task)
		return -ESRCH;
	if (lock_task_sighand(task, &flags)) {
		if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
			oom_adj = OOM_ADJUST_MAX;
		else
			/* Inverse of the linear mapping done in oom_adj_write(). */
			oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
				  OOM_SCORE_ADJ_MAX;
		unlock_task_sighand(task, &flags);
	}
	put_task_struct(task);
	len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
	return simple_read_from_buffer(buf, count, ppos, buffer, len);
}
1043
/*
 * /proc/pid/oom_adj exists solely for backwards compatibility with previous
 * kernels.  The effective policy is defined by oom_score_adj, which has a
 * different scale: oom_adj grew exponentially and oom_score_adj grows linearly.
 * Values written to oom_adj are simply mapped linearly to oom_score_adj.
 * Processes that become oom disabled via oom_adj will still be oom disabled
 * with this implementation.
 *
 * oom_adj cannot be removed since existing userspace binaries use it.
 */
static ssize_t oom_adj_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	int oom_adj;
	unsigned long flags;
	int err;

	/* Copy and parse the user-supplied decimal value. */
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	err = kstrtoint(strstrip(buffer), 0, &oom_adj);
	if (err)
		goto out;
	if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
	     oom_adj != OOM_DISABLE) {
		err = -EINVAL;
		goto out;
	}

	task = get_proc_task(file_inode(file));
	if (!task) {
		err = -ESRCH;
		goto out;
	}

	/* task_lock then sighand lock — keep this ordering. */
	task_lock(task);
	if (!task->mm) {
		err = -EINVAL;
		goto err_task_lock;
	}

	if (!lock_task_sighand(task, &flags)) {
		err = -ESRCH;
		goto err_task_lock;
	}

	/*
	 * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
	 * value is always attainable.
	 */
	if (oom_adj == OOM_ADJUST_MAX)
		oom_adj = OOM_SCORE_ADJ_MAX;
	else
		oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;

	/* Lowering the score below the current value needs CAP_SYS_RESOURCE. */
	if (oom_adj < task->signal->oom_score_adj &&
	    !capable(CAP_SYS_RESOURCE)) {
		err = -EACCES;
		goto err_sighand;
	}

	/*
	 * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
	 * /proc/pid/oom_score_adj instead.
	 */
	pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
		  current->comm, task_pid_nr(current), task_pid_nr(task),
		  task_pid_nr(task));

	task->signal->oom_score_adj = oom_adj;
	trace_oom_score_adj_update(task);
err_sighand:
	unlock_task_sighand(task, &flags);
err_task_lock:
	task_unlock(task);
	put_task_struct(task);
out:
	return err < 0 ? err : count;
}
1130
/* File operations for the legacy /proc/<pid>/oom_adj. */
static const struct file_operations proc_oom_adj_operations = {
	.read		= oom_adj_read,
	.write		= oom_adj_write,
	.llseek		= generic_file_llseek,
};
1136
1137static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
1138					size_t count, loff_t *ppos)
1139{
1140	struct task_struct *task = get_proc_task(file_inode(file));
1141	char buffer[PROC_NUMBUF];
1142	short oom_score_adj = OOM_SCORE_ADJ_MIN;
1143	unsigned long flags;
1144	size_t len;
1145
1146	if (!task)
1147		return -ESRCH;
1148	if (lock_task_sighand(task, &flags)) {
1149		oom_score_adj = task->signal->oom_score_adj;
1150		unlock_task_sighand(task, &flags);
1151	}
1152	put_task_struct(task);
1153	len = snprintf(buffer, sizeof(buffer), "%hd\n", oom_score_adj);
1154	return simple_read_from_buffer(buf, count, ppos, buffer, len);
1155}
1156
/*
 * Write /proc/<pid>/oom_score_adj: parse and range-check the value,
 * then update the task's oom_score_adj under the sighand lock.
 * Lowering below oom_score_adj_min requires CAP_SYS_RESOURCE; a
 * capable writer also lowers the floor itself.
 */
static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	unsigned long flags;
	int oom_score_adj;
	int err;

	/* Copy and parse the user-supplied decimal value. */
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
	if (err)
		goto out;
	if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
			oom_score_adj > OOM_SCORE_ADJ_MAX) {
		err = -EINVAL;
		goto out;
	}

	task = get_proc_task(file_inode(file));
	if (!task) {
		err = -ESRCH;
		goto out;
	}

	/* task_lock then sighand lock — keep this ordering. */
	task_lock(task);
	if (!task->mm) {
		err = -EINVAL;
		goto err_task_lock;
	}

	if (!lock_task_sighand(task, &flags)) {
		err = -ESRCH;
		goto err_task_lock;
	}

	if ((short)oom_score_adj < task->signal->oom_score_adj_min &&
			!capable(CAP_SYS_RESOURCE)) {
		err = -EACCES;
		goto err_sighand;
	}

	task->signal->oom_score_adj = (short)oom_score_adj;
	if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
		task->signal->oom_score_adj_min = (short)oom_score_adj;
	trace_oom_score_adj_update(task);

err_sighand:
	unlock_task_sighand(task, &flags);
err_task_lock:
	task_unlock(task);
	put_task_struct(task);
out:
	return err < 0 ? err : count;
}
1219
/* file_operations for /proc/<pid>/oom_score_adj */
static const struct file_operations proc_oom_score_adj_operations = {
	.read		= oom_score_adj_read,
	.write		= oom_score_adj_write,
	.llseek		= default_llseek,
};
1225
1226#ifdef CONFIG_AUDITSYSCALL
1227#define TMPBUFLEN 21
1228static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
1229				  size_t count, loff_t *ppos)
1230{
1231	struct inode * inode = file_inode(file);
1232	struct task_struct *task = get_proc_task(inode);
1233	ssize_t length;
1234	char tmpbuf[TMPBUFLEN];
1235
1236	if (!task)
1237		return -ESRCH;
1238	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1239			   from_kuid(file->f_cred->user_ns,
1240				     audit_get_loginuid(task)));
1241	put_task_struct(task);
1242	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1243}
1244
/*
 * Write handler for /proc/<pid>/loginuid.  Only the task itself may set
 * its loginuid; partial writes are rejected.  AUDIT_UID_UNSET clears
 * the loginuid, any other value is mapped through the opener's user
 * namespace before being handed to the audit subsystem.
 */
static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
				   size_t count, loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	uid_t loginuid;
	kuid_t kloginuid;
	int rv;

	/* only the task may write its own loginuid */
	rcu_read_lock();
	if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	if (*ppos != 0) {
		/* No partial writes. */
		return -EINVAL;
	}

	rv = kstrtou32_from_user(buf, count, 10, &loginuid);
	if (rv < 0)
		return rv;

	/* is userspace trying to explicitly UNSET the loginuid? */
	if (loginuid == AUDIT_UID_UNSET) {
		kloginuid = INVALID_UID;
	} else {
		kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
		if (!uid_valid(kloginuid))
			return -EINVAL;
	}

	rv = audit_set_loginuid(kloginuid);
	if (rv < 0)
		return rv;
	return count;
}
1283
/* file_operations for /proc/<pid>/loginuid (CONFIG_AUDITSYSCALL) */
static const struct file_operations proc_loginuid_operations = {
	.read		= proc_loginuid_read,
	.write		= proc_loginuid_write,
	.llseek		= generic_file_llseek,
};
1289
1290static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
1291				  size_t count, loff_t *ppos)
1292{
1293	struct inode * inode = file_inode(file);
1294	struct task_struct *task = get_proc_task(inode);
1295	ssize_t length;
1296	char tmpbuf[TMPBUFLEN];
1297
1298	if (!task)
1299		return -ESRCH;
1300	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1301				audit_get_sessionid(task));
1302	put_task_struct(task);
1303	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1304}
1305
/* file_operations for the read-only /proc/<pid>/sessionid */
static const struct file_operations proc_sessionid_operations = {
	.read		= proc_sessionid_read,
	.llseek		= generic_file_llseek,
};
1310#endif
1311
1312#ifdef CONFIG_FAULT_INJECTION
1313static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
1314				      size_t count, loff_t *ppos)
1315{
1316	struct task_struct *task = get_proc_task(file_inode(file));
1317	char buffer[PROC_NUMBUF];
1318	size_t len;
1319	int make_it_fail;
1320
1321	if (!task)
1322		return -ESRCH;
1323	make_it_fail = task->make_it_fail;
1324	put_task_struct(task);
1325
1326	len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);
1327
1328	return simple_read_from_buffer(buf, count, ppos, buffer, len);
1329}
1330
/*
 * Write handler for /proc/<pid>/make-it-fail.  Requires
 * CAP_SYS_RESOURCE; only 0 and 1 are accepted.
 */
static ssize_t proc_fault_inject_write(struct file * file,
			const char __user * buf, size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	int make_it_fail;
	int rv;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;	/* silently truncate overlong input */
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 0, &make_it_fail);
	if (rv < 0)
		return rv;
	if (make_it_fail < 0 || make_it_fail > 1)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	task->make_it_fail = make_it_fail;
	put_task_struct(task);

	return count;
}
1360
/* file_operations for /proc/<pid>/make-it-fail (CONFIG_FAULT_INJECTION) */
static const struct file_operations proc_fault_inject_operations = {
	.read		= proc_fault_inject_read,
	.write		= proc_fault_inject_write,
	.llseek		= generic_file_llseek,
};
1366#endif
1367
1368
1369#ifdef CONFIG_SCHED_DEBUG
1370/*
1371 * Print out various scheduling related per-task fields:
1372 */
1373static int sched_show(struct seq_file *m, void *v)
1374{
1375	struct inode *inode = m->private;
1376	struct task_struct *p;
1377
1378	p = get_proc_task(inode);
1379	if (!p)
1380		return -ESRCH;
1381	proc_sched_show_task(p, m);
1382
1383	put_task_struct(p);
1384
1385	return 0;
1386}
1387
1388static ssize_t
1389sched_write(struct file *file, const char __user *buf,
1390	    size_t count, loff_t *offset)
1391{
1392	struct inode *inode = file_inode(file);
1393	struct task_struct *p;
1394
1395	p = get_proc_task(inode);
1396	if (!p)
1397		return -ESRCH;
1398	proc_sched_set_task(p);
1399
1400	put_task_struct(p);
1401
1402	return count;
1403}
1404
/* open callback: bind the seq_file to this pid's inode */
static int sched_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_show, inode);
}
1409
/* file_operations for /proc/<pid>/sched (CONFIG_SCHED_DEBUG) */
static const struct file_operations proc_pid_sched_operations = {
	.open		= sched_open,
	.read		= seq_read,
	.write		= sched_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1417
1418#endif
1419
1420#ifdef CONFIG_SCHED_AUTOGROUP
1421/*
1422 * Print out autogroup related information:
1423 */
/* seq_file show callback for /proc/<pid>/autogroup */
static int sched_autogroup_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	proc_sched_autogroup_show_task(p, m);

	put_task_struct(p);

	return 0;
}
1438
/*
 * Write handler for /proc/<pid>/autogroup: parse a nice value and apply
 * it to the task's autogroup.  Returns @count on success or a negative
 * errno.
 */
static ssize_t
sched_autogroup_write(struct file *file, const char __user *buf,
	    size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;
	char buffer[PROC_NUMBUF];
	int nice;
	int err;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;	/* silently truncate overlong input */
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;

	err = kstrtoint(strstrip(buffer), 0, &nice);
	if (err < 0)
		return err;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	err = proc_sched_autogroup_set_nice(p, nice);
	if (err)
		count = err;	/* propagate the error as the return value */

	put_task_struct(p);

	return count;
}
1471
/*
 * open callback: single_open() then stash the inode in seq_file
 * private data for sched_autogroup_show().
 */
static int sched_autogroup_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = single_open(filp, sched_autogroup_show, NULL);
	if (!ret) {
		struct seq_file *m = filp->private_data;

		m->private = inode;
	}
	return ret;
}
1484
/* file_operations for /proc/<pid>/autogroup (CONFIG_SCHED_AUTOGROUP) */
static const struct file_operations proc_pid_sched_autogroup_operations = {
	.open		= sched_autogroup_open,
	.read		= seq_read,
	.write		= sched_autogroup_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1492
1493#endif /* CONFIG_SCHED_AUTOGROUP */
1494
/*
 * Write handler for /proc/<pid>/comm: set the task's comm name.  Only
 * tasks in the writer's own thread group may be renamed; input is
 * truncated to TASK_COMM_LEN-1 bytes but the full @count is still
 * reported as consumed.
 */
static ssize_t comm_write(struct file *file, const char __user *buf,
				size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;
	char buffer[TASK_COMM_LEN];
	const size_t maxlen = sizeof(buffer) - 1;

	memset(buffer, 0, sizeof(buffer));
	if (copy_from_user(buffer, buf, count > maxlen ? maxlen : count))
		return -EFAULT;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	if (same_thread_group(current, p))
		set_task_comm(p, buffer);
	else
		count = -EINVAL;	/* not our thread group */

	put_task_struct(p);

	return count;
}
1520
1521static int comm_show(struct seq_file *m, void *v)
1522{
1523	struct inode *inode = m->private;
1524	struct task_struct *p;
1525
1526	p = get_proc_task(inode);
1527	if (!p)
1528		return -ESRCH;
1529
1530	task_lock(p);
1531	seq_printf(m, "%s\n", p->comm);
1532	task_unlock(p);
1533
1534	put_task_struct(p);
1535
1536	return 0;
1537}
1538
/* open callback: bind the seq_file to this pid's inode */
static int comm_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, comm_show, inode);
}
1543
/* file_operations for /proc/<pid>/comm */
static const struct file_operations proc_pid_set_comm_operations = {
	.open		= comm_open,
	.read		= seq_read,
	.write		= comm_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1551
1552static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
1553{
1554	struct task_struct *task;
1555	struct mm_struct *mm;
1556	struct file *exe_file;
1557
1558	task = get_proc_task(d_inode(dentry));
1559	if (!task)
1560		return -ENOENT;
1561	mm = get_task_mm(task);
1562	put_task_struct(task);
1563	if (!mm)
1564		return -ENOENT;
1565	exe_file = get_mm_exe_file(mm);
1566	mmput(mm);
1567	if (exe_file) {
1568		*exe_path = exe_file->f_path;
1569		path_get(&exe_file->f_path);
1570		fput(exe_file);
1571		return 0;
1572	} else
1573		return -ENOENT;
1574}
1575
/*
 * get_link callback shared by the /proc/<pid> symlinks (exe, cwd,
 * root, ...).  Delegates to the per-entry proc_get_link op and jumps
 * straight to the resolved path via nd_jump_link() instead of
 * returning a target string.
 */
static const char *proc_pid_get_link(struct dentry *dentry,
				     struct inode *inode,
				     struct delayed_call *done)
{
	struct path path;
	int error = -EACCES;

	/* RCU-walk cannot take the locks needed below */
	if (!dentry)
		return ERR_PTR(-ECHILD);

	/* Are we allowed to snoop on the tasks file descriptors? */
	if (!proc_fd_access_allowed(inode))
		goto out;

	error = PROC_I(inode)->op.proc_get_link(dentry, &path);
	if (error)
		goto out;

	nd_jump_link(&path);
	return NULL;
out:
	return ERR_PTR(error);
}
1599
/*
 * Copy the textual form of @path into the user buffer.  d_path()
 * builds the string at the end of a temporary page; the result is
 * silently truncated to @buflen, matching readlink(2) semantics.
 * Returns the number of bytes copied or a negative errno.
 */
static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
{
	char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
	char *pathname;
	int len;

	if (!tmp)
		return -ENOMEM;

	pathname = d_path(path, tmp, PAGE_SIZE);
	len = PTR_ERR(pathname);
	if (IS_ERR(pathname))
		goto out;
	/* string length, excluding the trailing NUL at the page end */
	len = tmp + PAGE_SIZE - 1 - pathname;

	if (len > buflen)
		len = buflen;
	if (copy_to_user(buffer, pathname, len))
		len = -EFAULT;
 out:
	free_page((unsigned long)tmp);
	return len;
}
1623
1624static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
1625{
1626	int error = -EACCES;
1627	struct inode *inode = d_inode(dentry);
1628	struct path path;
1629
1630	/* Are we allowed to snoop on the tasks file descriptors? */
1631	if (!proc_fd_access_allowed(inode))
1632		goto out;
1633
1634	error = PROC_I(inode)->op.proc_get_link(dentry, &path);
1635	if (error)
1636		goto out;
1637
1638	error = do_proc_readlink(&path, buffer, buflen);
1639	path_put(&path);
1640out:
1641	return error;
1642}
1643
/* inode_operations shared by the /proc/<pid> symlink entries */
const struct inode_operations proc_pid_link_inode_operations = {
	.readlink	= proc_pid_readlink,
	.get_link	= proc_pid_get_link,
	.setattr	= proc_setattr,
};
1649
1650
1651/* building an inode */
1652
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Allocate and initialize a /proc inode for @task on @sb.  The inode
 * pins a reference to the task's struct pid; ownership is set to the
 * task's effective uid/gid only if the task is dumpable, otherwise it
 * stays at the new_inode() default.  Returns NULL on failure.
 */
struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
{
	struct inode * inode;
	struct proc_inode *ei;
	const struct cred *cred;

	/* We need a new inode */

	inode = new_inode(sb);
	if (!inode)
		goto out;

	/* Common stuff */
	ei = PROC_I(inode);
	inode->i_ino = get_next_ino();
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->i_op = &proc_def_inode_operations;

	/*
	 * grab the reference to task.
	 */
	ei->pid = get_task_pid(task, PIDTYPE_PID);
	if (!ei->pid)
		goto out_unlock;

	if (task_dumpable(task)) {
		rcu_read_lock();
		cred = __task_cred(task);
		inode->i_uid = cred->euid;
		inode->i_gid = cred->egid;
		rcu_read_unlock();
	}
	security_task_to_inode(task, inode);

out:
	return inode;

out_unlock:
	/* NOTE: label name is historical - nothing is locked here, we
	 * just drop the half-built inode. */
	iput(inode);
	return NULL;
}
1694
/*
 * getattr for /proc/<pid> entries.  Reports root ownership unless the
 * task is dumpable (or the entry is a world-readable directory), in
 * which case the task's effective uid/gid is used.  Hidden pids
 * (hidepid mount option) yield -ENOENT, consistent with readdir().
 */
int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct task_struct *task;
	const struct cred *cred;
	struct pid_namespace *pid = dentry->d_sb->s_fs_info;

	generic_fillattr(inode, stat);

	rcu_read_lock();
	stat->uid = GLOBAL_ROOT_UID;
	stat->gid = GLOBAL_ROOT_GID;
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		if (!has_pid_permissions(pid, task, 2)) {
			rcu_read_unlock();
			/*
			 * This doesn't prevent learning whether PID exists,
			 * it only makes getattr() consistent with readdir().
			 */
			return -ENOENT;
		}
		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
		    task_dumpable(task)) {
			cred = __task_cred(task);
			stat->uid = cred->euid;
			stat->gid = cred->egid;
		}
	}
	rcu_read_unlock();
	return 0;
}
1727
1728/* dentry stuff */
1729
1730/*
1731 *	Exceptional case: normally we are not allowed to unhash a busy
1732 * directory. In this case, however, we can do it - no aliasing problems
1733 * due to the way we treat inodes.
1734 *
1735 * Rewrite the inode's ownerships here because the owning task may have
1736 * performed a setuid(), etc.
1737 *
1738 * Before the /proc/pid/status file was created the only way to read
1739 * the effective uid of a /process was to stat /proc/pid.  Reading
1740 * /proc/pid/status is slow enough that procps and other packages
1741 * kept stating /proc/pid.  To keep the rules in /proc simple I have
1742 * made this apply to all per process world readable and executable
1743 * directories.
1744 */
/*
 * d_revalidate for /proc/<pid> dentries.  If the task still exists the
 * dentry stays valid, but the inode's uid/gid are refreshed (see the
 * comment above): the task's effective ids when it is dumpable or the
 * entry is a world-readable directory, root otherwise.  Set-id bits
 * are always stripped.  Returns 1 (valid) or 0 (drop the dentry).
 */
int pid_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct inode *inode;
	struct task_struct *task;
	const struct cred *cred;

	/* cannot take references in RCU-walk mode */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	task = get_proc_task(inode);

	if (task) {
		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
		    task_dumpable(task)) {
			rcu_read_lock();
			cred = __task_cred(task);
			inode->i_uid = cred->euid;
			inode->i_gid = cred->egid;
			rcu_read_unlock();
		} else {
			inode->i_uid = GLOBAL_ROOT_UID;
			inode->i_gid = GLOBAL_ROOT_GID;
		}
		inode->i_mode &= ~(S_ISUID | S_ISGID);
		security_task_to_inode(task, inode);
		put_task_struct(task);
		return 1;
	}
	return 0;
}
1776
/* true if no task is attached to the pid this inode refers to */
static inline bool proc_inode_is_dead(struct inode *inode)
{
	return !proc_pid(inode)->tasks[PIDTYPE_PID].first;
}
1781
/* d_delete callback for /proc/<pid> dentries */
int pid_delete_dentry(const struct dentry *dentry)
{
	/* Is the task we represent dead?
	 * If so, then don't put the dentry on the lru list,
	 * kill it immediately.
	 */
	return proc_inode_is_dead(d_inode(dentry));
}
1790
/* dentry_operations shared by most /proc/<pid> entries */
const struct dentry_operations pid_dentry_operations =
{
	.d_revalidate	= pid_revalidate,
	.d_delete	= pid_delete_dentry,
};
1796
1797/* Lookups */
1798
1799/*
1800 * Fill a directory entry.
1801 *
1802 * If possible create the dcache entry and derive our inode number and
1803 * file type from dcache entry.
1804 *
1805 * Since all of the proc inode numbers are dynamically generated, the inode
1806 * numbers do not exist until the inode is cache.  This means creating the
1807 * the dcache entry in readdir is necessary to keep the inode numbers
1808 * reported by readdir in sync with the inode numbers reported
1809 * by stat.
1810 */
/*
 * Emit one directory entry, creating its dcache entry first so that
 * the inode number reported here matches the one a later stat() will
 * see (see the comment above this function).  On instantiation failure
 * the entry is still emitted, with a dummy inode number and DT_UNKNOWN.
 * Returns the dir_emit() result (false when the ctx buffer is full).
 */
bool proc_fill_cache(struct file *file, struct dir_context *ctx,
	const char *name, int len,
	instantiate_t instantiate, struct task_struct *task, const void *ptr)
{
	struct dentry *child, *dir = file->f_path.dentry;
	struct qstr qname = QSTR_INIT(name, len);
	struct inode *inode;
	unsigned type;
	ino_t ino;

	child = d_hash_and_lookup(dir, &qname);
	if (!child) {
		/* not cached yet - allocate and instantiate it now */
		child = d_alloc(dir, &qname);
		if (!child)
			goto end_instantiate;
		if (instantiate(d_inode(dir), child, task, ptr) < 0) {
			dput(child);
			goto end_instantiate;
		}
	}
	inode = d_inode(child);
	ino = inode->i_ino;
	type = inode->i_mode >> 12;	/* file type bits, DT_* encoding */
	dput(child);
	return dir_emit(ctx, name, len, ino, type);

end_instantiate:
	return dir_emit(ctx, name, len, 1, DT_UNKNOWN);
}
1840
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1841/*
1842 * dname_to_vma_addr - maps a dentry name into two unsigned longs
1843 * which represent vma start and end addresses.
1844 */
1845static int dname_to_vma_addr(struct dentry *dentry,
1846			     unsigned long *start, unsigned long *end)
1847{
1848	if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
1849		return -EINVAL;
1850
1851	return 0;
1852}
1853
/*
 * d_revalidate for /proc/<pid>/map_files/<start>-<end> entries: valid
 * only while a VMA with exactly those bounds still exists in the
 * task's mm (checked under PTRACE_MODE_READ access control).  Also
 * refreshes the inode uid/gid like pid_revalidate() does.
 */
static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	unsigned long vm_start, vm_end;
	bool exact_vma_exists = false;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	const struct cred *cred;
	struct inode *inode;
	int status = 0;

	/* cannot take references in RCU-walk mode */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	task = get_proc_task(inode);
	if (!task)
		goto out_notask;

	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm))
		goto out;

	if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
		down_read(&mm->mmap_sem);
		exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
		up_read(&mm->mmap_sem);
	}

	mmput(mm);

	if (exact_vma_exists) {
		if (task_dumpable(task)) {
			rcu_read_lock();
			cred = __task_cred(task);
			inode->i_uid = cred->euid;
			inode->i_gid = cred->egid;
			rcu_read_unlock();
		} else {
			inode->i_uid = GLOBAL_ROOT_UID;
			inode->i_gid = GLOBAL_ROOT_GID;
		}
		security_task_to_inode(task, inode);
		status = 1;
	}

out:
	put_task_struct(task);

out_notask:
	return status;
}
1905
/* dentry_operations for /proc/<pid>/map_files entries */
static const struct dentry_operations tid_map_files_dentry_operations = {
	.d_revalidate	= map_files_d_revalidate,
	.d_delete	= pid_delete_dentry,
};
1910
/*
 * proc_get_link callback for map_files entries: resolve the path of
 * the file mapped by the VMA whose bounds are encoded in the dentry
 * name.  Returns 0 with a referenced *path, or -errno.
 */
static int map_files_get_link(struct dentry *dentry, struct path *path)
{
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct mm_struct *mm;
	int rc;

	rc = -ENOENT;
	task = get_proc_task(d_inode(dentry));
	if (!task)
		goto out;

	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	rc = dname_to_vma_addr(dentry, &vm_start, &vm_end);
	if (rc)
		goto out_mmput;

	rc = -ENOENT;
	down_read(&mm->mmap_sem);
	vma = find_exact_vma(mm, vm_start, vm_end);
	if (vma && vma->vm_file) {
		*path = vma->vm_file->f_path;
		path_get(path);
		rc = 0;
	}
	up_read(&mm->mmap_sem);

out_mmput:
	mmput(mm);
out:
	return rc;
}
1948
/* per-entry snapshot collected by proc_map_files_readdir() pass 1 */
struct map_files_info {
	fmode_t		mode;	/* f_mode of the mapped file */
	unsigned long	len;	/* length of name[] */
	unsigned char	name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */
};
1954
1955/*
1956 * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the
1957 * symlinks may be used to bypass permissions on ancestor directories in the
1958 * path to the file in question.
1959 */
1960static const char *
1961proc_map_files_get_link(struct dentry *dentry,
1962			struct inode *inode,
1963		        struct delayed_call *done)
1964{
1965	if (!capable(CAP_SYS_ADMIN))
1966		return ERR_PTR(-EPERM);
1967
1968	return proc_pid_get_link(dentry, inode, done);
1969}
1970
1971/*
1972 * Identical to proc_pid_link_inode_operations except for get_link()
1973 */
/*
 * Identical to proc_pid_link_inode_operations except for get_link()
 */
static const struct inode_operations proc_map_files_link_inode_operations = {
	.readlink	= proc_pid_readlink,
	.get_link	= proc_map_files_get_link,
	.setattr	= proc_setattr,
};
1979
/*
 * Instantiate one map_files symlink dentry.  @ptr smuggles the mapped
 * file's fmode_t; read/write permission bits of the symlink mirror it.
 */
static int
proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
			   struct task_struct *task, const void *ptr)
{
	fmode_t mode = (fmode_t)(unsigned long)ptr;
	struct proc_inode *ei;
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		return -ENOENT;

	ei = PROC_I(inode);
	ei->op.proc_get_link = map_files_get_link;

	inode->i_op = &proc_map_files_link_inode_operations;
	inode->i_size = 64;
	inode->i_mode = S_IFLNK;

	/* mirror the mapped file's access mode on the symlink */
	if (mode & FMODE_READ)
		inode->i_mode |= S_IRUSR;
	if (mode & FMODE_WRITE)
		inode->i_mode |= S_IWUSR;

	d_set_d_op(dentry, &tid_map_files_dentry_operations);
	d_add(dentry, inode);

	return 0;
}
2009
/*
 * lookup for /proc/<pid>/map_files/<start>-<end>: succeeds only if the
 * caller passes a PTRACE_MODE_READ check and a file-backed VMA with
 * exactly those bounds exists in the task's mm.
 */
static struct dentry *proc_map_files_lookup(struct inode *dir,
		struct dentry *dentry, unsigned int flags)
{
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vma;
	struct task_struct *task;
	int result;
	struct mm_struct *mm;

	result = -ENOENT;
	task = get_proc_task(dir);
	if (!task)
		goto out;

	result = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		goto out_put_task;

	result = -ENOENT;
	if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
		goto out_put_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_put_task;

	down_read(&mm->mmap_sem);
	vma = find_exact_vma(mm, vm_start, vm_end);
	if (!vma)
		goto out_no_vma;

	if (vma->vm_file)
		result = proc_map_files_instantiate(dir, dentry, task,
				(void *)(unsigned long)vma->vm_file->f_mode);

out_no_vma:
	up_read(&mm->mmap_sem);
	mmput(mm);
out_put_task:
	put_task_struct(task);
out:
	return ERR_PTR(result);
}
2053
/* inode_operations for the /proc/<pid>/map_files directory */
static const struct inode_operations proc_map_files_inode_operations = {
	.lookup		= proc_map_files_lookup,
	.permission	= proc_fd_permission,
	.setattr	= proc_setattr,
};
2059
/*
 * readdir for /proc/<pid>/map_files: emit one "<start>-<end>" entry
 * per file-backed VMA, guarded by a PTRACE_MODE_READ check.  VMA data
 * is snapshotted into a flex_array first because proc_fill_cache()
 * may fault (and thus take mmap_sem) - see the comment in the body.
 */
static int
proc_map_files_readdir(struct file *file, struct dir_context *ctx)
{
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long nr_files, pos, i;
	struct flex_array *fa = NULL;
	struct map_files_info info;
	struct map_files_info *p;
	int ret;

	ret = -ENOENT;
	task = get_proc_task(file_inode(file));
	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		goto out_put_task;

	ret = 0;
	if (!dir_emit_dots(file, ctx))
		goto out_put_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_put_task;
	down_read(&mm->mmap_sem);

	nr_files = 0;

	/*
	 * We need two passes here:
	 *
	 *  1) Collect vmas of mapped files with mmap_sem taken
	 *  2) Release mmap_sem and instantiate entries
	 *
	 * otherwise we get lockdep complained, since filldir()
	 * routine might require mmap_sem taken in might_fault().
	 */

	/* pass 1a: count file-backed VMAs not yet emitted (pos starts
	 * at 2 to account for the "." and ".." entries) */
	for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
		if (vma->vm_file && ++pos > ctx->pos)
			nr_files++;
	}

	if (nr_files) {
		fa = flex_array_alloc(sizeof(info), nr_files,
					GFP_KERNEL);
		if (!fa || flex_array_prealloc(fa, 0, nr_files,
						GFP_KERNEL)) {
			ret = -ENOMEM;
			if (fa)
				flex_array_free(fa);
			up_read(&mm->mmap_sem);
			mmput(mm);
			goto out_put_task;
		}
		/* pass 1b: snapshot mode and name of each pending VMA */
		for (i = 0, vma = mm->mmap, pos = 2; vma;
				vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (++pos <= ctx->pos)
				continue;

			info.mode = vma->vm_file->f_mode;
			info.len = snprintf(info.name,
					sizeof(info.name), "%lx-%lx",
					vma->vm_start, vma->vm_end);
			if (flex_array_put(fa, i++, &info, GFP_KERNEL))
				BUG();
		}
	}
	up_read(&mm->mmap_sem);

	/* pass 2: emit the snapshotted entries without mmap_sem held */
	for (i = 0; i < nr_files; i++) {
		p = flex_array_get(fa, i);
		if (!proc_fill_cache(file, ctx,
				      p->name, p->len,
				      proc_map_files_instantiate,
				      task,
				      (void *)(unsigned long)p->mode))
			break;
		ctx->pos++;
	}
	if (fa)
		flex_array_free(fa);
	mmput(mm);

out_put_task:
	put_task_struct(task);
out:
	return ret;
}
2155
/* file_operations for the /proc/<pid>/map_files directory */
static const struct file_operations proc_map_files_operations = {
	.read		= generic_read_dir,
	.iterate	= proc_map_files_readdir,
	.llseek		= default_llseek,
};
2161
2162#ifdef CONFIG_CHECKPOINT_RESTORE
/* iteration state for the /proc/<pid>/timers seq_file */
struct timers_private {
	struct pid *pid;		/* target pid, set at open time */
	struct task_struct *task;	/* held between start and stop */
	struct sighand_struct *sighand;	/* non-NULL while sighand is locked */
	struct pid_namespace *ns;	/* pid namespace of the proc mount */
	unsigned long flags;		/* saved irq flags for sighand lock */
};
2170
/*
 * seq start: pin the task and take its sighand lock so the posix
 * timers list is stable for the whole iteration; timers_stop() undoes
 * both.
 */
static void *timers_start(struct seq_file *m, loff_t *pos)
{
	struct timers_private *tp = m->private;

	tp->task = get_pid_task(tp->pid, PIDTYPE_PID);
	if (!tp->task)
		return ERR_PTR(-ESRCH);

	tp->sighand = lock_task_sighand(tp->task, &tp->flags);
	if (!tp->sighand)
		return ERR_PTR(-ESRCH);

	return seq_list_start(&tp->task->signal->posix_timers, *pos);
}
2185
/* seq next: advance within the posix_timers list */
static void *timers_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct timers_private *tp = m->private;
	return seq_list_next(v, &tp->task->signal->posix_timers, pos);
}
2191
/*
 * seq stop: release the sighand lock and the task reference taken by
 * timers_start().  Both fields are NULLed so a partially-failed start
 * (or repeated stop) is handled safely.
 */
static void timers_stop(struct seq_file *m, void *v)
{
	struct timers_private *tp = m->private;

	if (tp->sighand) {
		unlock_task_sighand(tp->task, &tp->flags);
		tp->sighand = NULL;
	}

	if (tp->task) {
		put_task_struct(tp->task);
		tp->task = NULL;
	}
}
2206
/*
 * seq show: print one posix timer (id, signal, notify method and
 * target pid/tid in the proc mount's pid namespace, clock id).
 */
static int show_timer(struct seq_file *m, void *v)
{
	struct k_itimer *timer;
	struct timers_private *tp = m->private;
	int notify;
	static const char * const nstr[] = {
		[SIGEV_SIGNAL] = "signal",
		[SIGEV_NONE] = "none",
		[SIGEV_THREAD] = "thread",
	};

	timer = list_entry((struct list_head *)v, struct k_itimer, list);
	notify = timer->it_sigev_notify;

	seq_printf(m, "ID: %d\n", timer->it_id);
	seq_printf(m, "signal: %d/%p\n",
		   timer->sigq->info.si_signo,
		   timer->sigq->info.si_value.sival_ptr);
	/* SIGEV_THREAD_ID is a flag on top of the base notify method */
	seq_printf(m, "notify: %s/%s.%d\n",
		   nstr[notify & ~SIGEV_THREAD_ID],
		   (notify & SIGEV_THREAD_ID) ? "tid" : "pid",
		   pid_nr_ns(timer->it_pid, tp->ns));
	seq_printf(m, "ClockID: %d\n", timer->it_clock);

	return 0;
}
2233
/* seq_operations for /proc/<pid>/timers */
static const struct seq_operations proc_timers_seq_ops = {
	.start	= timers_start,
	.next	= timers_next,
	.stop	= timers_stop,
	.show	= show_timer,
};
2240
/*
 * open callback for /proc/<pid>/timers: allocate the timers_private
 * iteration state and record the target pid and pid namespace.
 */
static int proc_timers_open(struct inode *inode, struct file *file)
{
	struct timers_private *tp;

	tp = __seq_open_private(file, &proc_timers_seq_ops,
			sizeof(struct timers_private));
	if (!tp)
		return -ENOMEM;

	tp->pid = proc_pid(inode);
	tp->ns = inode->i_sb->s_fs_info;
	return 0;
}
2254
/* file_operations for /proc/<pid>/timers (CONFIG_CHECKPOINT_RESTORE) */
static const struct file_operations proc_timers_operations = {
	.open		= proc_timers_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
2261#endif
2262
/*
 * Write handler for /proc/<pid>/timerslack_ns.  Requires
 * PTRACE_MODE_ATTACH access to the target; writing 0 restores the
 * task's default timer slack, any other value sets it directly.
 */
static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
					size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;
	u64 slack_ns;
	int err;

	err = kstrtoull_from_user(buf, count, 10, &slack_ns);
	if (err < 0)
		return err;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) {
		task_lock(p);
		if (slack_ns == 0)
			p->timer_slack_ns = p->default_timer_slack_ns;
		else
			p->timer_slack_ns = slack_ns;
		task_unlock(p);
	} else
		count = -EPERM;	/* propagate as the return value */

	put_task_struct(p);

	return count;
}
2293
/*
 * seq_file show callback for /proc/<pid>/timerslack_ns: print the
 * task's current timer slack; requires PTRACE_MODE_ATTACH access.
 */
static int timerslack_ns_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;
	int err =  0;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) {
		task_lock(p);
		seq_printf(m, "%llu\n", p->timer_slack_ns);
		task_unlock(p);
	} else
		err = -EPERM;

	put_task_struct(p);

	return err;
}
2315
/* open callback: bind the seq_file to this pid's inode */
static int timerslack_ns_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, timerslack_ns_show, inode);
}
 
2320
/* file_operations for /proc/<pid>/timerslack_ns */
static const struct file_operations proc_pid_set_timerslack_ns_operations = {
	.open		= timerslack_ns_open,
	.read		= seq_read,
	.write		= timerslack_ns_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2328
/*
 * Instantiate one /proc/<pid> directory entry from its pid_entry
 * descriptor (@ptr): build the inode, wire up the entry's i_op/i_fop
 * and pid_ops, and add the dentry.  Returns 0 on success, -ENOENT if
 * the inode cannot be built or the task died meanwhile.
 */
static int proc_pident_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	const struct pid_entry *p = ptr;
	struct inode *inode;
	struct proc_inode *ei;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	ei = PROC_I(inode);
	inode->i_mode = p->mode;
	if (S_ISDIR(inode->i_mode))
		set_nlink(inode, 2);	/* Use getattr to fix if necessary */
	if (p->iop)
		inode->i_op = p->iop;
	if (p->fop)
		inode->i_fop = p->fop;
	ei->op = p->op;
	d_set_d_op(dentry, &pid_dentry_operations);
	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, 0))
		return 0;
out:
	return -ENOENT;
}
2357
/*
 * Shared lookup for fixed /proc/<pid> directory tables: linear-search
 * @ents for a name match and instantiate the entry.
 */
static struct dentry *proc_pident_lookup(struct inode *dir, 
					 struct dentry *dentry,
					 const struct pid_entry *ents,
					 unsigned int nents)
{
	int error;
	struct task_struct *task = get_proc_task(dir);
	const struct pid_entry *p, *last;

	error = -ENOENT;

	if (!task)
		goto out_no_task;

	/*
	 * Yes, it does not scale. And it should not. Don't add
	 * new entries into /proc/<tgid>/ without very good reasons.
	 */
	last = &ents[nents - 1];
	for (p = ents; p <= last; p++) {
		if (p->len != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, p->name, p->len))
			break;
	}
	if (p > last)
		goto out;

	error = proc_pident_instantiate(dir, dentry, task, p);
out:
	put_task_struct(task);
out_no_task:
	return ERR_PTR(error);
}
2392
/*
 * readdir over a pid_entry table: emit "." and ".." first, then the
 * table entries starting at index ctx->pos - 2.
 */
static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
		const struct pid_entry *ents, unsigned int nents)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	const struct pid_entry *p;

	if (!task)
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		goto out;

	/* pos counts ".", ".." and then the nents table entries */
	if (ctx->pos >= nents + 2)
		goto out;

	for (p = ents + (ctx->pos - 2); p <= ents + nents - 1; p++) {
		if (!proc_fill_cache(file, ctx, p->name, p->len,
				proc_pident_instantiate, task, p))
			break;
		ctx->pos++;
	}
out:
	put_task_struct(task);
	return 0;
}
2418
2419#ifdef CONFIG_SECURITY
/* Read an LSM process attribute (/proc/<pid>/attr/<name>); the attribute
 * name is taken straight from the dentry and passed to the security module. */
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
				  size_t count, loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	char *p = NULL;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	if (!task)
		return -ESRCH;

	/* security_getprocattr() allocates the value into p; freed below */
	length = security_getprocattr(task,
				      (char*)file->f_path.dentry->d_name.name,
				      &p);
	put_task_struct(task);
	if (length > 0)
		length = simple_read_from_buffer(buf, count, ppos, p, length);
	kfree(p);
	return length;
}
2440
/* Write an LSM process attribute.  Partial writes are rejected; input is
 * capped at one page and copied in before the security hook is called. */
static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
				   size_t count, loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	void *page;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	length = -ESRCH;
	if (!task)
		goto out_no_task;
	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	/* No partial writes. */
	length = -EINVAL;
	if (*ppos != 0)
		goto out;

	page = memdup_user(buf, count);
	if (IS_ERR(page)) {
		length = PTR_ERR(page);
		goto out;
	}

	/* Guard against adverse ptrace interaction */
	length = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
	if (length < 0)
		goto out_free;

	length = security_setprocattr(task,
				      (char*)file->f_path.dentry->d_name.name,
				      page, count);
	mutex_unlock(&task->signal->cred_guard_mutex);
out_free:
	kfree(page);
out:
	put_task_struct(task);
out_no_task:
	return length;
}
2482
/* File operations shared by every /proc/<pid>/attr/<name> file. */
static const struct file_operations proc_pid_attr_operations = {
	.read		= proc_pid_attr_read,
	.write		= proc_pid_attr_write,
	.llseek		= generic_file_llseek,
};
2488
/* Entries of /proc/<pid>/attr: one file per LSM process attribute. */
static const struct pid_entry attr_dir_stuff[] = {
	REG("current",    S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("prev",       S_IRUGO,	   proc_pid_attr_operations),
	REG("exec",       S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("fscreate",   S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("keycreate",  S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
};
2497
/* readdir for /proc/<pid>/attr, driven by the attr_dir_stuff table. */
static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
{
	return proc_pident_readdir(file, ctx,
				   attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
}
2503
/* Directory file operations for /proc/<pid>/attr. */
static const struct file_operations proc_attr_dir_operations = {
	.read		= generic_read_dir,
	.iterate	= proc_attr_dir_readdir,
	.llseek		= default_llseek,
};
2509
/* lookup for /proc/<pid>/attr, driven by the attr_dir_stuff table. */
static struct dentry *proc_attr_dir_lookup(struct inode *dir,
				struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,
				  attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
}
2516
/* Inode operations for /proc/<pid>/attr. */
static const struct inode_operations proc_attr_dir_inode_operations = {
	.lookup		= proc_attr_dir_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};
2522
2523#endif
2524
2525#ifdef CONFIG_ELF_CORE
/* Read /proc/<pid>/coredump_filter: print the MMF_DUMP_* bits of the
 * task's mm as a zero-padded hex word.  Reads as empty (0) when the task
 * has no mm (e.g. a kernel thread or a zombie). */
static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
					 size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	struct mm_struct *mm;
	char buffer[PROC_NUMBUF];
	size_t len;
	int ret;

	if (!task)
		return -ESRCH;

	ret = 0;
	mm = get_task_mm(task);
	if (mm) {
		/* shift the filter field down so userspace sees bit 0 first */
		len = snprintf(buffer, sizeof(buffer), "%08lx\n",
			       ((mm->flags & MMF_DUMP_FILTER_MASK) >>
				MMF_DUMP_FILTER_SHIFT));
		mmput(mm);
		ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
	}

	put_task_struct(task);

	return ret;
}
2552
/* Write /proc/<pid>/coredump_filter: parse a bitmask (base 0, so hex or
 * decimal) and update the mm's MMF_DUMP_* flags one bit at a time with
 * the atomic set_bit/clear_bit helpers. */
static ssize_t proc_coredump_filter_write(struct file *file,
					  const char __user *buf,
					  size_t count,
					  loff_t *ppos)
{
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned int val;
	int ret;
	int i;
	unsigned long mask;

	ret = kstrtouint_from_user(buf, count, 0, &val);
	if (ret < 0)
		return ret;

	ret = -ESRCH;
	task = get_proc_task(file_inode(file));
	if (!task)
		goto out_no_task;

	/* tasks without an mm (kernel threads, zombies) report -ESRCH */
	mm = get_task_mm(task);
	if (!mm)
		goto out_no_mm;
	ret = 0;

	for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
		if (val & mask)
			set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
		else
			clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
	}

	mmput(mm);
 out_no_mm:
	put_task_struct(task);
 out_no_task:
	if (ret < 0)
		return ret;
	return count;
}
2594
/* File operations for /proc/<pid>/coredump_filter. */
static const struct file_operations proc_coredump_filter_operations = {
	.read		= proc_coredump_filter_read,
	.write		= proc_coredump_filter_write,
	.llseek		= generic_file_llseek,
};
2600#endif
2601
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2602#ifdef CONFIG_TASK_IO_ACCOUNTING
/*
 * Emit the I/O accounting counters for /proc/<pid>/io.
 * @whole: non-zero for thread-group totals — adds signal->ioac (counters
 * of already-exited threads) plus every live thread's counters; zero for
 * the single task only.  cred_guard_mutex is held across the ptrace
 * check to keep it from racing with exec changing credentials.
 */
static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
{
	struct task_io_accounting acct = task->ioac;
	unsigned long flags;
	int result;

	result = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (result)
		return result;

	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
		result = -EACCES;
		goto out_unlock;
	}

	/* sighand lock keeps the thread list stable while we iterate it */
	if (whole && lock_task_sighand(task, &flags)) {
		struct task_struct *t = task;

		task_io_accounting_add(&acct, &task->signal->ioac);
		while_each_thread(task, t)
			task_io_accounting_add(&acct, &t->ioac);

		unlock_task_sighand(task, &flags);
	}
	seq_printf(m,
		   "rchar: %llu\n"
		   "wchar: %llu\n"
		   "syscr: %llu\n"
		   "syscw: %llu\n"
		   "read_bytes: %llu\n"
		   "write_bytes: %llu\n"
		   "cancelled_write_bytes: %llu\n",
		   (unsigned long long)acct.rchar,
		   (unsigned long long)acct.wchar,
		   (unsigned long long)acct.syscr,
		   (unsigned long long)acct.syscw,
		   (unsigned long long)acct.read_bytes,
		   (unsigned long long)acct.write_bytes,
		   (unsigned long long)acct.cancelled_write_bytes);
	result = 0;

out_unlock:
	mutex_unlock(&task->signal->cred_guard_mutex);
	return result;
}
2648
/* /proc/<pid>/task/<tid>/io: single-thread I/O counters. */
static int proc_tid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
				  struct pid *pid, struct task_struct *task)
{
	return do_io_accounting(task, m, 0);
}
2654
/* /proc/<pid>/io: whole-thread-group I/O counters. */
static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
				   struct pid *pid, struct task_struct *task)
{
	return do_io_accounting(task, m, 1);
}
2660#endif /* CONFIG_TASK_IO_ACCOUNTING */
2661
2662#ifdef CONFIG_USER_NS
/*
 * Common open for /proc/<pid>/{uid,gid,projid}_map: pin the task's user
 * namespace and stash it in the seq_file's private data.  The reference
 * is dropped in proc_id_map_release().
 */
static int proc_id_map_open(struct inode *inode, struct file *file,
	const struct seq_operations *seq_ops)
{
	struct user_namespace *ns = NULL;
	struct task_struct *task;
	struct seq_file *seq;
	int ret = -EINVAL;

	task = get_proc_task(inode);
	if (task) {
		/* RCU protects the cred dereference inside task_cred_xxx() */
		rcu_read_lock();
		ns = get_user_ns(task_cred_xxx(task, user_ns));
		rcu_read_unlock();
		put_task_struct(task);
	}
	if (!ns)
		goto err;

	ret = seq_open(file, seq_ops);
	if (ret)
		goto err_put_ns;

	seq = file->private_data;
	seq->private = ns;

	return 0;
err_put_ns:
	put_user_ns(ns);
err:
	return ret;
}
2694
2695static int proc_id_map_release(struct inode *inode, struct file *file)
2696{
2697	struct seq_file *seq = file->private_data;
2698	struct user_namespace *ns = seq->private;
2699	put_user_ns(ns);
2700	return seq_release(inode, file);
2701}
2702
2703static int proc_uid_map_open(struct inode *inode, struct file *file)
2704{
2705	return proc_id_map_open(inode, file, &proc_uid_seq_operations);
2706}
2707
2708static int proc_gid_map_open(struct inode *inode, struct file *file)
2709{
2710	return proc_id_map_open(inode, file, &proc_gid_seq_operations);
2711}
2712
2713static int proc_projid_map_open(struct inode *inode, struct file *file)
2714{
2715	return proc_id_map_open(inode, file, &proc_projid_seq_operations);
2716}
2717
/* File operations for /proc/<pid>/uid_map. */
static const struct file_operations proc_uid_map_operations = {
	.open		= proc_uid_map_open,
	.write		= proc_uid_map_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_id_map_release,
};
2725
/* File operations for /proc/<pid>/gid_map. */
static const struct file_operations proc_gid_map_operations = {
	.open		= proc_gid_map_open,
	.write		= proc_gid_map_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_id_map_release,
};
2733
/* File operations for /proc/<pid>/projid_map. */
static const struct file_operations proc_projid_map_operations = {
	.open		= proc_projid_map_open,
	.write		= proc_projid_map_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_id_map_release,
};
2741
/*
 * Open /proc/<pid>/setgroups.  Pins the task's user namespace for the
 * seq_file; opening for write additionally requires CAP_SYS_ADMIN over
 * that namespace.  The ns reference is dropped in proc_setgroups_release().
 */
static int proc_setgroups_open(struct inode *inode, struct file *file)
{
	struct user_namespace *ns = NULL;
	struct task_struct *task;
	int ret;

	ret = -ESRCH;
	task = get_proc_task(inode);
	if (task) {
		/* RCU protects the cred dereference inside task_cred_xxx() */
		rcu_read_lock();
		ns = get_user_ns(task_cred_xxx(task, user_ns));
		rcu_read_unlock();
		put_task_struct(task);
	}
	if (!ns)
		goto err;

	if (file->f_mode & FMODE_WRITE) {
		ret = -EACCES;
		if (!ns_capable(ns, CAP_SYS_ADMIN))
			goto err_put_ns;
	}

	ret = single_open(file, &proc_setgroups_show, ns);
	if (ret)
		goto err_put_ns;

	return 0;
err_put_ns:
	put_user_ns(ns);
err:
	return ret;
}
2775
/* Release for /proc/<pid>/setgroups: drop the ns ref taken at open. */
static int proc_setgroups_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	/* grab ns before single_release() tears down the seq_file */
	struct user_namespace *ns = seq->private;
	int ret = single_release(inode, file);
	put_user_ns(ns);
	return ret;
}
2784
/* File operations for /proc/<pid>/setgroups. */
static const struct file_operations proc_setgroups_operations = {
	.open		= proc_setgroups_open,
	.write		= proc_setgroups_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_setgroups_release,
};
2792#endif /* CONFIG_USER_NS */
2793
2794static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
2795				struct pid *pid, struct task_struct *task)
2796{
2797	int err = lock_trace(task);
2798	if (!err) {
2799		seq_printf(m, "%08x\n", task->personality);
2800		unlock_trace(task);
2801	}
2802	return err;
2803}
2804
/*
 * Thread groups
 */
/* Forward declarations: defined later, referenced by tgid_base_stuff. */
static const struct file_operations proc_task_operations;
static const struct inode_operations proc_task_inode_operations;
2810
/* Contents of /proc/<tgid>/ — the per-thread-group directory.  Keep in
 * rough sync with tid_base_stuff below; lookup/readdir scan linearly. */
static const struct pid_entry tgid_base_stuff[] = {
	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
	DIR("map_files",  S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
	DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
	DIR("ns",	  S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
#endif
	REG("environ",    S_IRUSR, proc_environ_operations),
	ONE("auxv",       S_IRUSR, proc_pid_auxv),
	ONE("status",     S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	ONE("limits",	  S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
	REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
#endif
	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	ONE("syscall",    S_IRUSR, proc_pid_syscall),
#endif
	REG("cmdline",    S_IRUGO, proc_pid_cmdline_ops),
	ONE("stat",       S_IRUGO, proc_tgid_stat),
	ONE("statm",      S_IRUGO, proc_pid_statm),
	REG("maps",       S_IRUGO, proc_pid_maps_operations),
#ifdef CONFIG_NUMA
	REG("numa_maps",  S_IRUGO, proc_pid_numa_maps_operations),
#endif
	REG("mem",        S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",        proc_cwd_link),
	LNK("root",       proc_root_link),
	LNK("exe",        proc_exe_link),
	REG("mounts",     S_IRUGO, proc_mounts_operations),
	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
	REG("mountstats", S_IRUSR, proc_mountstats_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",      S_IRUGO, proc_pid_smaps_operations),
	REG("pagemap",    S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	ONE("wchan",      S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",      S_IRUSR, proc_pid_stack),
#endif
#ifdef CONFIG_SCHED_INFO
	ONE("schedstat",  S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	ONE("cpuset",     S_IRUGO, proc_cpuset_show),
#endif
#ifdef CONFIG_CGROUPS
	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
#endif
	ONE("oom_score",  S_IRUGO, proc_oom_score),
	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
#ifdef CONFIG_ELF_CORE
	REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	ONE("io",	S_IRUSR, proc_tgid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
	ONE("hardwall",   S_IRUGO, proc_pid_hardwall),
#endif
#ifdef CONFIG_USER_NS
	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
	REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
#ifdef CONFIG_CHECKPOINT_RESTORE
	REG("timers",	  S_IRUGO, proc_timers_operations),
#endif
	REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
};
2905
/* readdir for /proc/<tgid>/, driven by the tgid_base_stuff table. */
static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
{
	return proc_pident_readdir(file, ctx,
				   tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}
2911
/* Directory file operations for /proc/<tgid>/. */
static const struct file_operations proc_tgid_base_operations = {
	.read		= generic_read_dir,
	.iterate	= proc_tgid_base_readdir,
	.llseek		= default_llseek,
};
2917
/* lookup for /proc/<tgid>/, driven by the tgid_base_stuff table. */
static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,
				  tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}
2923
/* Inode operations for /proc/<tgid>/; .permission enforces hidepid. */
static const struct inode_operations proc_tgid_base_inode_operations = {
	.lookup		= proc_tgid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
	.permission	= proc_pid_permission,
};
2930
/*
 * Drop the cached dentries for one exiting task on one proc mount:
 * /proc/<pid> and, for a non-leader thread, /proc/<tgid>/task/<pid>.
 * Best effort only — see the comment above proc_flush_task().
 */
static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
{
	struct dentry *dentry, *leader, *dir;
	char buf[PROC_NUMBUF];
	struct qstr name;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", pid);
	/* no ->d_hash() rejects on procfs */
	dentry = d_hash_and_lookup(mnt->mnt_root, &name);
	if (dentry) {
		d_invalidate(dentry);
		dput(dentry);
	}

	/* the thread-group leader has no /proc/<tgid>/task/<pid> alias */
	if (pid == tgid)
		return;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", tgid);
	leader = d_hash_and_lookup(mnt->mnt_root, &name);
	if (!leader)
		goto out;

	name.name = "task";
	name.len = strlen(name.name);
	dir = d_hash_and_lookup(leader, &name);
	if (!dir)
		goto out_put_leader;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", pid);
	dentry = d_hash_and_lookup(dir, &name);
	if (dentry) {
		d_invalidate(dentry);
		dput(dentry);
	}

	dput(dir);
out_put_leader:
	dput(leader);
out:
	return;
}
2975
2976/**
2977 * proc_flush_task -  Remove dcache entries for @task from the /proc dcache.
2978 * @task: task that should be flushed.
2979 *
2980 * When flushing dentries from proc, one needs to flush them from global
2981 * proc (proc_mnt) and from all the namespaces' procs this task was seen
2982 * in. This call is supposed to do all of this job.
2983 *
2984 * Looks in the dcache for
2985 * /proc/@pid
2986 * /proc/@tgid/task/@pid
 * if either directory is present flushes it and all of its children
2988 * from the dcache.
2989 *
2990 * It is safe and reasonable to cache /proc entries for a task until
2991 * that task exits.  After that they just clog up the dcache with
2992 * useless entries, possibly causing useful dcache entries to be
 * flushed instead.  This routine is provided to flush those useless
2994 * dcache entries at process exit time.
2995 *
2996 * NOTE: This routine is just an optimization so it does not guarantee
2997 *       that no dcache entries will exist at process exit time it
2998 *       just makes it very unlikely that any will persist.
2999 */
3000
void proc_flush_task(struct task_struct *task)
{
	int i;
	struct pid *pid, *tgid;
	struct upid *upid;

	pid = task_pid(task);
	tgid = task_tgid(task);

	/* flush once per pid namespace level the task is visible in */
	for (i = 0; i <= pid->level; i++) {
		upid = &pid->numbers[i];
		proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
					tgid->numbers[i].nr);
	}
}
3016
/*
 * Build the /proc/<tgid> directory inode/dentry for @task.
 * Returns 0 on success, -ENOENT if allocation failed or the task died
 * before the dentry was hashed.
 */
static int proc_pid_instantiate(struct inode *dir,
				   struct dentry * dentry,
				   struct task_struct *task, const void *ptr)
{
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tgid_base_inode_operations;
	inode->i_fop = &proc_tgid_base_operations;
	inode->i_flags|=S_IMMUTABLE;

	/* 2 (. and ..) plus one link per subdirectory in the table */
	set_nlink(inode, 2 + pid_entry_count_dirs(tgid_base_stuff,
						  ARRAY_SIZE(tgid_base_stuff)));

	d_set_d_op(dentry, &pid_dentry_operations);

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, 0))
		return 0;
out:
	return -ENOENT;
}
3044
/* Look up a numeric /proc/<tgid> entry in the caller's pid namespace. */
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
	int result = -ENOENT;
	struct task_struct *task;
	unsigned tgid;
	struct pid_namespace *ns;

	/* name_to_int() yields ~0U for anything that isn't a clean number */
	tgid = name_to_int(&dentry->d_name);
	if (tgid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tgid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;

	result = proc_pid_instantiate(dir, dentry, task, NULL);
	put_task_struct(task);
out:
	return ERR_PTR(result);
}
3070
3071/*
3072 * Find the first task with tgid >= tgid
3073 *
3074 */
/* Cursor for walking tgids in order; task (if set) holds a reference. */
struct tgid_iter {
	unsigned int tgid;
	struct task_struct *task;
};
/* Advance @iter to the first thread-group leader with tgid >= iter.tgid,
 * dropping the reference on the previous task and taking one on the new. */
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
{
	struct pid *pid;

	if (iter.task)
		put_task_struct(iter.task);
	rcu_read_lock();
retry:
	iter.task = NULL;
	pid = find_ge_pid(iter.tgid, ns);
	if (pid) {
		iter.tgid = pid_nr_ns(pid, ns);
		iter.task = pid_task(pid, PIDTYPE_PID);
		/* What we want to know is if the pid we have found is the
		 * pid of a thread_group_leader.  Testing for task
		 * being a thread_group_leader is the obvious thing
		 * todo but there is a window when it fails, due to
		 * the pid transfer logic in de_thread.
		 *
		 * So we perform the straight forward test of seeing
		 * if the pid we have found is the pid of a thread
		 * group leader, and don't worry if the task we have
		 * found doesn't happen to be a thread group leader.
		 * As we don't care in the case of readdir.
		 */
		if (!iter.task || !has_group_leader_pid(iter.task)) {
			iter.tgid += 1;
			goto retry;
		}
		get_task_struct(iter.task);
	}
	rcu_read_unlock();
	return iter;
}
3113
/* readdir position where numeric pid entries begin: after the non-process
 * entries plus the "self" and "thread-self" links. */
#define TGID_OFFSET (FIRST_PROCESS_ENTRY + 2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3115
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file *file, struct dir_context *ctx)
{
	struct tgid_iter iter;
	struct pid_namespace *ns = file_inode(file)->i_sb->s_fs_info;
	loff_t pos = ctx->pos;

	if (pos >= PID_MAX_LIMIT + TGID_OFFSET)
		return 0;

	/* the two synthetic links sit just before the pid entries */
	if (pos == TGID_OFFSET - 2) {
		struct inode *inode = d_inode(ns->proc_self);
		if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK))
			return 0;
		ctx->pos = pos = pos + 1;
	}
	if (pos == TGID_OFFSET - 1) {
		struct inode *inode = d_inode(ns->proc_thread_self);
		if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK))
			return 0;
		ctx->pos = pos = pos + 1;
	}
	iter.tgid = pos - TGID_OFFSET;
	iter.task = NULL;
	for (iter = next_tgid(ns, iter);
	     iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		char name[PROC_NUMBUF];
		int len;
		/* honor hidepid: skip tasks this opener may not see */
		if (!has_pid_permissions(ns, iter.task, 2))
			continue;

		len = snprintf(name, sizeof(name), "%d", iter.tgid);
		ctx->pos = iter.tgid + TGID_OFFSET;
		if (!proc_fill_cache(file, ctx, name, len,
				     proc_pid_instantiate, iter.task, NULL)) {
			put_task_struct(iter.task);
			return 0;
		}
	}
	/* exhausted: park the cursor past every possible pid */
	ctx->pos = PID_MAX_LIMIT + TGID_OFFSET;
	return 0;
}
3159
3160/*
3161 * Tasks
3162 */
/* Contents of /proc/<tgid>/task/<tid>/ — the per-thread directory.
 * Largely parallels tgid_base_stuff; per-thread variants where they differ. */
static const struct pid_entry tid_base_stuff[] = {
	DIR("fd",        S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
	DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
	DIR("ns",	 S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
#endif
	REG("environ",   S_IRUSR, proc_environ_operations),
	ONE("auxv",      S_IRUSR, proc_pid_auxv),
	ONE("status",    S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	ONE("limits",	 S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	ONE("syscall",   S_IRUSR, proc_pid_syscall),
#endif
	REG("cmdline",   S_IRUGO, proc_pid_cmdline_ops),
	ONE("stat",      S_IRUGO, proc_tid_stat),
	ONE("statm",     S_IRUGO, proc_pid_statm),
	REG("maps",      S_IRUGO, proc_tid_maps_operations),
#ifdef CONFIG_PROC_CHILDREN
	REG("children",  S_IRUGO, proc_tid_children_operations),
#endif
#ifdef CONFIG_NUMA
	REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
#endif
	REG("mem",       S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",       proc_cwd_link),
	LNK("root",      proc_root_link),
	LNK("exe",       proc_exe_link),
	REG("mounts",    S_IRUGO, proc_mounts_operations),
	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",     S_IRUGO, proc_tid_smaps_operations),
	REG("pagemap",    S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	ONE("wchan",     S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",      S_IRUSR, proc_pid_stack),
#endif
#ifdef CONFIG_SCHED_INFO
	ONE("schedstat", S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	ONE("cpuset",    S_IRUGO, proc_cpuset_show),
#endif
#ifdef CONFIG_CGROUPS
	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
#endif
	ONE("oom_score", S_IRUGO, proc_oom_score),
	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	ONE("io",	S_IRUSR, proc_tid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
	ONE("hardwall",   S_IRUGO, proc_pid_hardwall),
#endif
#ifdef CONFIG_USER_NS
	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
	REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
};
3247
/* readdir for /proc/<tgid>/task/<tid>/, driven by tid_base_stuff. */
static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
{
	return proc_pident_readdir(file, ctx,
				   tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}
3253
/* lookup for /proc/<tgid>/task/<tid>/, driven by tid_base_stuff. */
static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,
				  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}
3259
/* Directory file operations for /proc/<tgid>/task/<tid>/. */
static const struct file_operations proc_tid_base_operations = {
	.read		= generic_read_dir,
	.iterate	= proc_tid_base_readdir,
	.llseek		= default_llseek,
};
3265
/* Inode operations for /proc/<tgid>/task/<tid>/. */
static const struct inode_operations proc_tid_base_inode_operations = {
	.lookup		= proc_tid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};
3271
/*
 * Build the /proc/<tgid>/task/<tid> directory inode/dentry for @task.
 * Returns 0 on success, -ENOENT on allocation failure or if the task
 * died before the dentry was hashed.
 */
static int proc_task_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	struct inode *inode;
	inode = proc_pid_make_inode(dir->i_sb, task);

	if (!inode)
		goto out;
	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tid_base_inode_operations;
	inode->i_fop = &proc_tid_base_operations;
	inode->i_flags|=S_IMMUTABLE;

	/* 2 (. and ..) plus one link per subdirectory in the table */
	set_nlink(inode, 2 + pid_entry_count_dirs(tid_base_stuff,
						  ARRAY_SIZE(tid_base_stuff)));

	d_set_d_op(dentry, &pid_dentry_operations);

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, 0))
		return 0;
out:
	return -ENOENT;
}
3297
/* Look up a numeric tid under /proc/<tgid>/task/; the tid must belong
 * to the same thread group as the directory's leader. */
static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
	int result = -ENOENT;
	struct task_struct *task;
	struct task_struct *leader = get_proc_task(dir);
	unsigned tid;
	struct pid_namespace *ns;

	if (!leader)
		goto out_no_task;

	/* name_to_int() yields ~0U for anything that isn't a clean number */
	tid = name_to_int(&dentry->d_name);
	if (tid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;
	if (!same_thread_group(leader, task))
		goto out_drop_task;

	result = proc_task_instantiate(dir, dentry, task, NULL);
out_drop_task:
	put_task_struct(task);
out:
	put_task_struct(leader);
out_no_task:
	return ERR_PTR(result);
}
3332
3333/*
3334 * Find the first tid of a thread group to return to user space.
3335 *
3336 * Usually this is just the thread group leader, but if the users
3337 * buffer was too small or there was a seek into the middle of the
3338 * directory we have more work todo.
3339 *
3340 * In the case of a short read we start with find_task_by_pid.
3341 *
3342 * In the case of a seek we start with the leader and walk nr
3343 * threads past it.
3344 */
/* See the block comment above: find the thread at position @f_pos of
 * @pid's group, preferring to resume at the cached @tid when it is
 * still a member.  Returns a referenced task or NULL. */
static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos,
					struct pid_namespace *ns)
{
	struct task_struct *pos, *task;
	unsigned long nr = f_pos;

	if (nr != f_pos)	/* 32bit overflow? */
		return NULL;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	if (!task)
		goto fail;

	/* Attempt to start with the tid of a thread */
	if (tid && nr) {
		pos = find_task_by_pid_ns(tid, ns);
		if (pos && same_thread_group(pos, task))
			goto found;
	}

	/* If nr exceeds the number of threads there is nothing todo */
	if (nr >= get_nr_threads(task))
		goto fail;

	/* If we haven't found our starting place yet start
	 * with the leader and walk nr threads forward.
	 */
	pos = task = task->group_leader;
	do {
		if (!nr--)
			goto found;
	} while_each_thread(task, pos);
fail:
	pos = NULL;
	goto out;
found:
	get_task_struct(pos);
out:
	rcu_read_unlock();
	return pos;
}
3387
3388/*
3389 * Find the next thread in the thread list.
3390 * Return NULL if there is an error or no next thread.
3391 *
3392 * The reference to the input task_struct is released.
3393 */
3394static struct task_struct *next_tid(struct task_struct *start)
3395{
3396	struct task_struct *pos = NULL;
3397	rcu_read_lock();
3398	if (pid_alive(start)) {
3399		pos = next_thread(start);
3400		if (thread_group_leader(pos))
3401			pos = NULL;
3402		else
3403			get_task_struct(pos);
3404	}
3405	rcu_read_unlock();
3406	put_task_struct(start);
3407	return pos;
3408}
3409
 
 
 
 
 
 
 
 
 
3410/* for the /proc/TGID/task/ directories */
3411static int proc_task_readdir(struct file *file, struct dir_context *ctx)
3412{
3413	struct inode *inode = file_inode(file);
 
 
3414	struct task_struct *task;
3415	struct pid_namespace *ns;
 
3416	int tid;
 
3417
3418	if (proc_inode_is_dead(inode))
3419		return -ENOENT;
 
 
 
 
 
 
 
 
 
 
 
3420
3421	if (!dir_emit_dots(file, ctx))
3422		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
3423
3424	/* f_version caches the tgid value that the last readdir call couldn't
3425	 * return. lseek aka telldir automagically resets f_version to 0.
3426	 */
3427	ns = inode->i_sb->s_fs_info;
3428	tid = (int)file->f_version;
3429	file->f_version = 0;
3430	for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
3431	     task;
3432	     task = next_tid(task), ctx->pos++) {
3433		char name[PROC_NUMBUF];
3434		int len;
3435		tid = task_pid_nr_ns(task, ns);
3436		len = snprintf(name, sizeof(name), "%d", tid);
3437		if (!proc_fill_cache(file, ctx, name, len,
3438				proc_task_instantiate, task, NULL)) {
3439			/* returning this tgid failed, save it as the first
3440			 * pid for the next readir call */
3441			file->f_version = (u64)tid;
3442			put_task_struct(task);
3443			break;
3444		}
3445	}
3446
3447	return 0;
 
 
3448}
3449
3450static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
3451{
3452	struct inode *inode = d_inode(dentry);
3453	struct task_struct *p = get_proc_task(inode);
3454	generic_fillattr(inode, stat);
3455
3456	if (p) {
3457		stat->nlink += get_nr_threads(p);
3458		put_task_struct(p);
3459	}
3460
3461	return 0;
3462}
3463
3464static const struct inode_operations proc_task_inode_operations = {
3465	.lookup		= proc_task_lookup,
3466	.getattr	= proc_task_getattr,
3467	.setattr	= proc_setattr,
3468	.permission	= proc_pid_permission,
3469};
3470
3471static const struct file_operations proc_task_operations = {
3472	.read		= generic_read_dir,
3473	.iterate	= proc_task_readdir,
3474	.llseek		= default_llseek,
3475};
/* ===== Second copy of fs/proc/base.c, from Linux v3.5.6, follows ===== */
   1/*
   2 *  linux/fs/proc/base.c
   3 *
   4 *  Copyright (C) 1991, 1992 Linus Torvalds
   5 *
   6 *  proc base directory handling functions
   7 *
   8 *  1999, Al Viro. Rewritten. Now it covers the whole per-process part.
   9 *  Instead of using magical inumbers to determine the kind of object
  10 *  we allocate and fill in-core inodes upon lookup. They don't even
  11 *  go into icache. We cache the reference to task_struct upon lookup too.
  12 *  Eventually it should become a filesystem in its own. We don't use the
  13 *  rest of procfs anymore.
  14 *
  15 *
  16 *  Changelog:
  17 *  17-Jan-2005
  18 *  Allan Bezerra
  19 *  Bruna Moreira <bruna.moreira@indt.org.br>
  20 *  Edjard Mota <edjard.mota@indt.org.br>
  21 *  Ilias Biris <ilias.biris@indt.org.br>
  22 *  Mauricio Lin <mauricio.lin@indt.org.br>
  23 *
  24 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
  25 *
  26 *  A new process specific entry (smaps) included in /proc. It shows the
  27 *  size of rss for each memory area. The maps entry lacks information
  28 *  about physical memory size (rss) for each mapped file, i.e.,
  29 *  rss information for executables and library files.
  30 *  This additional information is useful for any tools that need to know
  31 *  about physical memory consumption for a process specific library.
  32 *
  33 *  Changelog:
  34 *  21-Feb-2005
  35 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
  36 *  Pud inclusion in the page table walking.
  37 *
  38 *  ChangeLog:
  39 *  10-Mar-2005
  40 *  10LE Instituto Nokia de Tecnologia - INdT:
  41 *  A better way to walks through the page table as suggested by Hugh Dickins.
  42 *
  43 *  Simo Piiroinen <simo.piiroinen@nokia.com>:
  44 *  Smaps information related to shared, private, clean and dirty pages.
  45 *
  46 *  Paul Mundt <paul.mundt@nokia.com>:
  47 *  Overall revision about smaps.
  48 */
  49
  50#include <asm/uaccess.h>
  51
  52#include <linux/errno.h>
  53#include <linux/time.h>
  54#include <linux/proc_fs.h>
  55#include <linux/stat.h>
  56#include <linux/task_io_accounting_ops.h>
  57#include <linux/init.h>
  58#include <linux/capability.h>
  59#include <linux/file.h>
  60#include <linux/fdtable.h>
  61#include <linux/string.h>
  62#include <linux/seq_file.h>
  63#include <linux/namei.h>
  64#include <linux/mnt_namespace.h>
  65#include <linux/mm.h>
  66#include <linux/swap.h>
  67#include <linux/rcupdate.h>
  68#include <linux/kallsyms.h>
  69#include <linux/stacktrace.h>
  70#include <linux/resource.h>
  71#include <linux/module.h>
  72#include <linux/mount.h>
  73#include <linux/security.h>
  74#include <linux/ptrace.h>
  75#include <linux/tracehook.h>
 
  76#include <linux/cgroup.h>
  77#include <linux/cpuset.h>
  78#include <linux/audit.h>
  79#include <linux/poll.h>
  80#include <linux/nsproxy.h>
  81#include <linux/oom.h>
  82#include <linux/elf.h>
  83#include <linux/pid_namespace.h>
  84#include <linux/user_namespace.h>
  85#include <linux/fs_struct.h>
  86#include <linux/slab.h>
  87#include <linux/flex_array.h>
 
  88#ifdef CONFIG_HARDWALL
  89#include <asm/hardwall.h>
  90#endif
  91#include <trace/events/oom.h>
  92#include "internal.h"
 
  93
  94/* NOTE:
  95 *	Implementing inode permission operations in /proc is almost
  96 *	certainly an error.  Permission checks need to happen during
  97 *	each system call not at open time.  The reason is that most of
  98 *	what we wish to check for permissions in /proc varies at runtime.
  99 *
 100 *	The classic example of a problem is opening file descriptors
 101 *	in /proc for a task before it execs a suid executable.
 102 */
 103
 104struct pid_entry {
 105	char *name;
 106	int len;
 107	umode_t mode;
 108	const struct inode_operations *iop;
 109	const struct file_operations *fop;
 110	union proc_op op;
 111};
 112
 113#define NOD(NAME, MODE, IOP, FOP, OP) {			\
 114	.name = (NAME),					\
 115	.len  = sizeof(NAME) - 1,			\
 116	.mode = MODE,					\
 117	.iop  = IOP,					\
 118	.fop  = FOP,					\
 119	.op   = OP,					\
 120}
 121
 122#define DIR(NAME, MODE, iops, fops)	\
 123	NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
 124#define LNK(NAME, get_link)					\
 125	NOD(NAME, (S_IFLNK|S_IRWXUGO),				\
 126		&proc_pid_link_inode_operations, NULL,		\
 127		{ .proc_get_link = get_link } )
 128#define REG(NAME, MODE, fops)				\
 129	NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
 130#define INF(NAME, MODE, read)				\
 131	NOD(NAME, (S_IFREG|(MODE)), 			\
 132		NULL, &proc_info_file_operations,	\
 133		{ .proc_read = read } )
 134#define ONE(NAME, MODE, show)				\
 135	NOD(NAME, (S_IFREG|(MODE)), 			\
 136		NULL, &proc_single_file_operations,	\
 137		{ .proc_show = show } )
 138
 139static int proc_fd_permission(struct inode *inode, int mask);
 140
 141/*
 142 * Count the number of hardlinks for the pid_entry table, excluding the .
 143 * and .. links.
 144 */
 145static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
 146	unsigned int n)
 147{
 148	unsigned int i;
 149	unsigned int count;
 150
 151	count = 0;
 152	for (i = 0; i < n; ++i) {
 153		if (S_ISDIR(entries[i].mode))
 154			++count;
 155	}
 156
 157	return count;
 158}
 159
 160static int get_task_root(struct task_struct *task, struct path *root)
 161{
 162	int result = -ENOENT;
 163
 164	task_lock(task);
 165	if (task->fs) {
 166		get_fs_root(task->fs, root);
 167		result = 0;
 168	}
 169	task_unlock(task);
 170	return result;
 171}
 172
 173static int proc_cwd_link(struct dentry *dentry, struct path *path)
 174{
 175	struct task_struct *task = get_proc_task(dentry->d_inode);
 176	int result = -ENOENT;
 177
 178	if (task) {
 179		task_lock(task);
 180		if (task->fs) {
 181			get_fs_pwd(task->fs, path);
 182			result = 0;
 183		}
 184		task_unlock(task);
 185		put_task_struct(task);
 186	}
 187	return result;
 188}
 189
 190static int proc_root_link(struct dentry *dentry, struct path *path)
 191{
 192	struct task_struct *task = get_proc_task(dentry->d_inode);
 193	int result = -ENOENT;
 194
 195	if (task) {
 196		result = get_task_root(task, path);
 197		put_task_struct(task);
 198	}
 199	return result;
 200}
 201
 202static int proc_pid_cmdline(struct task_struct *task, char * buffer)
 
 203{
 204	int res = 0;
 205	unsigned int len;
 206	struct mm_struct *mm = get_task_mm(task);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 207	if (!mm)
 208		goto out;
 209	if (!mm->arg_end)
 210		goto out_mm;	/* Shh! No looking before we're done */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 211
 212 	len = mm->arg_end - mm->arg_start;
 213 
 214	if (len > PAGE_SIZE)
 215		len = PAGE_SIZE;
 216 
 217	res = access_process_vm(task, mm->arg_start, buffer, len, 0);
 218
 219	// If the nul at the end of args has been overwritten, then
 220	// assume application is using setproctitle(3).
 221	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
 222		len = strnlen(buffer, res);
 223		if (len < res) {
 224		    res = len;
 225		} else {
 226			len = mm->env_end - mm->env_start;
 227			if (len > PAGE_SIZE - res)
 228				len = PAGE_SIZE - res;
 229			res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
 230			res = strnlen(buffer, res);
 231		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 232	}
 233out_mm:
 
 
 
 234	mmput(mm);
 235out:
 236	return res;
 
 237}
 238
 239static int proc_pid_auxv(struct task_struct *task, char *buffer)
 
 
 
 
 
 
 240{
 241	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
 242	int res = PTR_ERR(mm);
 243	if (mm && !IS_ERR(mm)) {
 244		unsigned int nwords = 0;
 245		do {
 246			nwords += 2;
 247		} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
 248		res = nwords * sizeof(mm->saved_auxv[0]);
 249		if (res > PAGE_SIZE)
 250			res = PAGE_SIZE;
 251		memcpy(buffer, mm->saved_auxv, res);
 252		mmput(mm);
 253	}
 254	return res;
 
 255}
 256
 257
 258#ifdef CONFIG_KALLSYMS
 259/*
 260 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
 261 * Returns the resolved symbol.  If that fails, simply return the address.
 262 */
 263static int proc_pid_wchan(struct task_struct *task, char *buffer)
 
 264{
 265	unsigned long wchan;
 266	char symname[KSYM_NAME_LEN];
 267
 268	wchan = get_wchan(task);
 269
 270	if (lookup_symbol_name(wchan, symname) < 0)
 271		if (!ptrace_may_access(task, PTRACE_MODE_READ))
 272			return 0;
 273		else
 274			return sprintf(buffer, "%lu", wchan);
 275	else
 276		return sprintf(buffer, "%s", symname);
 
 
 277}
 278#endif /* CONFIG_KALLSYMS */
 279
 280static int lock_trace(struct task_struct *task)
 281{
 282	int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
 283	if (err)
 284		return err;
 285	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
 286		mutex_unlock(&task->signal->cred_guard_mutex);
 287		return -EPERM;
 288	}
 289	return 0;
 290}
 291
 292static void unlock_trace(struct task_struct *task)
 293{
 294	mutex_unlock(&task->signal->cred_guard_mutex);
 295}
 296
 297#ifdef CONFIG_STACKTRACE
 298
 299#define MAX_STACK_TRACE_DEPTH	64
 300
 301static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 302			  struct pid *pid, struct task_struct *task)
 303{
 304	struct stack_trace trace;
 305	unsigned long *entries;
 306	int err;
 307	int i;
 308
 309	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
 310	if (!entries)
 311		return -ENOMEM;
 312
 313	trace.nr_entries	= 0;
 314	trace.max_entries	= MAX_STACK_TRACE_DEPTH;
 315	trace.entries		= entries;
 316	trace.skip		= 0;
 317
 318	err = lock_trace(task);
 319	if (!err) {
 320		save_stack_trace_tsk(task, &trace);
 321
 322		for (i = 0; i < trace.nr_entries; i++) {
 323			seq_printf(m, "[<%pK>] %pS\n",
 324				   (void *)entries[i], (void *)entries[i]);
 325		}
 326		unlock_trace(task);
 327	}
 328	kfree(entries);
 329
 330	return err;
 331}
 332#endif
 333
 334#ifdef CONFIG_SCHEDSTATS
 335/*
 336 * Provides /proc/PID/schedstat
 337 */
 338static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 
 339{
 340	return sprintf(buffer, "%llu %llu %lu\n",
 341			(unsigned long long)task->se.sum_exec_runtime,
 342			(unsigned long long)task->sched_info.run_delay,
 343			task->sched_info.pcount);
 
 
 
 
 
 344}
 345#endif
 346
 347#ifdef CONFIG_LATENCYTOP
 348static int lstats_show_proc(struct seq_file *m, void *v)
 349{
 350	int i;
 351	struct inode *inode = m->private;
 352	struct task_struct *task = get_proc_task(inode);
 353
 354	if (!task)
 355		return -ESRCH;
 356	seq_puts(m, "Latency Top version : v0.1\n");
 357	for (i = 0; i < 32; i++) {
 358		struct latency_record *lr = &task->latency_record[i];
 359		if (lr->backtrace[0]) {
 360			int q;
 361			seq_printf(m, "%i %li %li",
 362				   lr->count, lr->time, lr->max);
 363			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 364				unsigned long bt = lr->backtrace[q];
 365				if (!bt)
 366					break;
 367				if (bt == ULONG_MAX)
 368					break;
 369				seq_printf(m, " %ps", (void *)bt);
 370			}
 371			seq_putc(m, '\n');
 372		}
 373
 374	}
 375	put_task_struct(task);
 376	return 0;
 377}
 378
 379static int lstats_open(struct inode *inode, struct file *file)
 380{
 381	return single_open(file, lstats_show_proc, inode);
 382}
 383
 384static ssize_t lstats_write(struct file *file, const char __user *buf,
 385			    size_t count, loff_t *offs)
 386{
 387	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
 388
 389	if (!task)
 390		return -ESRCH;
 391	clear_all_latency_tracing(task);
 392	put_task_struct(task);
 393
 394	return count;
 395}
 396
 397static const struct file_operations proc_lstats_operations = {
 398	.open		= lstats_open,
 399	.read		= seq_read,
 400	.write		= lstats_write,
 401	.llseek		= seq_lseek,
 402	.release	= single_release,
 403};
 404
 405#endif
 406
 407static int proc_oom_score(struct task_struct *task, char *buffer)
 
 408{
 409	unsigned long totalpages = totalram_pages + total_swap_pages;
 410	unsigned long points = 0;
 411
 412	read_lock(&tasklist_lock);
 413	if (pid_alive(task))
 414		points = oom_badness(task, NULL, NULL, totalpages) *
 415						1000 / totalpages;
 416	read_unlock(&tasklist_lock);
 417	return sprintf(buffer, "%lu\n", points);
 
 
 418}
 419
 420struct limit_names {
 421	char *name;
 422	char *unit;
 423};
 424
 425static const struct limit_names lnames[RLIM_NLIMITS] = {
 426	[RLIMIT_CPU] = {"Max cpu time", "seconds"},
 427	[RLIMIT_FSIZE] = {"Max file size", "bytes"},
 428	[RLIMIT_DATA] = {"Max data size", "bytes"},
 429	[RLIMIT_STACK] = {"Max stack size", "bytes"},
 430	[RLIMIT_CORE] = {"Max core file size", "bytes"},
 431	[RLIMIT_RSS] = {"Max resident set", "bytes"},
 432	[RLIMIT_NPROC] = {"Max processes", "processes"},
 433	[RLIMIT_NOFILE] = {"Max open files", "files"},
 434	[RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"},
 435	[RLIMIT_AS] = {"Max address space", "bytes"},
 436	[RLIMIT_LOCKS] = {"Max file locks", "locks"},
 437	[RLIMIT_SIGPENDING] = {"Max pending signals", "signals"},
 438	[RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"},
 439	[RLIMIT_NICE] = {"Max nice priority", NULL},
 440	[RLIMIT_RTPRIO] = {"Max realtime priority", NULL},
 441	[RLIMIT_RTTIME] = {"Max realtime timeout", "us"},
 442};
 443
 444/* Display limits for a process */
 445static int proc_pid_limits(struct task_struct *task, char *buffer)
 
 446{
 447	unsigned int i;
 448	int count = 0;
 449	unsigned long flags;
 450	char *bufptr = buffer;
 451
 452	struct rlimit rlim[RLIM_NLIMITS];
 453
 454	if (!lock_task_sighand(task, &flags))
 455		return 0;
 456	memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
 457	unlock_task_sighand(task, &flags);
 458
 459	/*
 460	 * print the file header
 461	 */
 462	count += sprintf(&bufptr[count], "%-25s %-20s %-20s %-10s\n",
 463			"Limit", "Soft Limit", "Hard Limit", "Units");
 464
 465	for (i = 0; i < RLIM_NLIMITS; i++) {
 466		if (rlim[i].rlim_cur == RLIM_INFINITY)
 467			count += sprintf(&bufptr[count], "%-25s %-20s ",
 468					 lnames[i].name, "unlimited");
 469		else
 470			count += sprintf(&bufptr[count], "%-25s %-20lu ",
 471					 lnames[i].name, rlim[i].rlim_cur);
 472
 473		if (rlim[i].rlim_max == RLIM_INFINITY)
 474			count += sprintf(&bufptr[count], "%-20s ", "unlimited");
 475		else
 476			count += sprintf(&bufptr[count], "%-20lu ",
 477					 rlim[i].rlim_max);
 478
 479		if (lnames[i].unit)
 480			count += sprintf(&bufptr[count], "%-10s\n",
 481					 lnames[i].unit);
 482		else
 483			count += sprintf(&bufptr[count], "\n");
 484	}
 485
 486	return count;
 487}
 488
 489#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 490static int proc_pid_syscall(struct task_struct *task, char *buffer)
 
 491{
 492	long nr;
 493	unsigned long args[6], sp, pc;
 494	int res = lock_trace(task);
 
 
 495	if (res)
 496		return res;
 497
 498	if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
 499		res = sprintf(buffer, "running\n");
 500	else if (nr < 0)
 501		res = sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
 502	else
 503		res = sprintf(buffer,
 504		       "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
 505		       nr,
 506		       args[0], args[1], args[2], args[3], args[4], args[5],
 507		       sp, pc);
 508	unlock_trace(task);
 509	return res;
 
 510}
 511#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
 512
 513/************************************************************************/
 514/*                       Here the fs part begins                        */
 515/************************************************************************/
 516
 517/* permission checks */
 518static int proc_fd_access_allowed(struct inode *inode)
 519{
 520	struct task_struct *task;
 521	int allowed = 0;
 522	/* Allow access to a task's file descriptors if it is us or we
 523	 * may use ptrace attach to the process and find out that
 524	 * information.
 525	 */
 526	task = get_proc_task(inode);
 527	if (task) {
 528		allowed = ptrace_may_access(task, PTRACE_MODE_READ);
 529		put_task_struct(task);
 530	}
 531	return allowed;
 532}
 533
 534int proc_setattr(struct dentry *dentry, struct iattr *attr)
 535{
 536	int error;
 537	struct inode *inode = dentry->d_inode;
 538
 539	if (attr->ia_valid & ATTR_MODE)
 540		return -EPERM;
 541
 542	error = inode_change_ok(inode, attr);
 543	if (error)
 544		return error;
 545
 546	if ((attr->ia_valid & ATTR_SIZE) &&
 547	    attr->ia_size != i_size_read(inode)) {
 548		error = vmtruncate(inode, attr->ia_size);
 549		if (error)
 550			return error;
 551	}
 552
 553	setattr_copy(inode, attr);
 554	mark_inode_dirty(inode);
 555	return 0;
 556}
 557
 558/*
 559 * May current process learn task's sched/cmdline info (for hide_pid_min=1)
 560 * or euid/egid (for hide_pid_min=2)?
 561 */
 562static bool has_pid_permissions(struct pid_namespace *pid,
 563				 struct task_struct *task,
 564				 int hide_pid_min)
 565{
 566	if (pid->hide_pid < hide_pid_min)
 567		return true;
 568	if (in_group_p(pid->pid_gid))
 569		return true;
 570	return ptrace_may_access(task, PTRACE_MODE_READ);
 571}
 572
 573
 574static int proc_pid_permission(struct inode *inode, int mask)
 575{
 576	struct pid_namespace *pid = inode->i_sb->s_fs_info;
 577	struct task_struct *task;
 578	bool has_perms;
 579
 580	task = get_proc_task(inode);
 581	if (!task)
 582		return -ESRCH;
 583	has_perms = has_pid_permissions(pid, task, 1);
 584	put_task_struct(task);
 585
 586	if (!has_perms) {
 587		if (pid->hide_pid == 2) {
 588			/*
 589			 * Let's make getdents(), stat(), and open()
 590			 * consistent with each other.  If a process
 591			 * may not stat() a file, it shouldn't be seen
 592			 * in procfs at all.
 593			 */
 594			return -ENOENT;
 595		}
 596
 597		return -EPERM;
 598	}
 599	return generic_permission(inode, mask);
 600}
 601
 602
 603
 604static const struct inode_operations proc_def_inode_operations = {
 605	.setattr	= proc_setattr,
 606};
 607
 608#define PROC_BLOCK_SIZE	(3*1024)		/* 4K page size but our output routines use some slack for overruns */
 609
 610static ssize_t proc_info_read(struct file * file, char __user * buf,
 611			  size_t count, loff_t *ppos)
 612{
 613	struct inode * inode = file->f_path.dentry->d_inode;
 614	unsigned long page;
 615	ssize_t length;
 616	struct task_struct *task = get_proc_task(inode);
 617
 618	length = -ESRCH;
 619	if (!task)
 620		goto out_no_task;
 621
 622	if (count > PROC_BLOCK_SIZE)
 623		count = PROC_BLOCK_SIZE;
 624
 625	length = -ENOMEM;
 626	if (!(page = __get_free_page(GFP_TEMPORARY)))
 627		goto out;
 628
 629	length = PROC_I(inode)->op.proc_read(task, (char*)page);
 630
 631	if (length >= 0)
 632		length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
 633	free_page(page);
 634out:
 635	put_task_struct(task);
 636out_no_task:
 637	return length;
 638}
 639
 640static const struct file_operations proc_info_file_operations = {
 641	.read		= proc_info_read,
 642	.llseek		= generic_file_llseek,
 643};
 644
 645static int proc_single_show(struct seq_file *m, void *v)
 646{
 647	struct inode *inode = m->private;
 648	struct pid_namespace *ns;
 649	struct pid *pid;
 650	struct task_struct *task;
 651	int ret;
 652
 653	ns = inode->i_sb->s_fs_info;
 654	pid = proc_pid(inode);
 655	task = get_pid_task(pid, PIDTYPE_PID);
 656	if (!task)
 657		return -ESRCH;
 658
 659	ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);
 660
 661	put_task_struct(task);
 662	return ret;
 663}
 664
 665static int proc_single_open(struct inode *inode, struct file *filp)
 666{
 667	return single_open(filp, proc_single_show, inode);
 668}
 669
 670static const struct file_operations proc_single_file_operations = {
 671	.open		= proc_single_open,
 672	.read		= seq_read,
 673	.llseek		= seq_lseek,
 674	.release	= single_release,
 675};
 676
 677static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
 
 678{
 679	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
 680	struct mm_struct *mm;
 
 
 
 
 
 
 
 
 
 
 
 
 681
 682	if (!task)
 683		return -ESRCH;
 684
 685	mm = mm_access(task, mode);
 686	put_task_struct(task);
 
 687
 688	if (IS_ERR(mm))
 689		return PTR_ERR(mm);
 690
 691	if (mm) {
 692		/* ensure this mm_struct can't be freed */
 693		atomic_inc(&mm->mm_count);
 694		/* but do not pin its memory */
 695		mmput(mm);
 696	}
 697
 698	/* OK to pass negative loff_t, we can catch out-of-range */
 699	file->f_mode |= FMODE_UNSIGNED_OFFSET;
 700	file->private_data = mm;
 701
 702	return 0;
 703}
 704
 705static int mem_open(struct inode *inode, struct file *file)
 706{
 707	return __mem_open(inode, file, PTRACE_MODE_ATTACH);
 
 
 
 
 
 708}
 709
 710static ssize_t mem_rw(struct file *file, char __user *buf,
 711			size_t count, loff_t *ppos, int write)
 712{
 713	struct mm_struct *mm = file->private_data;
 714	unsigned long addr = *ppos;
 715	ssize_t copied;
 716	char *page;
 717
 718	if (!mm)
 719		return 0;
 720
 721	page = (char *)__get_free_page(GFP_TEMPORARY);
 722	if (!page)
 723		return -ENOMEM;
 724
 725	copied = 0;
 726	if (!atomic_inc_not_zero(&mm->mm_users))
 727		goto free;
 728
 729	while (count > 0) {
 730		int this_len = min_t(int, count, PAGE_SIZE);
 731
 732		if (write && copy_from_user(page, buf, this_len)) {
 733			copied = -EFAULT;
 734			break;
 735		}
 736
 737		this_len = access_remote_vm(mm, addr, page, this_len, write);
 738		if (!this_len) {
 739			if (!copied)
 740				copied = -EIO;
 741			break;
 742		}
 743
 744		if (!write && copy_to_user(buf, page, this_len)) {
 745			copied = -EFAULT;
 746			break;
 747		}
 748
 749		buf += this_len;
 750		addr += this_len;
 751		copied += this_len;
 752		count -= this_len;
 753	}
 754	*ppos = addr;
 755
 756	mmput(mm);
 757free:
 758	free_page((unsigned long) page);
 759	return copied;
 760}
 761
 762static ssize_t mem_read(struct file *file, char __user *buf,
 763			size_t count, loff_t *ppos)
 764{
 765	return mem_rw(file, buf, count, ppos, 0);
 766}
 767
 768static ssize_t mem_write(struct file *file, const char __user *buf,
 769			 size_t count, loff_t *ppos)
 770{
 771	return mem_rw(file, (char __user*)buf, count, ppos, 1);
 772}
 773
 774loff_t mem_lseek(struct file *file, loff_t offset, int orig)
 775{
 776	switch (orig) {
 777	case 0:
 778		file->f_pos = offset;
 779		break;
 780	case 1:
 781		file->f_pos += offset;
 782		break;
 783	default:
 784		return -EINVAL;
 785	}
 786	force_successful_syscall_return();
 787	return file->f_pos;
 788}
 789
 790static int mem_release(struct inode *inode, struct file *file)
 791{
 792	struct mm_struct *mm = file->private_data;
 793	if (mm)
 794		mmdrop(mm);
 795	return 0;
 796}
 797
 798static const struct file_operations proc_mem_operations = {
 799	.llseek		= mem_lseek,
 800	.read		= mem_read,
 801	.write		= mem_write,
 802	.open		= mem_open,
 803	.release	= mem_release,
 804};
 805
 806static int environ_open(struct inode *inode, struct file *file)
 807{
 808	return __mem_open(inode, file, PTRACE_MODE_READ);
 809}
 810
 811static ssize_t environ_read(struct file *file, char __user *buf,
 812			size_t count, loff_t *ppos)
 813{
 814	char *page;
 815	unsigned long src = *ppos;
 816	int ret = 0;
 817	struct mm_struct *mm = file->private_data;
 
 818
 819	if (!mm)
 
 820		return 0;
 821
 822	page = (char *)__get_free_page(GFP_TEMPORARY);
 823	if (!page)
 824		return -ENOMEM;
 825
 826	ret = 0;
 827	if (!atomic_inc_not_zero(&mm->mm_users))
 828		goto free;
 
 
 
 
 
 
 829	while (count > 0) {
 830		int this_len, retval, max_len;
 
 831
 832		this_len = mm->env_end - (mm->env_start + src);
 
 833
 834		if (this_len <= 0)
 835			break;
 836
 837		max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
 838		this_len = (this_len > max_len) ? max_len : this_len;
 839
 840		retval = access_remote_vm(mm, (mm->env_start + src),
 841			page, this_len, 0);
 842
 843		if (retval <= 0) {
 844			ret = retval;
 845			break;
 846		}
 847
 848		if (copy_to_user(buf, page, retval)) {
 849			ret = -EFAULT;
 850			break;
 851		}
 852
 853		ret += retval;
 854		src += retval;
 855		buf += retval;
 856		count -= retval;
 857	}
 858	*ppos = src;
 859	mmput(mm);
 860
 861free:
 862	free_page((unsigned long) page);
 863	return ret;
 864}
 865
 866static const struct file_operations proc_environ_operations = {
 867	.open		= environ_open,
 868	.read		= environ_read,
 869	.llseek		= generic_file_llseek,
 870	.release	= mem_release,
 871};
 872
 873static ssize_t oom_adjust_read(struct file *file, char __user *buf,
 874				size_t count, loff_t *ppos)
 875{
 876	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
 877	char buffer[PROC_NUMBUF];
 
 878	size_t len;
 879	int oom_adjust = OOM_DISABLE;
 880	unsigned long flags;
 881
 882	if (!task)
 883		return -ESRCH;
 884
 885	if (lock_task_sighand(task, &flags)) {
 886		oom_adjust = task->signal->oom_adj;
 
 
 
 
 887		unlock_task_sighand(task, &flags);
 888	}
 889
 890	put_task_struct(task);
 891
 892	len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
 893
 894	return simple_read_from_buffer(buf, count, ppos, buffer, len);
 895}
 896
 897static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
 898				size_t count, loff_t *ppos)
 
 
 
 
 
 
 
 
 
 
 899{
 900	struct task_struct *task;
 901	char buffer[PROC_NUMBUF];
 902	int oom_adjust;
 903	unsigned long flags;
 904	int err;
 905
 906	memset(buffer, 0, sizeof(buffer));
 907	if (count > sizeof(buffer) - 1)
 908		count = sizeof(buffer) - 1;
 909	if (copy_from_user(buffer, buf, count)) {
 910		err = -EFAULT;
 911		goto out;
 912	}
 913
 914	err = kstrtoint(strstrip(buffer), 0, &oom_adjust);
 915	if (err)
 916		goto out;
 917	if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
 918	     oom_adjust != OOM_DISABLE) {
 919		err = -EINVAL;
 920		goto out;
 921	}
 922
 923	task = get_proc_task(file->f_path.dentry->d_inode);
 924	if (!task) {
 925		err = -ESRCH;
 926		goto out;
 927	}
 928
 929	task_lock(task);
 930	if (!task->mm) {
 931		err = -EINVAL;
 932		goto err_task_lock;
 933	}
 934
 935	if (!lock_task_sighand(task, &flags)) {
 936		err = -ESRCH;
 937		goto err_task_lock;
 938	}
 939
 940	if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
 
 
 
 
 
 
 
 
 
 
 941		err = -EACCES;
 942		goto err_sighand;
 943	}
 944
 945	/*
 946	 * Warn that /proc/pid/oom_adj is deprecated, see
 947	 * Documentation/feature-removal-schedule.txt.
 948	 */
 949	printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
 950		  current->comm, task_pid_nr(current), task_pid_nr(task),
 951		  task_pid_nr(task));
 952	task->signal->oom_adj = oom_adjust;
 953	/*
 954	 * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
 955	 * value is always attainable.
 956	 */
 957	if (task->signal->oom_adj == OOM_ADJUST_MAX)
 958		task->signal->oom_score_adj = OOM_SCORE_ADJ_MAX;
 959	else
 960		task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
 961								-OOM_DISABLE;
 962	trace_oom_score_adj_update(task);
 963err_sighand:
 964	unlock_task_sighand(task, &flags);
 965err_task_lock:
 966	task_unlock(task);
 967	put_task_struct(task);
 968out:
 969	return err < 0 ? err : count;
 970}
 971
 972static const struct file_operations proc_oom_adjust_operations = {
 973	.read		= oom_adjust_read,
 974	.write		= oom_adjust_write,
 975	.llseek		= generic_file_llseek,
 976};
 977
 978static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
 979					size_t count, loff_t *ppos)
 980{
 981	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
 982	char buffer[PROC_NUMBUF];
 983	int oom_score_adj = OOM_SCORE_ADJ_MIN;
 984	unsigned long flags;
 985	size_t len;
 986
 987	if (!task)
 988		return -ESRCH;
 989	if (lock_task_sighand(task, &flags)) {
 990		oom_score_adj = task->signal->oom_score_adj;
 991		unlock_task_sighand(task, &flags);
 992	}
 993	put_task_struct(task);
 994	len = snprintf(buffer, sizeof(buffer), "%d\n", oom_score_adj);
 995	return simple_read_from_buffer(buf, count, ppos, buffer, len);
 996}
 997
/*
 * Write handler for /proc/<pid>/oom_score_adj.
 *
 * Parses an integer, range-checks it against
 * [OOM_SCORE_ADJ_MIN, OOM_SCORE_ADJ_MAX], and stores it in the target
 * task's signal struct, keeping the legacy oom_adj value in sync.
 * Returns @count on success, negative errno on failure.
 */
static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	unsigned long flags;
	int oom_score_adj;
	int err;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
	if (err)
		goto out;
	if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
			oom_score_adj > OOM_SCORE_ADJ_MAX) {
		err = -EINVAL;
		goto out;
	}

	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task) {
		err = -ESRCH;
		goto out;
	}

	/* A task without an mm (kernel thread / exited) has no OOM score. */
	task_lock(task);
	if (!task->mm) {
		err = -EINVAL;
		goto err_task_lock;
	}

	if (!lock_task_sighand(task, &flags)) {
		err = -ESRCH;
		goto err_task_lock;
	}

	/* Lowering below the recorded minimum requires CAP_SYS_RESOURCE. */
	if (oom_score_adj < task->signal->oom_score_adj_min &&
			!capable(CAP_SYS_RESOURCE)) {
		err = -EACCES;
		goto err_sighand;
	}

	task->signal->oom_score_adj = oom_score_adj;
	if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
		task->signal->oom_score_adj_min = oom_score_adj;
	trace_oom_score_adj_update(task);
	/*
	 * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
	 * always attainable.
	 */
	if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
		task->signal->oom_adj = OOM_DISABLE;
	else
		task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) /
							OOM_SCORE_ADJ_MAX;
err_sighand:
	unlock_task_sighand(task, &flags);
err_task_lock:
	task_unlock(task);
	put_task_struct(task);
out:
	return err < 0 ? err : count;
}
1068
/* File operations for /proc/<pid>/oom_score_adj. */
static const struct file_operations proc_oom_score_adj_operations = {
	.read		= oom_score_adj_read,
	.write		= oom_score_adj_write,
	.llseek		= default_llseek,
};
1074
1075#ifdef CONFIG_AUDITSYSCALL
1076#define TMPBUFLEN 21
1077static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
1078				  size_t count, loff_t *ppos)
1079{
1080	struct inode * inode = file->f_path.dentry->d_inode;
1081	struct task_struct *task = get_proc_task(inode);
1082	ssize_t length;
1083	char tmpbuf[TMPBUFLEN];
1084
1085	if (!task)
1086		return -ESRCH;
1087	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1088				audit_get_loginuid(task));
 
1089	put_task_struct(task);
1090	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1091}
1092
1093static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1094				   size_t count, loff_t *ppos)
1095{
1096	struct inode * inode = file->f_path.dentry->d_inode;
1097	char *page, *tmp;
1098	ssize_t length;
1099	uid_t loginuid;
 
 
1100
1101	rcu_read_lock();
1102	if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
1103		rcu_read_unlock();
1104		return -EPERM;
1105	}
1106	rcu_read_unlock();
1107
1108	if (count >= PAGE_SIZE)
1109		count = PAGE_SIZE - 1;
1110
1111	if (*ppos != 0) {
1112		/* No partial writes. */
1113		return -EINVAL;
1114	}
1115	page = (char*)__get_free_page(GFP_TEMPORARY);
1116	if (!page)
1117		return -ENOMEM;
1118	length = -EFAULT;
1119	if (copy_from_user(page, buf, count))
1120		goto out_free_page;
1121
1122	page[count] = '\0';
1123	loginuid = simple_strtoul(page, &tmp, 10);
1124	if (tmp == page) {
1125		length = -EINVAL;
1126		goto out_free_page;
1127
1128	}
1129	length = audit_set_loginuid(loginuid);
1130	if (likely(length == 0))
1131		length = count;
1132
1133out_free_page:
1134	free_page((unsigned long) page);
1135	return length;
 
 
 
1136}
1137
/* File operations for /proc/<pid>/loginuid. */
static const struct file_operations proc_loginuid_operations = {
	.read		= proc_loginuid_read,
	.write		= proc_loginuid_write,
	.llseek		= generic_file_llseek,
};
1143
1144static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
1145				  size_t count, loff_t *ppos)
1146{
1147	struct inode * inode = file->f_path.dentry->d_inode;
1148	struct task_struct *task = get_proc_task(inode);
1149	ssize_t length;
1150	char tmpbuf[TMPBUFLEN];
1151
1152	if (!task)
1153		return -ESRCH;
1154	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1155				audit_get_sessionid(task));
1156	put_task_struct(task);
1157	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1158}
1159
/* File operations for the read-only /proc/<pid>/sessionid. */
static const struct file_operations proc_sessionid_operations = {
	.read		= proc_sessionid_read,
	.llseek		= generic_file_llseek,
};
1164#endif
1165
1166#ifdef CONFIG_FAULT_INJECTION
1167static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
1168				      size_t count, loff_t *ppos)
1169{
1170	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
1171	char buffer[PROC_NUMBUF];
1172	size_t len;
1173	int make_it_fail;
1174
1175	if (!task)
1176		return -ESRCH;
1177	make_it_fail = task->make_it_fail;
1178	put_task_struct(task);
1179
1180	len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);
1181
1182	return simple_read_from_buffer(buf, count, ppos, buffer, len);
1183}
1184
1185static ssize_t proc_fault_inject_write(struct file * file,
1186			const char __user * buf, size_t count, loff_t *ppos)
1187{
1188	struct task_struct *task;
1189	char buffer[PROC_NUMBUF], *end;
1190	int make_it_fail;
 
1191
1192	if (!capable(CAP_SYS_RESOURCE))
1193		return -EPERM;
1194	memset(buffer, 0, sizeof(buffer));
1195	if (count > sizeof(buffer) - 1)
1196		count = sizeof(buffer) - 1;
1197	if (copy_from_user(buffer, buf, count))
1198		return -EFAULT;
1199	make_it_fail = simple_strtol(strstrip(buffer), &end, 0);
1200	if (*end)
 
 
1201		return -EINVAL;
1202	task = get_proc_task(file->f_dentry->d_inode);
 
1203	if (!task)
1204		return -ESRCH;
1205	task->make_it_fail = make_it_fail;
1206	put_task_struct(task);
1207
1208	return count;
1209}
1210
/* File operations for /proc/<pid>/make-it-fail (fault injection). */
static const struct file_operations proc_fault_inject_operations = {
	.read		= proc_fault_inject_read,
	.write		= proc_fault_inject_write,
	.llseek		= generic_file_llseek,
};
1216#endif
1217
1218
1219#ifdef CONFIG_SCHED_DEBUG
1220/*
1221 * Print out various scheduling related per-task fields:
1222 */
1223static int sched_show(struct seq_file *m, void *v)
1224{
1225	struct inode *inode = m->private;
1226	struct task_struct *p;
1227
1228	p = get_proc_task(inode);
1229	if (!p)
1230		return -ESRCH;
1231	proc_sched_show_task(p, m);
1232
1233	put_task_struct(p);
1234
1235	return 0;
1236}
1237
1238static ssize_t
1239sched_write(struct file *file, const char __user *buf,
1240	    size_t count, loff_t *offset)
1241{
1242	struct inode *inode = file->f_path.dentry->d_inode;
1243	struct task_struct *p;
1244
1245	p = get_proc_task(inode);
1246	if (!p)
1247		return -ESRCH;
1248	proc_sched_set_task(p);
1249
1250	put_task_struct(p);
1251
1252	return count;
1253}
1254
static int sched_open(struct inode *inode, struct file *filp)
{
	/* single_open() stores the inode in m->private for sched_show(). */
	return single_open(filp, sched_show, inode);
}
1259
/* File operations for /proc/<pid>/sched. */
static const struct file_operations proc_pid_sched_operations = {
	.open		= sched_open,
	.read		= seq_read,
	.write		= sched_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1267
1268#endif
1269
1270#ifdef CONFIG_SCHED_AUTOGROUP
1271/*
1272 * Print out autogroup related information:
1273 */
1274static int sched_autogroup_show(struct seq_file *m, void *v)
1275{
1276	struct inode *inode = m->private;
1277	struct task_struct *p;
1278
1279	p = get_proc_task(inode);
1280	if (!p)
1281		return -ESRCH;
1282	proc_sched_autogroup_show_task(p, m);
1283
1284	put_task_struct(p);
1285
1286	return 0;
1287}
1288
1289static ssize_t
1290sched_autogroup_write(struct file *file, const char __user *buf,
1291	    size_t count, loff_t *offset)
1292{
1293	struct inode *inode = file->f_path.dentry->d_inode;
1294	struct task_struct *p;
1295	char buffer[PROC_NUMBUF];
1296	int nice;
1297	int err;
1298
1299	memset(buffer, 0, sizeof(buffer));
1300	if (count > sizeof(buffer) - 1)
1301		count = sizeof(buffer) - 1;
1302	if (copy_from_user(buffer, buf, count))
1303		return -EFAULT;
1304
1305	err = kstrtoint(strstrip(buffer), 0, &nice);
1306	if (err < 0)
1307		return err;
1308
1309	p = get_proc_task(inode);
1310	if (!p)
1311		return -ESRCH;
1312
1313	err = proc_sched_autogroup_set_nice(p, nice);
1314	if (err)
1315		count = err;
1316
1317	put_task_struct(p);
1318
1319	return count;
1320}
1321
1322static int sched_autogroup_open(struct inode *inode, struct file *filp)
1323{
1324	int ret;
1325
1326	ret = single_open(filp, sched_autogroup_show, NULL);
1327	if (!ret) {
1328		struct seq_file *m = filp->private_data;
1329
1330		m->private = inode;
1331	}
1332	return ret;
1333}
1334
/* File operations for /proc/<pid>/autogroup. */
static const struct file_operations proc_pid_sched_autogroup_operations = {
	.open		= sched_autogroup_open,
	.read		= seq_read,
	.write		= sched_autogroup_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1342
1343#endif /* CONFIG_SCHED_AUTOGROUP */
1344
1345static ssize_t comm_write(struct file *file, const char __user *buf,
1346				size_t count, loff_t *offset)
1347{
1348	struct inode *inode = file->f_path.dentry->d_inode;
1349	struct task_struct *p;
1350	char buffer[TASK_COMM_LEN];
 
1351
1352	memset(buffer, 0, sizeof(buffer));
1353	if (count > sizeof(buffer) - 1)
1354		count = sizeof(buffer) - 1;
1355	if (copy_from_user(buffer, buf, count))
1356		return -EFAULT;
1357
1358	p = get_proc_task(inode);
1359	if (!p)
1360		return -ESRCH;
1361
1362	if (same_thread_group(current, p))
1363		set_task_comm(p, buffer);
1364	else
1365		count = -EINVAL;
1366
1367	put_task_struct(p);
1368
1369	return count;
1370}
1371
1372static int comm_show(struct seq_file *m, void *v)
1373{
1374	struct inode *inode = m->private;
1375	struct task_struct *p;
1376
1377	p = get_proc_task(inode);
1378	if (!p)
1379		return -ESRCH;
1380
1381	task_lock(p);
1382	seq_printf(m, "%s\n", p->comm);
1383	task_unlock(p);
1384
1385	put_task_struct(p);
1386
1387	return 0;
1388}
1389
static int comm_open(struct inode *inode, struct file *filp)
{
	/* single_open() stores the inode in m->private for comm_show(). */
	return single_open(filp, comm_show, inode);
}
1394
/* File operations for /proc/<pid>/comm. */
static const struct file_operations proc_pid_set_comm_operations = {
	.open		= comm_open,
	.read		= seq_read,
	.write		= comm_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1402
1403static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
1404{
1405	struct task_struct *task;
1406	struct mm_struct *mm;
1407	struct file *exe_file;
1408
1409	task = get_proc_task(dentry->d_inode);
1410	if (!task)
1411		return -ENOENT;
1412	mm = get_task_mm(task);
1413	put_task_struct(task);
1414	if (!mm)
1415		return -ENOENT;
1416	exe_file = get_mm_exe_file(mm);
1417	mmput(mm);
1418	if (exe_file) {
1419		*exe_path = exe_file->f_path;
1420		path_get(&exe_file->f_path);
1421		fput(exe_file);
1422		return 0;
1423	} else
1424		return -ENOENT;
1425}
1426
/*
 * follow_link for /proc/<pid> symlinks (exe, cwd, root, fd/N, ...):
 * dispatches to the per-link proc_get_link() callback, which leaves a
 * held path in nd->path on success.
 */
static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	int error = -EACCES;

	/* We don't need a base pointer in the /proc filesystem */
	path_put(&nd->path);

	/* Are we allowed to snoop on the tasks file descriptors? */
	if (!proc_fd_access_allowed(inode))
		goto out;

	error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
out:
	return ERR_PTR(error);
}
1443
/*
 * Resolve @path to a pathname string and copy up to @buflen bytes of it
 * to userspace.  Returns the number of bytes copied or a negative errno.
 */
static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
{
	char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
	char *pathname;
	int len;

	if (!tmp)
		return -ENOMEM;

	pathname = d_path(path, tmp, PAGE_SIZE);
	len = PTR_ERR(pathname);
	if (IS_ERR(pathname))
		goto out;
	/*
	 * d_path() builds the string at the end of the buffer, so the
	 * string runs from pathname to the NUL at tmp + PAGE_SIZE - 1.
	 */
	len = tmp + PAGE_SIZE - 1 - pathname;

	if (len > buflen)
		len = buflen;
	if (copy_to_user(buffer, pathname, len))
		len = -EFAULT;
 out:
	free_page((unsigned long)tmp);
	return len;
}
1467
1468static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
1469{
1470	int error = -EACCES;
1471	struct inode *inode = dentry->d_inode;
1472	struct path path;
1473
1474	/* Are we allowed to snoop on the tasks file descriptors? */
1475	if (!proc_fd_access_allowed(inode))
1476		goto out;
1477
1478	error = PROC_I(inode)->op.proc_get_link(dentry, &path);
1479	if (error)
1480		goto out;
1481
1482	error = do_proc_readlink(&path, buffer, buflen);
1483	path_put(&path);
1484out:
1485	return error;
1486}
1487
/* Inode operations shared by all /proc/<pid> symlink entries. */
static const struct inode_operations proc_pid_link_inode_operations = {
	.readlink	= proc_pid_readlink,
	.follow_link	= proc_pid_follow_link,
	.setattr	= proc_setattr,
};
1493
1494
1495/* building an inode */
1496
1497static int task_dumpable(struct task_struct *task)
1498{
1499	int dumpable = 0;
1500	struct mm_struct *mm;
1501
1502	task_lock(task);
1503	mm = task->mm;
1504	if (mm)
1505		dumpable = get_dumpable(mm);
1506	task_unlock(task);
1507	if(dumpable == 1)
1508		return 1;
1509	return 0;
1510}
1511
/*
 * Allocate and initialise a /proc inode for @task on @sb.  The inode
 * holds a reference to the task's struct pid (not the task itself).
 * If the task is dumpable, the inode takes the task's effective
 * uid/gid; otherwise it stays at the new_inode() defaults.
 * Returns the inode, or NULL on allocation/pid failure.
 */
struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
{
	struct inode * inode;
	struct proc_inode *ei;
	const struct cred *cred;

	/* We need a new inode */

	inode = new_inode(sb);
	if (!inode)
		goto out;

	/* Common stuff */
	ei = PROC_I(inode);
	inode->i_ino = get_next_ino();
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->i_op = &proc_def_inode_operations;

	/*
	 * grab the reference to task.
	 */
	ei->pid = get_task_pid(task, PIDTYPE_PID);
	if (!ei->pid)
		goto out_unlock;

	if (task_dumpable(task)) {
		rcu_read_lock();
		cred = __task_cred(task);
		inode->i_uid = cred->euid;
		inode->i_gid = cred->egid;
		rcu_read_unlock();
	}
	security_task_to_inode(task, inode);

out:
	return inode;

out_unlock:
	iput(inode);
	return NULL;
}
1553
/*
 * getattr for /proc/<pid> entries.  Reports root ownership unless the
 * task is dumpable (or the entry is a world-readable/executable dir),
 * in which case the task's effective uid/gid are reported.  Returns
 * -ENOENT if hidepid-style permission checks deny access.
 */
int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *task;
	const struct cred *cred;
	struct pid_namespace *pid = dentry->d_sb->s_fs_info;

	generic_fillattr(inode, stat);

	/* RCU keeps the task and its creds stable while we read them. */
	rcu_read_lock();
	stat->uid = GLOBAL_ROOT_UID;
	stat->gid = GLOBAL_ROOT_GID;
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		if (!has_pid_permissions(pid, task, 2)) {
			rcu_read_unlock();
			/*
			 * This doesn't prevent learning whether PID exists,
			 * it only makes getattr() consistent with readdir().
			 */
			return -ENOENT;
		}
		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
		    task_dumpable(task)) {
			cred = __task_cred(task);
			stat->uid = cred->euid;
			stat->gid = cred->egid;
		}
	}
	rcu_read_unlock();
	return 0;
}
1586
1587/* dentry stuff */
1588
1589/*
1590 *	Exceptional case: normally we are not allowed to unhash a busy
1591 * directory. In this case, however, we can do it - no aliasing problems
1592 * due to the way we treat inodes.
1593 *
1594 * Rewrite the inode's ownerships here because the owning task may have
1595 * performed a setuid(), etc.
1596 *
1597 * Before the /proc/pid/status file was created the only way to read
1598 * the effective uid of a /process was to stat /proc/pid.  Reading
1599 * /proc/pid/status is slow enough that procps and other packages
1600 * kept stating /proc/pid.  To keep the rules in /proc simple I have
1601 * made this apply to all per process world readable and executable
1602 * directories.
1603 */
/*
 * d_revalidate for /proc/<pid> dentries: refresh the inode's ownership
 * from the (possibly changed) task credentials, or drop the dentry if
 * the task is gone.  Returns 1 when still valid, 0 when dropped,
 * -ECHILD when called in RCU-walk mode (we need to sleep).
 */
int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode;
	struct task_struct *task;
	const struct cred *cred;

	if (nd && nd->flags & LOOKUP_RCU)
		return -ECHILD;

	inode = dentry->d_inode;
	task = get_proc_task(inode);

	if (task) {
		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
		    task_dumpable(task)) {
			rcu_read_lock();
			cred = __task_cred(task);
			inode->i_uid = cred->euid;
			inode->i_gid = cred->egid;
			rcu_read_unlock();
		} else {
			inode->i_uid = GLOBAL_ROOT_UID;
			inode->i_gid = GLOBAL_ROOT_GID;
		}
		/* setuid/setgid bits never make sense on proc entries. */
		inode->i_mode &= ~(S_ISUID | S_ISGID);
		security_task_to_inode(task, inode);
		put_task_struct(task);
		return 1;
	}
	d_drop(dentry);
	return 0;
}
1636
/* d_delete callback: decide whether the dentry should be killed on dput. */
static int pid_delete_dentry(const struct dentry * dentry)
{
	/* Is the task we represent dead?
	 * If so, then don't put the dentry on the lru list,
	 * kill it immediately.
	 */
	return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
}
1645
/* Dentry operations for /proc/<pid> entries. */
const struct dentry_operations pid_dentry_operations =
{
	.d_revalidate	= pid_revalidate,
	.d_delete	= pid_delete_dentry,
};
1651
1652/* Lookups */
1653
1654/*
1655 * Fill a directory entry.
1656 *
1657 * If possible create the dcache entry and derive our inode number and
1658 * file type from dcache entry.
1659 *
1660 * Since all of the proc inode numbers are dynamically generated, the inode
1661 * numbers do not exist until the inode is cache.  This means creating the
1662 * the dcache entry in readdir is necessary to keep the inode numbers
1663 * reported by readdir in sync with the inode numbers reported
1664 * by stat.
1665 */
1666int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1667	const char *name, int len,
1668	instantiate_t instantiate, struct task_struct *task, const void *ptr)
1669{
1670	struct dentry *child, *dir = filp->f_path.dentry;
 
1671	struct inode *inode;
1672	struct qstr qname;
1673	ino_t ino = 0;
1674	unsigned type = DT_UNKNOWN;
1675
1676	qname.name = name;
1677	qname.len  = len;
1678	qname.hash = full_name_hash(name, len);
1679
1680	child = d_lookup(dir, &qname);
1681	if (!child) {
1682		struct dentry *new;
1683		new = d_alloc(dir, &qname);
1684		if (new) {
1685			child = instantiate(dir->d_inode, new, task, ptr);
1686			if (child)
1687				dput(new);
1688			else
1689				child = new;
1690		}
1691	}
1692	if (!child || IS_ERR(child) || !child->d_inode)
1693		goto end_instantiate;
1694	inode = child->d_inode;
1695	if (inode) {
1696		ino = inode->i_ino;
1697		type = inode->i_mode >> 12;
1698	}
 
 
 
1699	dput(child);
 
 
1700end_instantiate:
1701	if (!ino)
1702		ino = find_inode_number(dir, &qname);
1703	if (!ino)
1704		ino = 1;
1705	return filldir(dirent, name, len, filp->f_pos, ino, type);
1706}
1707
1708static unsigned name_to_int(struct dentry *dentry)
1709{
1710	const char *name = dentry->d_name.name;
1711	int len = dentry->d_name.len;
1712	unsigned n = 0;
1713
1714	if (len > 1 && *name == '0')
1715		goto out;
1716	while (len-- > 0) {
1717		unsigned c = *name++ - '0';
1718		if (c > 9)
1719			goto out;
1720		if (n >= (~0U-9)/10)
1721			goto out;
1722		n *= 10;
1723		n += c;
1724	}
1725	return n;
1726out:
1727	return ~0U;
1728}
1729
1730#define PROC_FDINFO_MAX 64
1731
/*
 * Look up the file behind a /proc/<pid>/fd/<N> inode.  On success,
 * optionally return a held path in *@path and/or a formatted
 * "pos:/flags:" description in *@info (at most PROC_FDINFO_MAX bytes).
 * Returns 0 on success, -ENOENT if the task, its files, or the fd is
 * gone.
 */
static int proc_fd_info(struct inode *inode, struct path *path, char *info)
{
	struct task_struct *task = get_proc_task(inode);
	struct files_struct *files = NULL;
	struct file *file;
	int fd = proc_fd(inode);

	if (task) {
		files = get_files_struct(task);
		put_task_struct(task);
	}
	if (files) {
		/*
		 * We are not taking a ref to the file structure, so we must
		 * hold ->file_lock.
		 */
		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			unsigned int f_flags;
			struct fdtable *fdt;

			/* O_CLOEXEC lives in the fdtable bitmap, not f_flags. */
			fdt = files_fdtable(files);
			f_flags = file->f_flags & ~O_CLOEXEC;
			if (close_on_exec(fd, fdt))
				f_flags |= O_CLOEXEC;

			if (path) {
				*path = file->f_path;
				path_get(&file->f_path);
			}
			if (info)
				snprintf(info, PROC_FDINFO_MAX,
					 "pos:\t%lli\n"
					 "flags:\t0%o\n",
					 (long long) file->f_pos,
					 f_flags);
			spin_unlock(&files->file_lock);
			put_files_struct(files);
			return 0;
		}
		spin_unlock(&files->file_lock);
		put_files_struct(files);
	}
	return -ENOENT;
}
1778
/* proc_get_link() callback for /proc/<pid>/fd/<N> symlinks. */
static int proc_fd_link(struct dentry *dentry, struct path *path)
{
	return proc_fd_info(dentry->d_inode, path, NULL);
}
1783
/*
 * d_revalidate for /proc/<pid>/fd/<N> dentries: confirm the fd still
 * exists, refresh the inode's ownership from the task credentials, and
 * rebuild the symlink permission bits from the file's f_mode.  Returns
 * 1 when valid, 0 when dropped, -ECHILD in RCU-walk mode.
 */
static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode;
	struct task_struct *task;
	int fd;
	struct files_struct *files;
	const struct cred *cred;

	if (nd && nd->flags & LOOKUP_RCU)
		return -ECHILD;

	inode = dentry->d_inode;
	task = get_proc_task(inode);
	fd = proc_fd(inode);

	if (task) {
		files = get_files_struct(task);
		if (files) {
			struct file *file;
			rcu_read_lock();
			file = fcheck_files(files, fd);
			if (file) {
				/* Snapshot f_mode before dropping RCU/files. */
				unsigned f_mode = file->f_mode;

				rcu_read_unlock();
				put_files_struct(files);

				if (task_dumpable(task)) {
					rcu_read_lock();
					cred = __task_cred(task);
					inode->i_uid = cred->euid;
					inode->i_gid = cred->egid;
					rcu_read_unlock();
				} else {
					inode->i_uid = GLOBAL_ROOT_UID;
					inode->i_gid = GLOBAL_ROOT_GID;
				}

				/* Symlink mode mirrors the fd's read/write mode. */
				if (S_ISLNK(inode->i_mode)) {
					unsigned i_mode = S_IFLNK;
					if (f_mode & FMODE_READ)
						i_mode |= S_IRUSR | S_IXUSR;
					if (f_mode & FMODE_WRITE)
						i_mode |= S_IWUSR | S_IXUSR;
					inode->i_mode = i_mode;
				}

				security_task_to_inode(task, inode);
				put_task_struct(task);
				return 1;
			}
			rcu_read_unlock();
			put_files_struct(files);
		}
		put_task_struct(task);
	}
	d_drop(dentry);
	return 0;
}
1843
/* Dentry operations for /proc/<pid>/fd/<N> entries. */
static const struct dentry_operations tid_fd_dentry_operations =
{
	.d_revalidate	= tid_fd_revalidate,
	.d_delete	= pid_delete_dentry,
};
1849
1850static struct dentry *proc_fd_instantiate(struct inode *dir,
1851	struct dentry *dentry, struct task_struct *task, const void *ptr)
1852{
1853	unsigned fd = (unsigned long)ptr;
1854 	struct inode *inode;
1855 	struct proc_inode *ei;
1856	struct dentry *error = ERR_PTR(-ENOENT);
1857
1858	inode = proc_pid_make_inode(dir->i_sb, task);
1859	if (!inode)
1860		goto out;
1861	ei = PROC_I(inode);
1862	ei->fd = fd;
1863
1864	inode->i_mode = S_IFLNK;
1865	inode->i_op = &proc_pid_link_inode_operations;
1866	inode->i_size = 64;
1867	ei->op.proc_get_link = proc_fd_link;
1868	d_set_d_op(dentry, &tid_fd_dentry_operations);
1869	d_add(dentry, inode);
1870	/* Close the race of the process dying before we return the dentry */
1871	if (tid_fd_revalidate(dentry, NULL))
1872		error = NULL;
1873
1874 out:
1875	return error;
1876}
1877
1878static struct dentry *proc_lookupfd_common(struct inode *dir,
1879					   struct dentry *dentry,
1880					   instantiate_t instantiate)
1881{
1882	struct task_struct *task = get_proc_task(dir);
1883	unsigned fd = name_to_int(dentry);
1884	struct dentry *result = ERR_PTR(-ENOENT);
1885
1886	if (!task)
1887		goto out_no_task;
1888	if (fd == ~0U)
1889		goto out;
1890
1891	result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
1892out:
1893	put_task_struct(task);
1894out_no_task:
1895	return result;
1896}
1897
/*
 * Shared readdir for /proc/<pid>/fd and /proc/<pid>/fdinfo: emits ".",
 * ".." and one entry per open file descriptor, using @instantiate to
 * build each dentry.  f_pos 0 and 1 are the dot entries; fd N lives at
 * f_pos N + 2.
 */
static int proc_readfd_common(struct file * filp, void * dirent,
			      filldir_t filldir, instantiate_t instantiate)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct task_struct *p = get_proc_task(inode);
	unsigned int fd, ino;
	int retval;
	struct files_struct * files;

	retval = -ENOENT;
	if (!p)
		goto out_no_task;
	retval = 0;

	fd = filp->f_pos;
	switch (fd) {
		case 0:
			if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
				goto out;
			filp->f_pos++;
			/* fall through */
		case 1:
			ino = parent_ino(dentry);
			if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
				goto out;
			filp->f_pos++;
			/* fall through */
		default:
			files = get_files_struct(p);
			if (!files)
				goto out;
			rcu_read_lock();
			for (fd = filp->f_pos-2;
			     fd < files_fdtable(files)->max_fds;
			     fd++, filp->f_pos++) {
				char name[PROC_NUMBUF];
				int len;
				int rv;

				if (!fcheck_files(files, fd))
					continue;
				/* Drop RCU across proc_fill_cache(), which may block. */
				rcu_read_unlock();

				len = snprintf(name, sizeof(name), "%d", fd);
				rv = proc_fill_cache(filp, dirent, filldir,
						     name, len, instantiate, p,
						     (void *)(unsigned long)fd);
				if (rv < 0)
					goto out_fd_loop;
				rcu_read_lock();
			}
			rcu_read_unlock();
out_fd_loop:
			put_files_struct(files);
	}
out:
	put_task_struct(p);
out_no_task:
	return retval;
}
1957
/* Lookup for /proc/<pid>/fd: instantiates fd symlink entries. */
static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
				    struct nameidata *nd)
{
	return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
}
1963
/* readdir for /proc/<pid>/fd: one symlink entry per open descriptor. */
static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
{
	return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
}
1968
1969static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
1970				      size_t len, loff_t *ppos)
1971{
1972	char tmp[PROC_FDINFO_MAX];
1973	int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, tmp);
1974	if (!err)
1975		err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp));
1976	return err;
1977}
1978
/* File operations for /proc/<pid>/fdinfo/<N>. */
static const struct file_operations proc_fdinfo_file_operations = {
	.open           = nonseekable_open,
	.read		= proc_fdinfo_read,
	.llseek		= no_llseek,
};
1984
/* Directory file operations for /proc/<pid>/fd. */
static const struct file_operations proc_fd_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_readfd,
	.llseek		= default_llseek,
};
1990
1991#ifdef CONFIG_CHECKPOINT_RESTORE
1992
1993/*
1994 * dname_to_vma_addr - maps a dentry name into two unsigned longs
1995 * which represent vma start and end addresses.
1996 */
1997static int dname_to_vma_addr(struct dentry *dentry,
1998			     unsigned long *start, unsigned long *end)
1999{
2000	if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
2001		return -EINVAL;
2002
2003	return 0;
2004}
2005
/*
 * d_revalidate for /proc/<pid>/map_files entries: the entry stays valid
 * only while a vma with exactly the named start/end addresses still
 * exists in the task's mm.  Requires CAP_SYS_ADMIN.  Returns 1 when
 * valid, 0 or negative errno otherwise (dropping the dentry).
 */
static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	unsigned long vm_start, vm_end;
	bool exact_vma_exists = false;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	const struct cred *cred;
	struct inode *inode;
	int status = 0;

	if (nd && nd->flags & LOOKUP_RCU)
		return -ECHILD;

	if (!capable(CAP_SYS_ADMIN)) {
		status = -EACCES;
		goto out_notask;
	}

	inode = dentry->d_inode;
	task = get_proc_task(inode);
	if (!task)
		goto out_notask;

	/* mm_access() enforces ptrace-style access checks. */
	mm = mm_access(task, PTRACE_MODE_READ);
	if (IS_ERR_OR_NULL(mm))
		goto out;

	if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
		down_read(&mm->mmap_sem);
		exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
		up_read(&mm->mmap_sem);
	}

	mmput(mm);

	if (exact_vma_exists) {
		if (task_dumpable(task)) {
			rcu_read_lock();
			cred = __task_cred(task);
			inode->i_uid = cred->euid;
			inode->i_gid = cred->egid;
			rcu_read_unlock();
		} else {
			inode->i_uid = GLOBAL_ROOT_UID;
			inode->i_gid = GLOBAL_ROOT_GID;
		}
		security_task_to_inode(task, inode);
		status = 1;
	}

out:
	put_task_struct(task);

out_notask:
	if (status <= 0)
		d_drop(dentry);

	return status;
}
2065
/* Dentry operations for /proc/<pid>/map_files entries. */
static const struct dentry_operations tid_map_files_dentry_operations = {
	.d_revalidate	= map_files_d_revalidate,
	.d_delete	= pid_delete_dentry,
};
2070
/*
 * proc_get_link() callback for /proc/<pid>/map_files entries: returns a
 * held path to the file mapped by the vma whose exact start/end match
 * the dentry name, or -ENOENT/-EINVAL when the task, mm, or vma is gone.
 */
static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
{
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct mm_struct *mm;
	int rc;

	rc = -ENOENT;
	task = get_proc_task(dentry->d_inode);
	if (!task)
		goto out;

	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	rc = dname_to_vma_addr(dentry, &vm_start, &vm_end);
	if (rc)
		goto out_mmput;

	/* mmap_sem keeps the vma (and its vm_file) stable during lookup. */
	down_read(&mm->mmap_sem);
	vma = find_exact_vma(mm, vm_start, vm_end);
	if (vma && vma->vm_file) {
		*path = vma->vm_file->f_path;
		path_get(path);
		rc = 0;
	}
	up_read(&mm->mmap_sem);

out_mmput:
	mmput(mm);
out:
	return rc;
}
2107
/* One collected mapping for proc_map_files_readdir()'s second pass. */
struct map_files_info {
	struct file	*file;		/* held reference to the mapped file */
	unsigned long	len;		/* length of the formatted name */
	unsigned char	name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */
};
2113
/*
 * Instantiate a /proc/<pid>/map_files symlink dentry for the mapped
 * file passed in @ptr.  The symlink's user read/write bits mirror the
 * file's f_mode.  Returns NULL on success (dentry added),
 * ERR_PTR(-ENOENT) on failure.
 */
static struct dentry *
proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
			   struct task_struct *task, const void *ptr)
{
	const struct file *file = ptr;
	struct proc_inode *ei;
	struct inode *inode;

	if (!file)
		return ERR_PTR(-ENOENT);

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		return ERR_PTR(-ENOENT);

	ei = PROC_I(inode);
	ei->op.proc_get_link = proc_map_files_get_link;

	inode->i_op = &proc_pid_link_inode_operations;
	inode->i_size = 64;
	inode->i_mode = S_IFLNK;

	if (file->f_mode & FMODE_READ)
		inode->i_mode |= S_IRUSR;
	if (file->f_mode & FMODE_WRITE)
		inode->i_mode |= S_IWUSR;

	d_set_d_op(dentry, &tid_map_files_dentry_operations);
	d_add(dentry, inode);

	return NULL;
}
2146
/*
 * Lookup for /proc/<pid>/map_files (CAP_SYS_ADMIN + ptrace access
 * required): parse the "<start>-<end>" name, find the matching vma, and
 * instantiate a symlink to its mapped file.
 */
static struct dentry *proc_map_files_lookup(struct inode *dir,
		struct dentry *dentry, struct nameidata *nd)
{
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct dentry *result;
	struct mm_struct *mm;

	result = ERR_PTR(-EACCES);
	if (!capable(CAP_SYS_ADMIN))
		goto out;

	result = ERR_PTR(-ENOENT);
	task = get_proc_task(dir);
	if (!task)
		goto out;

	result = ERR_PTR(-EACCES);
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_put_task;

	result = ERR_PTR(-ENOENT);
	if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
		goto out_put_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_put_task;

	/* mmap_sem is held across instantiation so the vma cannot vanish. */
	down_read(&mm->mmap_sem);
	vma = find_exact_vma(mm, vm_start, vm_end);
	if (!vma)
		goto out_no_vma;

	result = proc_map_files_instantiate(dir, dentry, task, vma->vm_file);

out_no_vma:
	up_read(&mm->mmap_sem);
	mmput(mm);
out_put_task:
	put_task_struct(task);
out:
	return result;
}
2192
/* Inode operations for the /proc/<pid>/map_files directory. */
static const struct inode_operations proc_map_files_inode_operations = {
	.lookup		= proc_map_files_lookup,
	.permission	= proc_fd_permission,
	.setattr	= proc_setattr,
};
2198
/*
 * readdir for /proc/<pid>/map_files (CAP_SYS_ADMIN + ptrace access
 * required): emits ".", ".." and one "<start>-<end>" symlink per
 * file-backed vma.  f_pos 0/1 are the dot entries; mapping N is at
 * f_pos N + 2.
 */
static int
proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct mm_struct *mm;
	ino_t ino;
	int ret;

	ret = -EACCES;
	if (!capable(CAP_SYS_ADMIN))
		goto out;

	ret = -ENOENT;
	task = get_proc_task(inode);
	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_put_task;

	ret = 0;
	switch (filp->f_pos) {
	case 0:
		ino = inode->i_ino;
		if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
			goto out_put_task;
		filp->f_pos++;
		/* fall through */
	case 1:
		ino = parent_ino(dentry);
		if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
			goto out_put_task;
		filp->f_pos++;
		/* fall through */
	default:
	{
		unsigned long nr_files, pos, i;
		struct flex_array *fa = NULL;
		struct map_files_info info;
		struct map_files_info *p;

		mm = get_task_mm(task);
		if (!mm)
			goto out_put_task;
		down_read(&mm->mmap_sem);

		nr_files = 0;

		/*
		 * We need two passes here:
		 *
		 *  1) Collect vmas of mapped files with mmap_sem taken
		 *  2) Release mmap_sem and instantiate entries
		 *
		 * otherwise we get lockdep complained, since filldir()
		 * routine might require mmap_sem taken in might_fault().
		 */

		/* Pass 1a: count entries at or beyond the current f_pos. */
		for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
			if (vma->vm_file && ++pos > filp->f_pos)
				nr_files++;
		}

		if (nr_files) {
			fa = flex_array_alloc(sizeof(info), nr_files,
						GFP_KERNEL);
			if (!fa || flex_array_prealloc(fa, 0, nr_files,
							GFP_KERNEL)) {
				ret = -ENOMEM;
				if (fa)
					flex_array_free(fa);
				up_read(&mm->mmap_sem);
				mmput(mm);
				goto out_put_task;
			}
			/* Pass 1b: snapshot name and file ref of each entry. */
			for (i = 0, vma = mm->mmap, pos = 2; vma;
					vma = vma->vm_next) {
				if (!vma->vm_file)
					continue;
				if (++pos <= filp->f_pos)
					continue;

				get_file(vma->vm_file);
				info.file = vma->vm_file;
				info.len = snprintf(info.name,
						sizeof(info.name), "%lx-%lx",
						vma->vm_start, vma->vm_end);
				if (flex_array_put(fa, i++, &info, GFP_KERNEL))
					BUG();
			}
		}
		up_read(&mm->mmap_sem);

		/* Pass 2: emit the collected entries without mmap_sem. */
		for (i = 0; i < nr_files; i++) {
			p = flex_array_get(fa, i);
			ret = proc_fill_cache(filp, dirent, filldir,
					      p->name, p->len,
					      proc_map_files_instantiate,
					      task, p->file);
			if (ret)
				break;
			filp->f_pos++;
			fput(p->file);
		}
		for (; i < nr_files; i++) {
			/*
			 * In case of error don't forget
			 * to put rest of file refs.
			 */
			p = flex_array_get(fa, i);
			fput(p->file);
		}
		if (fa)
			flex_array_free(fa);
		mmput(mm);
	}
	}

out_put_task:
	put_task_struct(task);
out:
	return ret;
}
2324
/* file_operations for the /proc/<pid>/map_files directory itself */
static const struct file_operations proc_map_files_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_map_files_readdir,
	.llseek		= default_llseek,
};
2330
2331#endif /* CONFIG_CHECKPOINT_RESTORE */
 
 
 
 
 
 
 
2332
2333/*
2334 * /proc/pid/fd needs a special permission handler so that a process can still
2335 * access /proc/self/fd after it has executed a setuid().
2336 */
2337static int proc_fd_permission(struct inode *inode, int mask)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2338{
2339	int rv = generic_permission(inode, mask);
2340	if (rv == 0)
2341		return 0;
2342	if (task_pid(current) == proc_pid(inode))
2343		rv = 0;
2344	return rv;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2345}
2346
2347/*
2348 * proc directories can do almost nothing..
2349 */
/*
 * /proc/<pid>/fd directory inode ops: fd lookup plus the relaxed
 * permission check; proc directories can do almost nothing else.
 */
static const struct inode_operations proc_fd_inode_operations = {
	.lookup		= proc_lookupfd,
	.permission	= proc_fd_permission,
	.setattr	= proc_setattr,
};
2355
2356static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
2357	struct dentry *dentry, struct task_struct *task, const void *ptr)
2358{
2359	unsigned fd = (unsigned long)ptr;
2360 	struct inode *inode;
2361 	struct proc_inode *ei;
2362	struct dentry *error = ERR_PTR(-ENOENT);
2363
2364	inode = proc_pid_make_inode(dir->i_sb, task);
2365	if (!inode)
2366		goto out;
2367	ei = PROC_I(inode);
2368	ei->fd = fd;
2369	inode->i_mode = S_IFREG | S_IRUSR;
2370	inode->i_fop = &proc_fdinfo_file_operations;
2371	d_set_d_op(dentry, &tid_fd_dentry_operations);
2372	d_add(dentry, inode);
2373	/* Close the race of the process dying before we return the dentry */
2374	if (tid_fd_revalidate(dentry, NULL))
2375		error = NULL;
2376
2377 out:
2378	return error;
 
2379}
2380
/* ->lookup for /proc/<pid>/fdinfo: resolve one fd-number name. */
static struct dentry *proc_lookupfdinfo(struct inode *dir,
					struct dentry *dentry,
					struct nameidata *nd)
{
	return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
}
2387
/* ->readdir for /proc/<pid>/fdinfo: one entry per open fd. */
static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
{
	return proc_readfd_common(filp, dirent, filldir,
				  proc_fdinfo_instantiate);
}
2393
/* file_operations for the /proc/<pid>/fdinfo directory itself */
static const struct file_operations proc_fdinfo_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_readfdinfo,
	.llseek		= default_llseek,
};
2399
/*
 * proc directories can do almost nothing: /proc/<pid>/fdinfo only
 * supports lookup and (trivial) setattr.
 */
static const struct inode_operations proc_fdinfo_inode_operations = {
	.lookup		= proc_lookupfdinfo,
	.setattr	= proc_setattr,
};
2407
2408
/*
 * Build inode + dentry for one table-driven per-task entry; @ptr is the
 * struct pid_entry describing it.  Returns NULL on success, or
 * ERR_PTR(-ENOENT) if inode creation fails or the task died meanwhile.
 */
static struct dentry *proc_pident_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	const struct pid_entry *p = ptr;
	struct inode *inode;
	struct proc_inode *ei;
	struct dentry *error = ERR_PTR(-ENOENT);

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	ei = PROC_I(inode);
	inode->i_mode = p->mode;
	if (S_ISDIR(inode->i_mode))
		set_nlink(inode, 2);	/* Use getattr to fix if necessary */
	if (p->iop)
		inode->i_op = p->iop;
	if (p->fop)
		inode->i_fop = p->fop;
	ei->op = p->op;
	d_set_d_op(dentry, &pid_dentry_operations);
	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}
2438
2439static struct dentry *proc_pident_lookup(struct inode *dir, 
2440					 struct dentry *dentry,
2441					 const struct pid_entry *ents,
2442					 unsigned int nents)
2443{
2444	struct dentry *error;
2445	struct task_struct *task = get_proc_task(dir);
2446	const struct pid_entry *p, *last;
2447
2448	error = ERR_PTR(-ENOENT);
2449
2450	if (!task)
2451		goto out_no_task;
2452
2453	/*
2454	 * Yes, it does not scale. And it should not. Don't add
2455	 * new entries into /proc/<tgid>/ without very good reasons.
2456	 */
2457	last = &ents[nents - 1];
2458	for (p = ents; p <= last; p++) {
2459		if (p->len != dentry->d_name.len)
2460			continue;
2461		if (!memcmp(dentry->d_name.name, p->name, p->len))
2462			break;
2463	}
2464	if (p > last)
2465		goto out;
2466
2467	error = proc_pident_instantiate(dir, dentry, task, p);
2468out:
2469	put_task_struct(task);
2470out_no_task:
2471	return error;
2472}
2473
/* Emit one table entry to readdir, instantiating its dentry if needed. */
static int proc_pident_fill_cache(struct file *filp, void *dirent,
	filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
{
	return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
				proc_pident_instantiate, task, p);
}
2480
/*
 * Generic readdir over a fixed table of per-task entries.  f_pos 0 and
 * 1 are "." and ".."; positions >= 2 index into @ents.  Returns 1 when
 * the listing is complete, 0 when filldir stopped us early, -ENOENT if
 * the task is gone.
 */
static int proc_pident_readdir(struct file *filp,
		void *dirent, filldir_t filldir,
		const struct pid_entry *ents, unsigned int nents)
{
	int i;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct task_struct *task = get_proc_task(inode);
	const struct pid_entry *p, *last;
	ino_t ino;
	int ret;

	ret = -ENOENT;
	if (!task)
		goto out_no_task;

	ret = 0;
	i = filp->f_pos;
	switch (i) {
	case 0:
		ino = inode->i_ino;
		if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
			goto out;
		i++;
		filp->f_pos++;
		/* fall through */
	case 1:
		ino = parent_ino(dentry);
		if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
			goto out;
		i++;
		filp->f_pos++;
		/* fall through */
	default:
		i -= 2;	/* translate f_pos into a table index */
		if (i >= nents) {
			ret = 1;
			goto out;
		}
		p = ents + i;
		last = &ents[nents - 1];
		while (p <= last) {
			if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
				goto out;
			filp->f_pos++;
			p++;
		}
	}

	ret = 1;
out:
	put_task_struct(task);
out_no_task:
	return ret;
}
2536
2537#ifdef CONFIG_SECURITY
/*
 * Read handler for /proc/<pid>/attr/<name>: the security module formats
 * the attribute named by the dentry into a buffer it allocates (*p),
 * which we copy to userspace and then kfree (kfree(NULL) is a no-op on
 * the error path).
 */
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
				  size_t count, loff_t *ppos)
{
	struct inode * inode = file->f_path.dentry->d_inode;
	char *p = NULL;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	if (!task)
		return -ESRCH;

	length = security_getprocattr(task,
				      (char*)file->f_path.dentry->d_name.name,
				      &p);
	put_task_struct(task);
	if (length > 0)
		length = simple_read_from_buffer(buf, count, ppos, p, length);
	kfree(p);
	return length;
}
2558
/*
 * Write handler for /proc/<pid>/attr/<name>: copies at most one page of
 * user data and hands it to the security module.  Partial writes
 * (*ppos != 0) are rejected; writes larger than a page are truncated.
 */
static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
				   size_t count, loff_t *ppos)
{
	struct inode * inode = file->f_path.dentry->d_inode;
	char *page;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	length = -ESRCH;
	if (!task)
		goto out_no_task;
	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	/* No partial writes. */
	length = -EINVAL;
	if (*ppos != 0)
		goto out;

	length = -ENOMEM;
	page = (char*)__get_free_page(GFP_TEMPORARY);
	if (!page)
		goto out;

	length = -EFAULT;
	if (copy_from_user(page, buf, count))
		goto out_free;

	/* Guard against adverse ptrace interaction */
	length = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
	if (length < 0)
		goto out_free;

	length = security_setprocattr(task,
				      (char*)file->f_path.dentry->d_name.name,
				      (void*)page, count);
	mutex_unlock(&task->signal->cred_guard_mutex);
out_free:
	free_page((unsigned long) page);
out:
	put_task_struct(task);
out_no_task:
	return length;
}
2603
/* file_operations shared by every /proc/<pid>/attr/<name> file */
static const struct file_operations proc_pid_attr_operations = {
	.read		= proc_pid_attr_read,
	.write		= proc_pid_attr_write,
	.llseek		= generic_file_llseek,
};
2609
/* Entries of /proc/<pid>/attr; all served by proc_pid_attr_operations. */
static const struct pid_entry attr_dir_stuff[] = {
	REG("current",    S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("prev",       S_IRUGO,	   proc_pid_attr_operations),
	REG("exec",       S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("fscreate",   S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("keycreate",  S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
};
2618
2619static int proc_attr_dir_readdir(struct file * filp,
2620			     void * dirent, filldir_t filldir)
2621{
2622	return proc_pident_readdir(filp,dirent,filldir,
2623				   attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
2624}
2625
/* file_operations for the /proc/<pid>/attr directory itself */
static const struct file_operations proc_attr_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_attr_dir_readdir,
	.llseek		= default_llseek,
};
2631
/* ->lookup for /proc/<pid>/attr, driven by the attr_dir_stuff table. */
static struct dentry *proc_attr_dir_lookup(struct inode *dir,
				struct dentry *dentry, struct nameidata *nd)
{
	return proc_pident_lookup(dir, dentry,
				  attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
}
2638
/* inode_operations for the /proc/<pid>/attr directory itself */
static const struct inode_operations proc_attr_dir_inode_operations = {
	.lookup		= proc_attr_dir_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};
2644
2645#endif
2646
2647#ifdef CONFIG_ELF_CORE
2648static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
2649					 size_t count, loff_t *ppos)
2650{
2651	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
2652	struct mm_struct *mm;
2653	char buffer[PROC_NUMBUF];
2654	size_t len;
2655	int ret;
2656
2657	if (!task)
2658		return -ESRCH;
2659
2660	ret = 0;
2661	mm = get_task_mm(task);
2662	if (mm) {
2663		len = snprintf(buffer, sizeof(buffer), "%08lx\n",
2664			       ((mm->flags & MMF_DUMP_FILTER_MASK) >>
2665				MMF_DUMP_FILTER_SHIFT));
2666		mmput(mm);
2667		ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
2668	}
2669
2670	put_task_struct(task);
2671
2672	return ret;
2673}
2674
/*
 * Parse a hex/decimal word from userspace and copy it bit-for-bit into
 * the MMF_DUMP_* filter range of the task's mm->flags.  A trailing
 * newline is accepted; an empty parse is -EINVAL.  Returns the number
 * of characters consumed on success.
 */
static ssize_t proc_coredump_filter_write(struct file *file,
					  const char __user *buf,
					  size_t count,
					  loff_t *ppos)
{
	struct task_struct *task;
	struct mm_struct *mm;
	char buffer[PROC_NUMBUF], *end;
	unsigned int val;
	int ret;
	int i;
	unsigned long mask;

	ret = -EFAULT;
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		goto out_no_task;

	ret = -EINVAL;
	val = (unsigned int)simple_strtoul(buffer, &end, 0);
	if (*end == '\n')
		end++;
	if (end - buffer == 0)
		goto out_no_task;

	ret = -ESRCH;
	task = get_proc_task(file->f_dentry->d_inode);
	if (!task)
		goto out_no_task;

	ret = end - buffer;
	mm = get_task_mm(task);
	if (!mm)
		goto out_no_mm;

	/* set_bit/clear_bit are atomic, so no mm locking is needed here */
	for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
		if (val & mask)
			set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
		else
			clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
	}

	mmput(mm);
 out_no_mm:
	put_task_struct(task);
 out_no_task:
	return ret;
}
2725
/* file_operations for /proc/<pid>/coredump_filter */
static const struct file_operations proc_coredump_filter_operations = {
	.read		= proc_coredump_filter_read,
	.write		= proc_coredump_filter_write,
	.llseek		= generic_file_llseek,
};
2731#endif
2732
2733/*
2734 * /proc/self:
2735 */
2736static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
2737			      int buflen)
2738{
2739	struct pid_namespace *ns = dentry->d_sb->s_fs_info;
2740	pid_t tgid = task_tgid_nr_ns(current, ns);
2741	char tmp[PROC_NUMBUF];
2742	if (!tgid)
2743		return -ENOENT;
2744	sprintf(tmp, "%d", tgid);
2745	return vfs_readlink(dentry,buffer,buflen,tmp);
2746}
2747
/*
 * ->follow_link for /proc/self: stash the caller's tgid (formatted into
 * a __getname() buffer) in the nameidata.  The buffer is released by
 * proc_self_put_link(); error pointers are passed through nd_set_link.
 */
static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct pid_namespace *ns = dentry->d_sb->s_fs_info;
	pid_t tgid = task_tgid_nr_ns(current, ns);
	char *name = ERR_PTR(-ENOENT);
	if (tgid) {
		name = __getname();
		if (!name)
			name = ERR_PTR(-ENOMEM);
		else
			sprintf(name, "%d", tgid);
	}
	nd_set_link(nd, name);
	return NULL;
}
2763
/* Release the name buffer that proc_self_follow_link() allocated. */
static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
				void *cookie)
{
	char *name = nd_get_link(nd);

	if (!IS_ERR(name))
		__putname(name);
}
2771
/* inode_operations for the /proc/self symlink */
static const struct inode_operations proc_self_inode_operations = {
	.readlink	= proc_self_readlink,
	.follow_link	= proc_self_follow_link,
	.put_link	= proc_self_put_link,
};
2777
2778/*
2779 * proc base
2780 *
2781 * These are the directory entries in the root directory of /proc
 * that properly belong to the /proc filesystem, as they describe
 * something that is process related.
2784 */
/* Non-pid entries in the /proc root handled by this file: just "self". */
static const struct pid_entry proc_base_stuff[] = {
	NOD("self", S_IFLNK|S_IRWXUGO,
		&proc_self_inode_operations, NULL, {}),
};
2789
/*
 * Build inode + dentry for one proc_base_stuff entry (@ptr).  Unlike
 * proc_pident_instantiate() this allocates a plain inode and pins the
 * task's pid itself.  Returns NULL on success or an ERR_PTR.
 */
static struct dentry *proc_base_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	const struct pid_entry *p = ptr;
	struct inode *inode;
	struct proc_inode *ei;
	struct dentry *error;

	/* Allocate the inode */
	error = ERR_PTR(-ENOMEM);
	inode = new_inode(dir->i_sb);
	if (!inode)
		goto out;

	/* Initialize the inode */
	ei = PROC_I(inode);
	inode->i_ino = get_next_ino();
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	/*
	 * grab the reference to the task.
	 */
	ei->pid = get_task_pid(task, PIDTYPE_PID);
	if (!ei->pid)
		goto out_iput;

	inode->i_mode = p->mode;
	if (S_ISDIR(inode->i_mode))
		set_nlink(inode, 2);
	if (S_ISLNK(inode->i_mode))
		inode->i_size = 64;	/* nominal symlink size */
	if (p->iop)
		inode->i_op = p->iop;
	if (p->fop)
		inode->i_fop = p->fop;
	ei->op = p->op;
	d_add(dentry, inode);
	error = NULL;
out:
	return error;
out_iput:
	iput(inode);
	goto out;
}
2834
2835static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
2836{
2837	struct dentry *error;
2838	struct task_struct *task = get_proc_task(dir);
2839	const struct pid_entry *p, *last;
2840
2841	error = ERR_PTR(-ENOENT);
2842
2843	if (!task)
2844		goto out_no_task;
2845
2846	/* Lookup the directory entry */
2847	last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
2848	for (p = proc_base_stuff; p <= last; p++) {
2849		if (p->len != dentry->d_name.len)
2850			continue;
2851		if (!memcmp(dentry->d_name.name, p->name, p->len))
2852			break;
2853	}
2854	if (p > last)
2855		goto out;
2856
2857	error = proc_base_instantiate(dir, dentry, task, p);
2858
2859out:
2860	put_task_struct(task);
2861out_no_task:
2862	return error;
2863}
2864
/* Emit one proc_base_stuff entry to readdir. */
static int proc_base_fill_cache(struct file *filp, void *dirent,
	filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
{
	return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
				proc_base_instantiate, task, p);
}
2871
2872#ifdef CONFIG_TASK_IO_ACCOUNTING
/*
 * Format I/O accounting for /proc/<pid>/io into @buffer.  With
 * @whole != 0 the counters of the whole thread group are summed
 * (signal->ioac holds totals of already-dead threads); otherwise only
 * @task's own counters are reported.  Access requires PTRACE_MODE_READ;
 * cred_guard_mutex keeps that check stable against concurrent exec.
 */
static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
{
	struct task_io_accounting acct = task->ioac;
	unsigned long flags;
	int result;

	result = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (result)
		return result;

	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		result = -EACCES;
		goto out_unlock;
	}

	if (whole && lock_task_sighand(task, &flags)) {
		struct task_struct *t = task;

		task_io_accounting_add(&acct, &task->signal->ioac);
		while_each_thread(task, t)
			task_io_accounting_add(&acct, &t->ioac);

		unlock_task_sighand(task, &flags);
	}
	result = sprintf(buffer,
			"rchar: %llu\n"
			"wchar: %llu\n"
			"syscr: %llu\n"
			"syscw: %llu\n"
			"read_bytes: %llu\n"
			"write_bytes: %llu\n"
			"cancelled_write_bytes: %llu\n",
			(unsigned long long)acct.rchar,
			(unsigned long long)acct.wchar,
			(unsigned long long)acct.syscr,
			(unsigned long long)acct.syscw,
			(unsigned long long)acct.read_bytes,
			(unsigned long long)acct.write_bytes,
			(unsigned long long)acct.cancelled_write_bytes);
out_unlock:
	mutex_unlock(&task->signal->cred_guard_mutex);
	return result;
}
2916
/* /proc/<tid>/io: per-thread counters only. */
static int proc_tid_io_accounting(struct task_struct *task, char *buffer)
{
	return do_io_accounting(task, buffer, 0);
}
2921
/* /proc/<tgid>/io: counters summed over the whole thread group. */
static int proc_tgid_io_accounting(struct task_struct *task, char *buffer)
{
	return do_io_accounting(task, buffer, 1);
}
2926#endif /* CONFIG_TASK_IO_ACCOUNTING */
2927
2928#ifdef CONFIG_USER_NS
/*
 * Common open for /proc/<pid>/{uid,gid}_map: pin the task's user
 * namespace and hand it to the seq_file as its private data.  The
 * namespace reference is dropped in proc_id_map_release().
 */
static int proc_id_map_open(struct inode *inode, struct file *file,
	struct seq_operations *seq_ops)
{
	struct user_namespace *ns = NULL;
	struct task_struct *task;
	struct seq_file *seq;
	int ret = -EINVAL;

	task = get_proc_task(inode);
	if (task) {
		rcu_read_lock();
		ns = get_user_ns(task_cred_xxx(task, user_ns));
		rcu_read_unlock();
		put_task_struct(task);
	}
	if (!ns)
		goto err;

	ret = seq_open(file, seq_ops);
	if (ret)
		goto err_put_ns;

	seq = file->private_data;
	seq->private = ns;

	return 0;
err_put_ns:
	put_user_ns(ns);
err:
	return ret;
}
2960
2961static int proc_id_map_release(struct inode *inode, struct file *file)
2962{
2963	struct seq_file *seq = file->private_data;
2964	struct user_namespace *ns = seq->private;
2965	put_user_ns(ns);
2966	return seq_release(inode, file);
2967}
2968
/* ->open for /proc/<pid>/uid_map */
static int proc_uid_map_open(struct inode *inode, struct file *file)
{
	return proc_id_map_open(inode, file, &proc_uid_seq_operations);
}
2973
/* ->open for /proc/<pid>/gid_map */
static int proc_gid_map_open(struct inode *inode, struct file *file)
{
	return proc_id_map_open(inode, file, &proc_gid_seq_operations);
}
2978
 
 
 
 
 
/* file_operations for /proc/<pid>/uid_map */
static const struct file_operations proc_uid_map_operations = {
	.open		= proc_uid_map_open,
	.write		= proc_uid_map_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_id_map_release,
};
2986
/* file_operations for /proc/<pid>/gid_map */
static const struct file_operations proc_gid_map_operations = {
	.open		= proc_gid_map_open,
	.write		= proc_gid_map_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_id_map_release,
};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2994#endif /* CONFIG_USER_NS */
2995
2996static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
2997				struct pid *pid, struct task_struct *task)
2998{
2999	int err = lock_trace(task);
3000	if (!err) {
3001		seq_printf(m, "%08x\n", task->personality);
3002		unlock_trace(task);
3003	}
3004	return err;
3005}
3006
3007/*
3008 * Thread groups
3009 */
3010static const struct file_operations proc_task_operations;
3011static const struct inode_operations proc_task_inode_operations;
3012
/*
 * Entries published in every /proc/<tgid>/ directory.  Keep this table
 * small: proc_pident_lookup() scans it linearly on every lookup.
 */
static const struct pid_entry tgid_base_stuff[] = {
	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
#ifdef CONFIG_CHECKPOINT_RESTORE
	DIR("map_files",  S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
#endif
	DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
	DIR("ns",	  S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
#endif
	REG("environ",    S_IRUSR, proc_environ_operations),
	INF("auxv",       S_IRUSR, proc_pid_auxv),
	ONE("status",     S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUGO, proc_pid_personality),
	INF("limits",	  S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
	REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
#endif
	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	INF("syscall",    S_IRUGO, proc_pid_syscall),
#endif
	INF("cmdline",    S_IRUGO, proc_pid_cmdline),
	ONE("stat",       S_IRUGO, proc_tgid_stat),
	ONE("statm",      S_IRUGO, proc_pid_statm),
	REG("maps",       S_IRUGO, proc_pid_maps_operations),
#ifdef CONFIG_NUMA
	REG("numa_maps",  S_IRUGO, proc_pid_numa_maps_operations),
#endif
	REG("mem",        S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",        proc_cwd_link),
	LNK("root",       proc_root_link),
	LNK("exe",        proc_exe_link),
	REG("mounts",     S_IRUGO, proc_mounts_operations),
	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
	REG("mountstats", S_IRUSR, proc_mountstats_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",      S_IRUGO, proc_pid_smaps_operations),
	REG("pagemap",    S_IRUGO, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	INF("wchan",      S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",      S_IRUGO, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
	INF("schedstat",  S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	REG("cpuset",     S_IRUGO, proc_cpuset_operations),
#endif
#ifdef CONFIG_CGROUPS
	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
#endif
	INF("oom_score",  S_IRUGO, proc_oom_score),
	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
#ifdef CONFIG_ELF_CORE
	REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	INF("io",	S_IRUSR, proc_tgid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
	INF("hardwall",   S_IRUGO, proc_pid_hardwall),
#endif
#ifdef CONFIG_USER_NS
	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
#endif
};
3103
3104static int proc_tgid_base_readdir(struct file * filp,
3105			     void * dirent, filldir_t filldir)
3106{
3107	return proc_pident_readdir(filp,dirent,filldir,
3108				   tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
3109}
3110
/* file_operations for the /proc/<tgid> directory itself */
static const struct file_operations proc_tgid_base_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_tgid_base_readdir,
	.llseek		= default_llseek,
};
3116
3117static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
 
3118	return proc_pident_lookup(dir, dentry,
3119				  tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
3120}
3121
/* inode_operations for the /proc/<tgid> directory itself */
static const struct inode_operations proc_tgid_base_inode_operations = {
	.lookup		= proc_tgid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
	.permission	= proc_pid_permission,
};
3128
3129static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
3130{
3131	struct dentry *dentry, *leader, *dir;
3132	char buf[PROC_NUMBUF];
3133	struct qstr name;
3134
3135	name.name = buf;
3136	name.len = snprintf(buf, sizeof(buf), "%d", pid);
 
3137	dentry = d_hash_and_lookup(mnt->mnt_root, &name);
3138	if (dentry) {
3139		shrink_dcache_parent(dentry);
3140		d_drop(dentry);
3141		dput(dentry);
3142	}
3143
 
 
 
3144	name.name = buf;
3145	name.len = snprintf(buf, sizeof(buf), "%d", tgid);
3146	leader = d_hash_and_lookup(mnt->mnt_root, &name);
3147	if (!leader)
3148		goto out;
3149
3150	name.name = "task";
3151	name.len = strlen(name.name);
3152	dir = d_hash_and_lookup(leader, &name);
3153	if (!dir)
3154		goto out_put_leader;
3155
3156	name.name = buf;
3157	name.len = snprintf(buf, sizeof(buf), "%d", pid);
3158	dentry = d_hash_and_lookup(dir, &name);
3159	if (dentry) {
3160		shrink_dcache_parent(dentry);
3161		d_drop(dentry);
3162		dput(dentry);
3163	}
3164
3165	dput(dir);
3166out_put_leader:
3167	dput(leader);
3168out:
3169	return;
3170}
3171
3172/**
3173 * proc_flush_task -  Remove dcache entries for @task from the /proc dcache.
3174 * @task: task that should be flushed.
3175 *
3176 * When flushing dentries from proc, one needs to flush them from global
3177 * proc (proc_mnt) and from all the namespaces' procs this task was seen
3178 * in. This call is supposed to do all of this job.
3179 *
3180 * Looks in the dcache for
3181 * /proc/@pid
3182 * /proc/@tgid/task/@pid
 * if either directory is present flushes it and all of its children
3184 * from the dcache.
3185 *
3186 * It is safe and reasonable to cache /proc entries for a task until
3187 * that task exits.  After that they just clog up the dcache with
3188 * useless entries, possibly causing useful dcache entries to be
 * flushed instead.  This routine is provided to flush those useless
3190 * dcache entries at process exit time.
3191 *
3192 * NOTE: This routine is just an optimization so it does not guarantee
3193 *       that no dcache entries will exist at process exit time it
3194 *       just makes it very unlikely that any will persist.
3195 */
3196
void proc_flush_task(struct task_struct *task)
{
	int i;
	struct pid *pid, *tgid;
	struct upid *upid;

	pid = task_pid(task);
	tgid = task_tgid(task);

	/* Flush from the proc mount of every pid namespace the task is
	 * visible in, using the pid/tgid numbers of that namespace. */
	for (i = 0; i <= pid->level; i++) {
		upid = &pid->numbers[i];
		proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
					tgid->numbers[i].nr);
	}

	/* pid 1 exiting means its namespace is done with proc. */
	upid = &pid->numbers[pid->level];
	if (upid->nr == 1)
		pid_ns_release_proc(upid->ns);
}
3216
/*
 * Build the inode + dentry for a /proc/<tgid> directory.  Returns NULL
 * on success or ERR_PTR(-ENOENT) if the inode cannot be made or the
 * task died before the dentry became valid.
 */
static struct dentry *proc_pid_instantiate(struct inode *dir,
					   struct dentry * dentry,
					   struct task_struct *task, const void *ptr)
{
	struct dentry *error = ERR_PTR(-ENOENT);
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tgid_base_inode_operations;
	inode->i_fop = &proc_tgid_base_operations;
	inode->i_flags|=S_IMMUTABLE;

	/* link count = "." and ".." plus one per subdirectory entry */
	set_nlink(inode, 2 + pid_entry_count_dirs(tgid_base_stuff,
						  ARRAY_SIZE(tgid_base_stuff)));

	d_set_d_op(dentry, &pid_dentry_operations);

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}
3245
/*
 * Lookup in the /proc root for the entries owned by this file: first
 * the non-pid "base" entries (/proc/self), then a numeric tgid resolved
 * in this mount's pid namespace.
 */
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
	struct dentry *result;
	struct task_struct *task;
	unsigned tgid;
	struct pid_namespace *ns;

	result = proc_base_lookup(dir, dentry);
	/* only -ENOENT means "not a base entry, try a pid" */
	if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
		goto out;

	tgid = name_to_int(dentry);
	if (tgid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tgid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;

	result = proc_pid_instantiate(dir, dentry, task, NULL);
	put_task_struct(task);
out:
	return result;
}
3275
/*
 * Iterator state for walking thread-group leaders in a pid namespace:
 * next_tgid() advances it to the first task with tgid >= iter.tgid.
 */
struct tgid_iter {
	unsigned int tgid;		/* tgid to resume the search from */
	struct task_struct *task;	/* counted task reference, or NULL */
};
/*
 * Advance @iter to the first thread-group leader with tgid >= iter.tgid
 * in @ns.  Drops the previous iteration's task reference and returns a
 * new one in iter.task (NULL when the namespace is exhausted).
 */
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
{
	struct pid *pid;

	if (iter.task)
		put_task_struct(iter.task);
	rcu_read_lock();
retry:
	iter.task = NULL;
	pid = find_ge_pid(iter.tgid, ns);
	if (pid) {
		iter.tgid = pid_nr_ns(pid, ns);
		iter.task = pid_task(pid, PIDTYPE_PID);
		/* What we want to know is whether the pid we found is the
		 * pid of a thread_group_leader.  Testing for task
		 * being a thread_group_leader is the obvious thing
		 * to do but there is a window when it fails, due to
		 * the pid transfer logic in de_thread.
		 *
		 * So we perform the straightforward test of seeing
		 * if the pid we have found is the pid of a thread
		 * group leader, and don't worry if the task we have
		 * found doesn't happen to be a thread group leader.
		 * As we don't care in the case of readdir.
		 */
		if (!iter.task || !has_group_leader_pid(iter.task)) {
			iter.tgid += 1;
			goto retry;
		}
		get_task_struct(iter.task);
	}
	rcu_read_unlock();
	return iter;
}
3318
/* f_pos offset where numeric tgid entries start in the /proc root. */
#define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))

/* Emit one /proc/<tgid> directory entry to readdir. */
static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	struct tgid_iter iter)
{
	char name[PROC_NUMBUF];
	int len = snprintf(name, sizeof(name), "%d", iter.tgid);
	return proc_fill_cache(filp, dirent, filldir, name, len,
				proc_pid_instantiate, iter.task, NULL);
}
3329
/*
 * filldir callback that reports success without emitting anything;
 * proc_pid_readdir() substitutes it for pids the reader is not allowed
 * to see, so iteration still advances past them.
 */
static int fake_filldir(void *buf, const char *name, int namelen,
			loff_t offset, u64 ino, unsigned d_type)
{
	return 0;
}
3335
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned int nr;
	struct task_struct *reaper;
	struct tgid_iter iter;
	struct pid_namespace *ns;
	filldir_t __filldir;

	if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
		goto out_no_task;
	nr = filp->f_pos - FIRST_PROCESS_ENTRY;

	reaper = get_proc_task(filp->f_path.dentry->d_inode);
	if (!reaper)
		goto out_no_task;

	/* First the non-pid base entries (/proc/self). */
	for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
		const struct pid_entry *p = &proc_base_stuff[nr];
		if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
			goto out;
	}

	/* Then every visible thread-group leader; f_pos encodes the tgid
	 * so readdir can resume exactly where it stopped. */
	ns = filp->f_dentry->d_sb->s_fs_info;
	iter.task = NULL;
	iter.tgid = filp->f_pos - TGID_OFFSET;
	for (iter = next_tgid(ns, iter);
	     iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		/* Pids the reader may not see are silently skipped. */
		if (has_pid_permissions(ns, iter.task, 2))
			__filldir = filldir;
		else
			__filldir = fake_filldir;

		filp->f_pos = iter.tgid + TGID_OFFSET;
		if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
			put_task_struct(iter.task);
			goto out;
		}
	}
	filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
out:
	put_task_struct(reaper);
out_no_task:
	return 0;
}
3382
/*
 * Tasks
 */
/*
 * Per-thread entries: one pid_entry per file/directory that appears under
 * /proc/<pid>/task/<tid>/.  Served by proc_tid_base_lookup() and
 * proc_tid_base_readdir() below via the generic pident helpers.
 */
static const struct pid_entry tid_base_stuff[] = {
	DIR("fd",        S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
	DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
	DIR("ns",	 S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
	REG("environ",   S_IRUSR, proc_environ_operations),
	INF("auxv",      S_IRUSR, proc_pid_auxv),
	ONE("status",    S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUGO, proc_pid_personality),
	INF("limits",	 S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	INF("syscall",   S_IRUGO, proc_pid_syscall),
#endif
	INF("cmdline",   S_IRUGO, proc_pid_cmdline),
	ONE("stat",      S_IRUGO, proc_tid_stat),
	ONE("statm",     S_IRUGO, proc_pid_statm),
	REG("maps",      S_IRUGO, proc_tid_maps_operations),
#ifdef CONFIG_CHECKPOINT_RESTORE
	REG("children",  S_IRUGO, proc_tid_children_operations),
#endif
#ifdef CONFIG_NUMA
	REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
#endif
	REG("mem",       S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",       proc_cwd_link),
	LNK("root",      proc_root_link),
	LNK("exe",       proc_exe_link),
	REG("mounts",    S_IRUGO, proc_mounts_operations),
	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",     S_IRUGO, proc_tid_smaps_operations),
	REG("pagemap",    S_IRUGO, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	INF("wchan",     S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",      S_IRUGO, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
	INF("schedstat", S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	REG("cpuset",    S_IRUGO, proc_cpuset_operations),
#endif
#ifdef CONFIG_CGROUPS
	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
#endif
	INF("oom_score", S_IRUGO, proc_oom_score),
	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	INF("io",	S_IRUSR, proc_tid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
	INF("hardwall",   S_IRUGO, proc_pid_hardwall),
#endif
#ifdef CONFIG_USER_NS
	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
#endif
};
3465
3466static int proc_tid_base_readdir(struct file * filp,
3467			     void * dirent, filldir_t filldir)
3468{
3469	return proc_pident_readdir(filp,dirent,filldir,
3470				   tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
3471}
3472
3473static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
 
3474	return proc_pident_lookup(dir, dentry,
3475				  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
3476}
3477
/* Directory file operations for /proc/<pid>/task/<tid>/. */
static const struct file_operations proc_tid_base_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_tid_base_readdir,
	.llseek		= default_llseek,
};
3483
/* Inode operations for /proc/<pid>/task/<tid>/. */
static const struct inode_operations proc_tid_base_inode_operations = {
	.lookup		= proc_tid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};
3489
/*
 * Build and instantiate the inode + dentry for a /proc/<pid>/task/<tid>/
 * directory.  Returns NULL on success, ERR_PTR(-ENOENT) if the inode
 * could not be allocated or the task died before revalidation.
 */
static struct dentry *proc_task_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	struct dentry *error = ERR_PTR(-ENOENT);
	struct inode *inode;
	inode = proc_pid_make_inode(dir->i_sb, task);

	if (!inode)
		goto out;
	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tid_base_inode_operations;
	inode->i_fop = &proc_tid_base_operations;
	inode->i_flags|=S_IMMUTABLE;

	/* "." and ".." plus one link per DIR() entry in the table. */
	set_nlink(inode, 2 + pid_entry_count_dirs(tid_base_stuff,
						  ARRAY_SIZE(tid_base_stuff)));

	d_set_d_op(dentry, &pid_dentry_operations);

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}
3516
/*
 * Look up /proc/<pid>/task/<tid>: the name must parse as a decimal tid
 * and the thread it names must belong to the same thread group as this
 * directory's leader task.
 */
static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
	struct dentry *result = ERR_PTR(-ENOENT);
	struct task_struct *task;
	struct task_struct *leader = get_proc_task(dir);
	unsigned tid;
	struct pid_namespace *ns;

	if (!leader)
		goto out_no_task;

	/* name_to_int() yields ~0U for names that aren't plain numbers. */
	tid = name_to_int(dentry);
	if (tid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;
	/* Reject tids that exist but belong to a different process. */
	if (!same_thread_group(leader, task))
		goto out_drop_task;

	result = proc_task_instantiate(dir, dentry, task, NULL);
out_drop_task:
	put_task_struct(task);
out:
	put_task_struct(leader);
out_no_task:
	return result;
}
3551
/*
 * Find the first tid of a thread group to return to user space.
 *
 * Usually this is just the thread group leader, but if the user's
 * buffer was too small or there was a seek into the middle of the
 * directory we have more work to do.
 *
 * In the case of a short read we start with find_task_by_pid.
 *
 * In the case of a seek we start with the leader and walk nr
 * threads past it.
 *
 * Returns NULL when there is no thread at that position; otherwise a
 * task with a reference held that the caller must drop.
 */
static struct task_struct *first_tid(struct task_struct *leader,
		int tid, int nr, struct pid_namespace *ns)
{
	struct task_struct *pos;

	rcu_read_lock();
	/* Attempt to start with the pid of a thread */
	if (tid && (nr > 0)) {
		/* Only resume here if the thread is still in this group. */
		pos = find_task_by_pid_ns(tid, ns);
		if (pos && (pos->group_leader == leader))
			goto found;
	}

	/* If nr exceeds the number of threads there is nothing to do */
	pos = NULL;
	if (nr && nr >= get_nr_threads(leader))
		goto out;

	/* If we haven't found our starting place yet start
	 * with the leader and walk nr threads forward.
	 */
	for (pos = leader; nr > 0; --nr) {
		pos = next_thread(pos);
		if (pos == leader) {
			/* Wrapped around: fewer threads than nr remain. */
			pos = NULL;
			goto out;
		}
	}
found:
	get_task_struct(pos);
out:
	rcu_read_unlock();
	return pos;
}
3598
/*
 * Find the next thread in the thread list.
 * Return NULL if there is an error or no next thread.
 *
 * The reference to the input task_struct is released.
 */
static struct task_struct *next_tid(struct task_struct *start)
{
	struct task_struct *pos = NULL;
	rcu_read_lock();
	/* A dead task's thread links are no longer safe to follow. */
	if (pid_alive(start)) {
		pos = next_thread(start);
		/* Reaching the group leader again means we've wrapped:
		 * the whole thread list has been walked. */
		if (thread_group_leader(pos))
			pos = NULL;
		else
			get_task_struct(pos);
	}
	rcu_read_unlock();
	put_task_struct(start);
	return pos;
}
3620
3621static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
3622	struct task_struct *task, int tid)
3623{
3624	char name[PROC_NUMBUF];
3625	int len = snprintf(name, sizeof(name), "%d", tid);
3626	return proc_fill_cache(filp, dirent, filldir, name, len,
3627				proc_task_instantiate, task, NULL);
3628}
3629
3630/* for the /proc/TGID/task/ directories */
3631static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
3632{
3633	struct dentry *dentry = filp->f_path.dentry;
3634	struct inode *inode = dentry->d_inode;
3635	struct task_struct *leader = NULL;
3636	struct task_struct *task;
3637	int retval = -ENOENT;
3638	ino_t ino;
3639	int tid;
3640	struct pid_namespace *ns;
3641
3642	task = get_proc_task(inode);
3643	if (!task)
3644		goto out_no_task;
3645	rcu_read_lock();
3646	if (pid_alive(task)) {
3647		leader = task->group_leader;
3648		get_task_struct(leader);
3649	}
3650	rcu_read_unlock();
3651	put_task_struct(task);
3652	if (!leader)
3653		goto out_no_task;
3654	retval = 0;
3655
3656	switch ((unsigned long)filp->f_pos) {
3657	case 0:
3658		ino = inode->i_ino;
3659		if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0)
3660			goto out;
3661		filp->f_pos++;
3662		/* fall through */
3663	case 1:
3664		ino = parent_ino(dentry);
3665		if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0)
3666			goto out;
3667		filp->f_pos++;
3668		/* fall through */
3669	}
3670
3671	/* f_version caches the tgid value that the last readdir call couldn't
3672	 * return. lseek aka telldir automagically resets f_version to 0.
3673	 */
3674	ns = filp->f_dentry->d_sb->s_fs_info;
3675	tid = (int)filp->f_version;
3676	filp->f_version = 0;
3677	for (task = first_tid(leader, tid, filp->f_pos - 2, ns);
3678	     task;
3679	     task = next_tid(task), filp->f_pos++) {
 
 
3680		tid = task_pid_nr_ns(task, ns);
3681		if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
 
 
3682			/* returning this tgid failed, save it as the first
3683			 * pid for the next readir call */
3684			filp->f_version = (u64)tid;
3685			put_task_struct(task);
3686			break;
3687		}
3688	}
3689out:
3690	put_task_struct(leader);
3691out_no_task:
3692	return retval;
3693}
3694
3695static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
3696{
3697	struct inode *inode = dentry->d_inode;
3698	struct task_struct *p = get_proc_task(inode);
3699	generic_fillattr(inode, stat);
3700
3701	if (p) {
3702		stat->nlink += get_nr_threads(p);
3703		put_task_struct(p);
3704	}
3705
3706	return 0;
3707}
3708
/* Inode operations for /proc/<pid>/task/. */
static const struct inode_operations proc_task_inode_operations = {
	.lookup		= proc_task_lookup,
	.getattr	= proc_task_getattr,
	.setattr	= proc_setattr,
	.permission	= proc_pid_permission,
};
3715
/* Directory file operations for /proc/<pid>/task/. */
static const struct file_operations proc_task_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_task_readdir,
	.llseek		= default_llseek,
};