v4.6
   1/*
   2 *  linux/kernel/sys.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 */
   6
   7#include <linux/export.h>
   8#include <linux/mm.h>
   9#include <linux/utsname.h>
  10#include <linux/mman.h>
  11#include <linux/reboot.h>
  12#include <linux/prctl.h>
  13#include <linux/highuid.h>
  14#include <linux/fs.h>
  15#include <linux/kmod.h>
  16#include <linux/perf_event.h>
  17#include <linux/resource.h>
  18#include <linux/kernel.h>
  19#include <linux/workqueue.h>
  20#include <linux/capability.h>
  21#include <linux/device.h>
  22#include <linux/key.h>
  23#include <linux/times.h>
  24#include <linux/posix-timers.h>
  25#include <linux/security.h>
  26#include <linux/dcookies.h>
  27#include <linux/suspend.h>
  28#include <linux/tty.h>
  29#include <linux/signal.h>
  30#include <linux/cn_proc.h>
  31#include <linux/getcpu.h>
  32#include <linux/task_io_accounting_ops.h>
  33#include <linux/seccomp.h>
  34#include <linux/cpu.h>
  35#include <linux/personality.h>
  36#include <linux/ptrace.h>
  37#include <linux/fs_struct.h>
  38#include <linux/file.h>
  39#include <linux/mount.h>
  40#include <linux/gfp.h>
  41#include <linux/syscore_ops.h>
  42#include <linux/version.h>
  43#include <linux/ctype.h>
  44
  45#include <linux/compat.h>
  46#include <linux/syscalls.h>
  47#include <linux/kprobes.h>
  48#include <linux/user_namespace.h>
  49#include <linux/binfmts.h>
  50
  51#include <linux/sched.h>
  52#include <linux/rcupdate.h>
  53#include <linux/uidgid.h>
  54#include <linux/cred.h>
  55
  56#include <linux/kmsg_dump.h>
  57/* Move somewhere else to avoid recompiling? */
  58#include <generated/utsrelease.h>
  59
  60#include <asm/uaccess.h>
  61#include <asm/io.h>
  62#include <asm/unistd.h>
  63
  64#ifndef SET_UNALIGN_CTL
  65# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
  66#endif
  67#ifndef GET_UNALIGN_CTL
  68# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
  69#endif
  70#ifndef SET_FPEMU_CTL
  71# define SET_FPEMU_CTL(a, b)	(-EINVAL)
  72#endif
  73#ifndef GET_FPEMU_CTL
  74# define GET_FPEMU_CTL(a, b)	(-EINVAL)
  75#endif
  76#ifndef SET_FPEXC_CTL
  77# define SET_FPEXC_CTL(a, b)	(-EINVAL)
  78#endif
  79#ifndef GET_FPEXC_CTL
  80# define GET_FPEXC_CTL(a, b)	(-EINVAL)
  81#endif
  82#ifndef GET_ENDIAN
  83# define GET_ENDIAN(a, b)	(-EINVAL)
  84#endif
  85#ifndef SET_ENDIAN
  86# define SET_ENDIAN(a, b)	(-EINVAL)
  87#endif
  88#ifndef GET_TSC_CTL
  89# define GET_TSC_CTL(a)		(-EINVAL)
  90#endif
  91#ifndef SET_TSC_CTL
  92# define SET_TSC_CTL(a)		(-EINVAL)
  93#endif
  94#ifndef MPX_ENABLE_MANAGEMENT
  95# define MPX_ENABLE_MANAGEMENT()	(-EINVAL)
  96#endif
  97#ifndef MPX_DISABLE_MANAGEMENT
  98# define MPX_DISABLE_MANAGEMENT()	(-EINVAL)
  99#endif
 100#ifndef GET_FP_MODE
 101# define GET_FP_MODE(a)		(-EINVAL)
 102#endif
 103#ifndef SET_FP_MODE
 104# define SET_FP_MODE(a,b)	(-EINVAL)
 105#endif
 106
 107/*
 108 * this is where the system-wide overflow UID and GID are defined, for
 109 * architectures that now have 32-bit UID/GID but didn't in the past
 110 */
 111
 112int overflowuid = DEFAULT_OVERFLOWUID;
 113int overflowgid = DEFAULT_OVERFLOWGID;
 114
 115EXPORT_SYMBOL(overflowuid);
 116EXPORT_SYMBOL(overflowgid);
 117
 118/*
 119 * the same as above, but for filesystems which can only store a 16-bit
 120 * UID and GID. as such, this is needed on all architectures
 121 */
 122
 123int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
  124int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
 125
 126EXPORT_SYMBOL(fs_overflowuid);
 127EXPORT_SYMBOL(fs_overflowgid);
 128
 129/*
 130 * Returns true if current's euid is same as p's uid or euid,
 131 * or has CAP_SYS_NICE to p's user_ns.
 132 *
 133 * Called with rcu_read_lock, creds are safe
 134 */
 135static bool set_one_prio_perm(struct task_struct *p)
 136{
 137	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
 138
 139	if (uid_eq(pcred->uid,  cred->euid) ||
 140	    uid_eq(pcred->euid, cred->euid))
 141		return true;
 142	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
 143		return true;
 144	return false;
 145}
 146
 147/*
 148 * set the priority of a task
 149 * - the caller must hold the RCU read lock
 150 */
 151static int set_one_prio(struct task_struct *p, int niceval, int error)
 152{
 153	int no_nice;
 154
 155	if (!set_one_prio_perm(p)) {
 156		error = -EPERM;
 157		goto out;
 158	}
 159	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
 160		error = -EACCES;
 161		goto out;
 162	}
 163	no_nice = security_task_setnice(p, niceval);
 164	if (no_nice) {
 165		error = no_nice;
 166		goto out;
 167	}
 168	if (error == -ESRCH)
 169		error = 0;
 170	set_user_nice(p, niceval);
 171out:
 172	return error;
 173}
 174
 175SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 176{
 177	struct task_struct *g, *p;
 178	struct user_struct *user;
 179	const struct cred *cred = current_cred();
 180	int error = -EINVAL;
 181	struct pid *pgrp;
 182	kuid_t uid;
 183
 184	if (which > PRIO_USER || which < PRIO_PROCESS)
 185		goto out;
 186
 187	/* normalize: avoid signed division (rounding problems) */
 188	error = -ESRCH;
 189	if (niceval < MIN_NICE)
 190		niceval = MIN_NICE;
 191	if (niceval > MAX_NICE)
 192		niceval = MAX_NICE;
 193
 194	rcu_read_lock();
 195	read_lock(&tasklist_lock);
 196	switch (which) {
 197	case PRIO_PROCESS:
 198		if (who)
 199			p = find_task_by_vpid(who);
 200		else
 201			p = current;
 202		if (p)
 203			error = set_one_prio(p, niceval, error);
 204		break;
 205	case PRIO_PGRP:
 206		if (who)
 207			pgrp = find_vpid(who);
 208		else
 209			pgrp = task_pgrp(current);
 210		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 211			error = set_one_prio(p, niceval, error);
 212		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 213		break;
 214	case PRIO_USER:
 215		uid = make_kuid(cred->user_ns, who);
 216		user = cred->user;
 217		if (!who)
 218			uid = cred->uid;
 219		else if (!uid_eq(uid, cred->uid)) {
 220			user = find_user(uid);
 221			if (!user)
 222				goto out_unlock;	/* No processes for this user */
 223		}
 224		do_each_thread(g, p) {
 225			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
 226				error = set_one_prio(p, niceval, error);
 227		} while_each_thread(g, p);
 228		if (!uid_eq(uid, cred->uid))
 229			free_uid(user);		/* For find_user() */
 230		break;
 231	}
 232out_unlock:
 233	read_unlock(&tasklist_lock);
 234	rcu_read_unlock();
 235out:
 236	return error;
 237}
 238
 239/*
 240 * Ugh. To avoid negative return values, "getpriority()" will
 241 * not return the normal nice-value, but a negated value that
 242 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 243 * to stay compatible.
 244 */
 245SYSCALL_DEFINE2(getpriority, int, which, int, who)
 246{
 247	struct task_struct *g, *p;
 248	struct user_struct *user;
 249	const struct cred *cred = current_cred();
 250	long niceval, retval = -ESRCH;
 251	struct pid *pgrp;
 252	kuid_t uid;
 253
 254	if (which > PRIO_USER || which < PRIO_PROCESS)
 255		return -EINVAL;
 256
 257	rcu_read_lock();
 258	read_lock(&tasklist_lock);
 259	switch (which) {
 260	case PRIO_PROCESS:
 261		if (who)
 262			p = find_task_by_vpid(who);
 263		else
 264			p = current;
 265		if (p) {
 266			niceval = nice_to_rlimit(task_nice(p));
 267			if (niceval > retval)
 268				retval = niceval;
 269		}
 270		break;
 271	case PRIO_PGRP:
 272		if (who)
 273			pgrp = find_vpid(who);
 274		else
 275			pgrp = task_pgrp(current);
 276		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 277			niceval = nice_to_rlimit(task_nice(p));
 278			if (niceval > retval)
 279				retval = niceval;
 280		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 281		break;
 282	case PRIO_USER:
 283		uid = make_kuid(cred->user_ns, who);
 284		user = cred->user;
 285		if (!who)
 286			uid = cred->uid;
 287		else if (!uid_eq(uid, cred->uid)) {
 288			user = find_user(uid);
 289			if (!user)
 290				goto out_unlock;	/* No processes for this user */
 291		}
 292		do_each_thread(g, p) {
 293			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
 294				niceval = nice_to_rlimit(task_nice(p));
 295				if (niceval > retval)
 296					retval = niceval;
 297			}
 298		} while_each_thread(g, p);
 299		if (!uid_eq(uid, cred->uid))
 300			free_uid(user);		/* for find_user() */
 301		break;
 302	}
 303out_unlock:
 304	read_unlock(&tasklist_lock);
 305	rcu_read_unlock();
 306
 307	return retval;
 308}
 309
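/*
 * Minimal userspace sketch of the encoding described above (illustrative,
 * not part of this file; the helper name is made up).  The raw syscall
 * returns 20 - nice, i.e. 40..1 for nice values -20..19, so negative
 * return values are reserved for errors; the glibc getpriority() wrapper
 * is expected to undo the offset for its callers.
 */
#if 0	/* illustrative userspace example */
#include <sys/syscall.h>
#include <sys/resource.h>
#include <unistd.h>
#include <stdio.h>

static void show_prio_encoding(void)
{
	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);

	if (raw > 0)	/* raw result is 1..40, never 0 */
		printf("raw=%ld nice=%ld\n", raw, 20 - raw);
}
#endif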
 310/*
 311 * Unprivileged users may change the real gid to the effective gid
 312 * or vice versa.  (BSD-style)
 313 *
 314 * If you set the real gid at all, or set the effective gid to a value not
 315 * equal to the real gid, then the saved gid is set to the new effective gid.
 316 *
 317 * This makes it possible for a setgid program to completely drop its
 318 * privileges, which is often a useful assertion to make when you are doing
 319 * a security audit over a program.
 320 *
 321 * The general idea is that a program which uses just setregid() will be
 322 * 100% compatible with BSD.  A program which uses just setgid() will be
 323 * 100% compatible with POSIX with saved IDs.
 324 *
  325 * SMP: There are no races, the GIDs are checked only by filesystem
 326 *      operations (as far as semantic preservation is concerned).
 327 */
 328#ifdef CONFIG_MULTIUSER
 329SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 330{
 331	struct user_namespace *ns = current_user_ns();
 332	const struct cred *old;
 333	struct cred *new;
 334	int retval;
 335	kgid_t krgid, kegid;
 336
 337	krgid = make_kgid(ns, rgid);
 338	kegid = make_kgid(ns, egid);
 339
 340	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 341		return -EINVAL;
 342	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 343		return -EINVAL;
 344
 345	new = prepare_creds();
 346	if (!new)
 347		return -ENOMEM;
 348	old = current_cred();
 349
 350	retval = -EPERM;
 351	if (rgid != (gid_t) -1) {
 352		if (gid_eq(old->gid, krgid) ||
 353		    gid_eq(old->egid, krgid) ||
 354		    ns_capable(old->user_ns, CAP_SETGID))
 355			new->gid = krgid;
 356		else
 357			goto error;
 358	}
 359	if (egid != (gid_t) -1) {
 360		if (gid_eq(old->gid, kegid) ||
 361		    gid_eq(old->egid, kegid) ||
 362		    gid_eq(old->sgid, kegid) ||
 363		    ns_capable(old->user_ns, CAP_SETGID))
 364			new->egid = kegid;
 365		else
 366			goto error;
 367	}
 368
 369	if (rgid != (gid_t) -1 ||
 370	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
 371		new->sgid = new->egid;
 372	new->fsgid = new->egid;
 373
 374	return commit_creds(new);
 375
 376error:
 377	abort_creds(new);
 378	return retval;
 379}
 380
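/*
 * Minimal userspace sketch of the BSD-style rule above (illustrative, not
 * part of this file): because setting the real gid also updates the saved
 * gid, a setgid program can drop its extra group privilege irrevocably
 * with a single setregid() call.
 */
#if 0	/* illustrative userspace example */
#include <unistd.h>
#include <stdlib.h>

static void drop_setgid_privs(void)
{
	gid_t rgid = getgid();

	/* real gid is set, so the saved gid becomes the new effective gid */
	if (setregid(rgid, rgid) != 0)
		exit(1);
}
#endif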
 381/*
 382 * setgid() is implemented like SysV w/ SAVED_IDS
 383 *
 384 * SMP: Same implicit races as above.
 385 */
 386SYSCALL_DEFINE1(setgid, gid_t, gid)
 387{
 388	struct user_namespace *ns = current_user_ns();
 389	const struct cred *old;
 390	struct cred *new;
 391	int retval;
 392	kgid_t kgid;
 393
 394	kgid = make_kgid(ns, gid);
 395	if (!gid_valid(kgid))
 396		return -EINVAL;
 397
 398	new = prepare_creds();
 399	if (!new)
 400		return -ENOMEM;
 401	old = current_cred();
 402
 403	retval = -EPERM;
 404	if (ns_capable(old->user_ns, CAP_SETGID))
 405		new->gid = new->egid = new->sgid = new->fsgid = kgid;
 406	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
 407		new->egid = new->fsgid = kgid;
 408	else
 409		goto error;
 410
 411	return commit_creds(new);
 412
 413error:
 414	abort_creds(new);
 415	return retval;
 416}
 417
 418/*
 419 * change the user struct in a credentials set to match the new UID
 420 */
 421static int set_user(struct cred *new)
 422{
 423	struct user_struct *new_user;
 424
 425	new_user = alloc_uid(new->uid);
 426	if (!new_user)
 427		return -EAGAIN;
 428
 429	/*
 430	 * We don't fail in case of NPROC limit excess here because too many
 431	 * poorly written programs don't check set*uid() return code, assuming
 432	 * it never fails if called by root.  We may still enforce NPROC limit
 433	 * for programs doing set*uid()+execve() by harmlessly deferring the
 434	 * failure to the execve() stage.
 435	 */
 436	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
 437			new_user != INIT_USER)
 438		current->flags |= PF_NPROC_EXCEEDED;
 439	else
 440		current->flags &= ~PF_NPROC_EXCEEDED;
 441
 442	free_uid(new->user);
 443	new->user = new_user;
 444	return 0;
 445}
 446
 447/*
 448 * Unprivileged users may change the real uid to the effective uid
 449 * or vice versa.  (BSD-style)
 450 *
 451 * If you set the real uid at all, or set the effective uid to a value not
 452 * equal to the real uid, then the saved uid is set to the new effective uid.
 453 *
 454 * This makes it possible for a setuid program to completely drop its
 455 * privileges, which is often a useful assertion to make when you are doing
 456 * a security audit over a program.
 457 *
 458 * The general idea is that a program which uses just setreuid() will be
 459 * 100% compatible with BSD.  A program which uses just setuid() will be
 460 * 100% compatible with POSIX with saved IDs.
 461 */
 462SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 463{
 464	struct user_namespace *ns = current_user_ns();
 465	const struct cred *old;
 466	struct cred *new;
 467	int retval;
 468	kuid_t kruid, keuid;
 469
 470	kruid = make_kuid(ns, ruid);
 471	keuid = make_kuid(ns, euid);
 472
 473	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 474		return -EINVAL;
 475	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 476		return -EINVAL;
 477
 478	new = prepare_creds();
 479	if (!new)
 480		return -ENOMEM;
 481	old = current_cred();
 482
 483	retval = -EPERM;
 484	if (ruid != (uid_t) -1) {
 485		new->uid = kruid;
 486		if (!uid_eq(old->uid, kruid) &&
 487		    !uid_eq(old->euid, kruid) &&
 488		    !ns_capable(old->user_ns, CAP_SETUID))
 489			goto error;
 490	}
 491
 492	if (euid != (uid_t) -1) {
 493		new->euid = keuid;
 494		if (!uid_eq(old->uid, keuid) &&
 495		    !uid_eq(old->euid, keuid) &&
 496		    !uid_eq(old->suid, keuid) &&
 497		    !ns_capable(old->user_ns, CAP_SETUID))
 498			goto error;
 499	}
 500
 501	if (!uid_eq(new->uid, old->uid)) {
 502		retval = set_user(new);
 503		if (retval < 0)
 504			goto error;
 505	}
 506	if (ruid != (uid_t) -1 ||
 507	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
 508		new->suid = new->euid;
 509	new->fsuid = new->euid;
 510
 511	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
 512	if (retval < 0)
 513		goto error;
 514
 515	return commit_creds(new);
 516
 517error:
 518	abort_creds(new);
 519	return retval;
 520}
 521
 522/*
 523 * setuid() is implemented like SysV with SAVED_IDS
 524 *
 525 * Note that SAVED_ID's is deficient in that a setuid root program
 526 * like sendmail, for example, cannot set its uid to be a normal
 527 * user and then switch back, because if you're root, setuid() sets
 528 * the saved uid too.  If you don't like this, blame the bright people
 529 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 530 * will allow a root program to temporarily drop privileges and be able to
 531 * regain them by swapping the real and effective uid.
 532 */
 533SYSCALL_DEFINE1(setuid, uid_t, uid)
 534{
 535	struct user_namespace *ns = current_user_ns();
 536	const struct cred *old;
 537	struct cred *new;
 538	int retval;
 539	kuid_t kuid;
 540
 541	kuid = make_kuid(ns, uid);
 542	if (!uid_valid(kuid))
 543		return -EINVAL;
 544
 545	new = prepare_creds();
 546	if (!new)
 547		return -ENOMEM;
 548	old = current_cred();
 549
 550	retval = -EPERM;
 551	if (ns_capable(old->user_ns, CAP_SETUID)) {
 552		new->suid = new->uid = kuid;
 553		if (!uid_eq(kuid, old->uid)) {
 554			retval = set_user(new);
 555			if (retval < 0)
 556				goto error;
 557		}
 558	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
 559		goto error;
 560	}
 561
 562	new->fsuid = new->euid = kuid;
 563
 564	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
 565	if (retval < 0)
 566		goto error;
 567
 568	return commit_creds(new);
 569
 570error:
 571	abort_creds(new);
 572	return retval;
 573}
 574
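/*
 * Minimal userspace sketch of the contrast drawn above (illustrative, not
 * part of this file): a setuid-root program that calls setuid() loses
 * root for good because the saved uid is overwritten too, whereas
 * swapping the real and effective uids with setreuid() lets it regain
 * root later.
 */
#if 0	/* illustrative userspace example */
#include <unistd.h>

static void drop_then_regain_root(void)
{
	uid_t ruid = getuid();		/* invoking user */
	uid_t euid = geteuid();		/* 0 in a setuid-root program */

	if (setreuid(euid, ruid) != 0)	/* swap: euid = user, ruid = 0 */
		return;
	/* ... work with the invoking user's privileges ... */
	if (setreuid(ruid, euid) != 0)	/* swap back: euid = 0 again */
		return;
}
#endif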
 575
 576/*
 577 * This function implements a generic ability to update ruid, euid,
 578 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 579 */
 580SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 581{
 582	struct user_namespace *ns = current_user_ns();
 583	const struct cred *old;
 584	struct cred *new;
 585	int retval;
 586	kuid_t kruid, keuid, ksuid;
 587
 588	kruid = make_kuid(ns, ruid);
 589	keuid = make_kuid(ns, euid);
 590	ksuid = make_kuid(ns, suid);
 591
 592	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 593		return -EINVAL;
 594
 595	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 596		return -EINVAL;
 597
 598	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
 599		return -EINVAL;
 600
 601	new = prepare_creds();
 602	if (!new)
 603		return -ENOMEM;
 604
 605	old = current_cred();
 606
 607	retval = -EPERM;
 608	if (!ns_capable(old->user_ns, CAP_SETUID)) {
 609		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
 610		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
 611			goto error;
 612		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
 613		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
 614			goto error;
 615		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
 616		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
 617			goto error;
 618	}
 619
 620	if (ruid != (uid_t) -1) {
 621		new->uid = kruid;
 622		if (!uid_eq(kruid, old->uid)) {
 623			retval = set_user(new);
 624			if (retval < 0)
 625				goto error;
 626		}
 627	}
 628	if (euid != (uid_t) -1)
 629		new->euid = keuid;
 630	if (suid != (uid_t) -1)
 631		new->suid = ksuid;
 632	new->fsuid = new->euid;
 633
 634	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
 635	if (retval < 0)
 636		goto error;
 637
 638	return commit_creds(new);
 639
 640error:
 641	abort_creds(new);
 642	return retval;
 643}
 644
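/*
 * Minimal userspace sketch of the remark above (illustrative, not part of
 * this file): a 4.4BSD-compatible seteuid() can be expressed with
 * setresuid() by leaving the real and saved uids untouched.
 */
#if 0	/* illustrative userspace example */
#define _GNU_SOURCE
#include <unistd.h>

static int seteuid_via_setresuid(uid_t euid)
{
	return setresuid((uid_t)-1, euid, (uid_t)-1);
}
#endif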
 645SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
 646{
 647	const struct cred *cred = current_cred();
 648	int retval;
 649	uid_t ruid, euid, suid;
 650
 651	ruid = from_kuid_munged(cred->user_ns, cred->uid);
 652	euid = from_kuid_munged(cred->user_ns, cred->euid);
 653	suid = from_kuid_munged(cred->user_ns, cred->suid);
 654
 655	retval = put_user(ruid, ruidp);
 656	if (!retval) {
 657		retval = put_user(euid, euidp);
 658		if (!retval)
 659			return put_user(suid, suidp);
 660	}
 661	return retval;
 662}
 663
 664/*
 665 * Same as above, but for rgid, egid, sgid.
 666 */
 667SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 668{
 669	struct user_namespace *ns = current_user_ns();
 670	const struct cred *old;
 671	struct cred *new;
 672	int retval;
 673	kgid_t krgid, kegid, ksgid;
 674
 675	krgid = make_kgid(ns, rgid);
 676	kegid = make_kgid(ns, egid);
 677	ksgid = make_kgid(ns, sgid);
 678
 679	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 680		return -EINVAL;
 681	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 682		return -EINVAL;
 683	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
 684		return -EINVAL;
 685
 686	new = prepare_creds();
 687	if (!new)
 688		return -ENOMEM;
 689	old = current_cred();
 690
 691	retval = -EPERM;
 692	if (!ns_capable(old->user_ns, CAP_SETGID)) {
 693		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
 694		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
 695			goto error;
 696		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
 697		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
 698			goto error;
 699		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
 700		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
 701			goto error;
 702	}
 703
 704	if (rgid != (gid_t) -1)
 705		new->gid = krgid;
 706	if (egid != (gid_t) -1)
 707		new->egid = kegid;
 708	if (sgid != (gid_t) -1)
 709		new->sgid = ksgid;
 710	new->fsgid = new->egid;
 711
 712	return commit_creds(new);
 713
 714error:
 715	abort_creds(new);
 716	return retval;
 717}
 718
 719SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
 720{
 721	const struct cred *cred = current_cred();
 722	int retval;
 723	gid_t rgid, egid, sgid;
 724
 725	rgid = from_kgid_munged(cred->user_ns, cred->gid);
 726	egid = from_kgid_munged(cred->user_ns, cred->egid);
 727	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
 728
 729	retval = put_user(rgid, rgidp);
 730	if (!retval) {
 731		retval = put_user(egid, egidp);
 732		if (!retval)
 733			retval = put_user(sgid, sgidp);
 734	}
 735
 736	return retval;
 737}
 738
 739
 740/*
 741 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 742 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 743 * whatever uid it wants to). It normally shadows "euid", except when
 744 * explicitly set by setfsuid() or for access..
 745 */
 746SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 747{
 748	const struct cred *old;
 749	struct cred *new;
 750	uid_t old_fsuid;
 751	kuid_t kuid;
 752
 753	old = current_cred();
 754	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
 755
 756	kuid = make_kuid(old->user_ns, uid);
 757	if (!uid_valid(kuid))
 758		return old_fsuid;
 759
 760	new = prepare_creds();
 761	if (!new)
 762		return old_fsuid;
 763
 764	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
 765	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
 766	    ns_capable(old->user_ns, CAP_SETUID)) {
 767		if (!uid_eq(kuid, old->fsuid)) {
 768			new->fsuid = kuid;
 769			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 770				goto change_okay;
 771		}
 772	}
 773
 774	abort_creds(new);
 775	return old_fsuid;
 776
 777change_okay:
 778	commit_creds(new);
 779	return old_fsuid;
 780}
 781
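/*
 * Minimal userspace sketch of the fsuid usage described above
 * (illustrative, not part of this file): a server acting on behalf of a
 * user, nfsd-style, switches only the uid used for filesystem permission
 * checks; the call returns the previous fsuid, which is kept so it can
 * be restored afterwards.
 */
#if 0	/* illustrative userspace example */
#include <sys/fsuid.h>
#include <unistd.h>

static void access_files_as(uid_t user)
{
	int old_fsuid = setfsuid(user);	/* returns the previous fsuid */

	/* ... open()/stat() are now permission-checked against "user" ... */

	setfsuid(old_fsuid);		/* restore the original fsuid */
}
#endif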
 782/*
  783 * Samma på svenska.. ("the same, in Swedish"): setfsgid() is the fsgid counterpart of setfsuid().
 784 */
 785SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 786{
 787	const struct cred *old;
 788	struct cred *new;
 789	gid_t old_fsgid;
 790	kgid_t kgid;
 791
 792	old = current_cred();
 793	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
 794
 795	kgid = make_kgid(old->user_ns, gid);
 796	if (!gid_valid(kgid))
 797		return old_fsgid;
 798
 799	new = prepare_creds();
 800	if (!new)
 801		return old_fsgid;
 802
 803	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
 804	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
 805	    ns_capable(old->user_ns, CAP_SETGID)) {
 806		if (!gid_eq(kgid, old->fsgid)) {
 807			new->fsgid = kgid;
 808			goto change_okay;
 809		}
 810	}
 811
 812	abort_creds(new);
 813	return old_fsgid;
 814
 815change_okay:
 816	commit_creds(new);
 817	return old_fsgid;
 818}
 819#endif /* CONFIG_MULTIUSER */
 820
 821/**
 822 * sys_getpid - return the thread group id of the current process
 823 *
 824 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 825 * the pid are identical unless CLONE_THREAD was specified on clone() in
 826 * which case the tgid is the same in all threads of the same group.
 827 *
 828 * This is SMP safe as current->tgid does not change.
 829 */
 830SYSCALL_DEFINE0(getpid)
 831{
 832	return task_tgid_vnr(current);
 833}
 834
 835/* Thread ID - the internal kernel "pid" */
 836SYSCALL_DEFINE0(gettid)
 837{
 838	return task_pid_vnr(current);
 839}
 840
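/*
 * Minimal userspace sketch of the tgid/pid distinction above
 * (illustrative, not part of this file): every thread of a process sees
 * the same getpid() value (the tgid), while gettid() differs per thread;
 * C libraries without a gettid() wrapper can use syscall(SYS_gettid).
 */
#if 0	/* illustrative userspace example */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static void show_ids(void)
{
	pid_t tgid = getpid();
	pid_t tid = (pid_t)syscall(SYS_gettid);

	printf("tgid=%d tid=%d%s\n", (int)tgid, (int)tid,
	       tgid == tid ? " (main thread)" : "");
}
#endif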
 841/*
 842 * Accessing ->real_parent is not SMP-safe, it could
 843 * change from under us. However, we can use a stale
 844 * value of ->real_parent under rcu_read_lock(), see
 845 * release_task()->call_rcu(delayed_put_task_struct).
 846 */
 847SYSCALL_DEFINE0(getppid)
 848{
 849	int pid;
 850
 851	rcu_read_lock();
 852	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
 853	rcu_read_unlock();
 854
 855	return pid;
 856}
 857
 858SYSCALL_DEFINE0(getuid)
 859{
 860	/* Only we change this so SMP safe */
 861	return from_kuid_munged(current_user_ns(), current_uid());
 862}
 863
 864SYSCALL_DEFINE0(geteuid)
 865{
 866	/* Only we change this so SMP safe */
 867	return from_kuid_munged(current_user_ns(), current_euid());
 868}
 869
 870SYSCALL_DEFINE0(getgid)
 871{
 872	/* Only we change this so SMP safe */
 873	return from_kgid_munged(current_user_ns(), current_gid());
 874}
 875
 876SYSCALL_DEFINE0(getegid)
 877{
 878	/* Only we change this so SMP safe */
 879	return from_kgid_munged(current_user_ns(), current_egid());
 880}
 881
 882void do_sys_times(struct tms *tms)
 883{
 884	cputime_t tgutime, tgstime, cutime, cstime;
 885
  886	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
 887	cutime = current->signal->cutime;
 888	cstime = current->signal->cstime;
 889	tms->tms_utime = cputime_to_clock_t(tgutime);
 890	tms->tms_stime = cputime_to_clock_t(tgstime);
 891	tms->tms_cutime = cputime_to_clock_t(cutime);
 892	tms->tms_cstime = cputime_to_clock_t(cstime);
 893}
 894
 895SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
 896{
 897	if (tbuf) {
 898		struct tms tmp;
 899
 900		do_sys_times(&tmp);
 901		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
 902			return -EFAULT;
 903	}
 904	force_successful_syscall_return();
 905	return (long) jiffies_64_to_clock_t(get_jiffies_64());
 906}
 907
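/*
 * Minimal userspace sketch of the units involved above (illustrative, not
 * part of this file): the return value of times() and the struct tms
 * fields are in clock ticks, so they are divided by sysconf(_SC_CLK_TCK)
 * to obtain seconds.
 */
#if 0	/* illustrative userspace example */
#include <sys/times.h>
#include <unistd.h>
#include <stdio.h>

static void show_cpu_seconds(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK);

	if (times(&t) != (clock_t)-1 && hz > 0)
		printf("user %.2fs sys %.2fs\n",
		       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
}
#endif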
 908/*
 909 * This needs some heavy checking ...
 910 * I just haven't the stomach for it. I also don't fully
 911 * understand sessions/pgrp etc. Let somebody who does explain it.
 912 *
 913 * OK, I think I have the protection semantics right.... this is really
 914 * only important on a multi-user system anyway, to make sure one user
 915 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 916 *
 917 * !PF_FORKNOEXEC check to conform completely to POSIX.
 918 */
 919SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 920{
 921	struct task_struct *p;
 922	struct task_struct *group_leader = current->group_leader;
 923	struct pid *pgrp;
 924	int err;
 925
 926	if (!pid)
 927		pid = task_pid_vnr(group_leader);
 928	if (!pgid)
 929		pgid = pid;
 930	if (pgid < 0)
 931		return -EINVAL;
 932	rcu_read_lock();
 933
 934	/* From this point forward we keep holding onto the tasklist lock
 935	 * so that our parent does not change from under us. -DaveM
 936	 */
 937	write_lock_irq(&tasklist_lock);
 938
 939	err = -ESRCH;
 940	p = find_task_by_vpid(pid);
 941	if (!p)
 942		goto out;
 943
 944	err = -EINVAL;
 945	if (!thread_group_leader(p))
 946		goto out;
 947
 948	if (same_thread_group(p->real_parent, group_leader)) {
 949		err = -EPERM;
 950		if (task_session(p) != task_session(group_leader))
 951			goto out;
 952		err = -EACCES;
 953		if (!(p->flags & PF_FORKNOEXEC))
 954			goto out;
 955	} else {
 956		err = -ESRCH;
 957		if (p != group_leader)
 958			goto out;
 959	}
 960
 961	err = -EPERM;
 962	if (p->signal->leader)
 963		goto out;
 964
 965	pgrp = task_pid(p);
 966	if (pgid != pid) {
 967		struct task_struct *g;
 968
 969		pgrp = find_vpid(pgid);
 970		g = pid_task(pgrp, PIDTYPE_PGID);
 971		if (!g || task_session(g) != task_session(group_leader))
 972			goto out;
 973	}
 974
 975	err = security_task_setpgid(p, pgid);
 976	if (err)
 977		goto out;
 978
 979	if (task_pgrp(p) != pgrp)
 980		change_pid(p, PIDTYPE_PGID, pgrp);
 981
 982	err = 0;
 983out:
 984	/* All paths lead to here, thus we are safe. -DaveM */
 985	write_unlock_irq(&tasklist_lock);
 986	rcu_read_unlock();
 987	return err;
 988}
 989
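/*
 * Minimal userspace sketch of the PF_FORKNOEXEC rule above (illustrative,
 * not part of this file): a shell moves a forked child into its own
 * process group before the child execs; once the child has exec'd, the
 * same request from the parent is refused with EACCES.  Doing the call
 * in both parent and child closes the race.
 */
#if 0	/* illustrative userspace example */
#include <unistd.h>

static pid_t spawn_in_new_group(const char *path, char *const argv[])
{
	pid_t pid = fork();

	if (pid == 0) {
		setpgid(0, 0);		/* child: leader of a new group */
		execv(path, argv);
		_exit(127);
	}
	if (pid > 0)
		setpgid(pid, pid);	/* parent: same change, closes the race */
	return pid;
}
#endif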
 990SYSCALL_DEFINE1(getpgid, pid_t, pid)
 991{
 992	struct task_struct *p;
 993	struct pid *grp;
 994	int retval;
 995
 996	rcu_read_lock();
 997	if (!pid)
 998		grp = task_pgrp(current);
 999	else {
1000		retval = -ESRCH;
1001		p = find_task_by_vpid(pid);
1002		if (!p)
1003			goto out;
1004		grp = task_pgrp(p);
1005		if (!grp)
1006			goto out;
1007
1008		retval = security_task_getpgid(p);
1009		if (retval)
1010			goto out;
1011	}
1012	retval = pid_vnr(grp);
1013out:
1014	rcu_read_unlock();
1015	return retval;
1016}
1017
1018#ifdef __ARCH_WANT_SYS_GETPGRP
1019
1020SYSCALL_DEFINE0(getpgrp)
1021{
1022	return sys_getpgid(0);
1023}
1024
1025#endif
1026
1027SYSCALL_DEFINE1(getsid, pid_t, pid)
1028{
1029	struct task_struct *p;
1030	struct pid *sid;
1031	int retval;
1032
1033	rcu_read_lock();
1034	if (!pid)
1035		sid = task_session(current);
1036	else {
1037		retval = -ESRCH;
1038		p = find_task_by_vpid(pid);
1039		if (!p)
1040			goto out;
1041		sid = task_session(p);
1042		if (!sid)
1043			goto out;
1044
1045		retval = security_task_getsid(p);
1046		if (retval)
1047			goto out;
1048	}
1049	retval = pid_vnr(sid);
1050out:
1051	rcu_read_unlock();
1052	return retval;
1053}
1054
1055static void set_special_pids(struct pid *pid)
1056{
1057	struct task_struct *curr = current->group_leader;
1058
1059	if (task_session(curr) != pid)
1060		change_pid(curr, PIDTYPE_SID, pid);
1061
1062	if (task_pgrp(curr) != pid)
1063		change_pid(curr, PIDTYPE_PGID, pid);
1064}
1065
1066SYSCALL_DEFINE0(setsid)
1067{
1068	struct task_struct *group_leader = current->group_leader;
1069	struct pid *sid = task_pid(group_leader);
1070	pid_t session = pid_vnr(sid);
1071	int err = -EPERM;
1072
1073	write_lock_irq(&tasklist_lock);
1074	/* Fail if I am already a session leader */
1075	if (group_leader->signal->leader)
1076		goto out;
1077
1078	/* Fail if a process group id already exists that equals the
1079	 * proposed session id.
1080	 */
1081	if (pid_task(sid, PIDTYPE_PGID))
1082		goto out;
1083
1084	group_leader->signal->leader = 1;
1085	set_special_pids(sid);
1086
1087	proc_clear_tty(group_leader);
1088
1089	err = session;
1090out:
1091	write_unlock_irq(&tasklist_lock);
1092	if (err > 0) {
1093		proc_sid_connector(group_leader);
1094		sched_autogroup_create_attach(group_leader);
1095	}
1096	return err;
1097}
1098
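/*
 * Minimal userspace sketch of why the leader checks above matter
 * (illustrative, not part of this file): a process group leader cannot
 * call setsid(), so the classic daemonization sequence forks first and
 * lets the child, which is not a leader, create the new session.
 */
#if 0	/* illustrative userspace example */
#include <unistd.h>
#include <stdlib.h>

static void detach_session(void)
{
	pid_t pid = fork();

	if (pid < 0)
		exit(1);
	if (pid > 0)
		exit(0);	/* parent exits; child is not a group leader */
	if (setsid() < 0)	/* child: succeeds, becomes session leader */
		exit(1);
}
#endif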
1099DECLARE_RWSEM(uts_sem);
1100
1101#ifdef COMPAT_UTS_MACHINE
1102#define override_architecture(name) \
1103	(personality(current->personality) == PER_LINUX32 && \
1104	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1105		      sizeof(COMPAT_UTS_MACHINE)))
1106#else
1107#define override_architecture(name)	0
1108#endif
1109
1110/*
1111 * Work around broken programs that cannot handle "Linux 3.0".
 1112 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
1113 * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
1114 */
1115static int override_release(char __user *release, size_t len)
1116{
1117	int ret = 0;
1118
1119	if (current->personality & UNAME26) {
1120		const char *rest = UTS_RELEASE;
1121		char buf[65] = { 0 };
1122		int ndots = 0;
1123		unsigned v;
1124		size_t copy;
1125
1126		while (*rest) {
1127			if (*rest == '.' && ++ndots >= 3)
1128				break;
1129			if (!isdigit(*rest) && *rest != '.')
1130				break;
1131			rest++;
1132		}
1133		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1134		copy = clamp_t(size_t, len, 1, sizeof(buf));
1135		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1136		ret = copy_to_user(release, buf, copy + 1);
1137	}
1138	return ret;
1139}
1140
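/*
 * Worked instance of the mapping above (illustrative, not part of this
 * file; assumes a 4.6 kernel and a libc that exposes UNAME26 in
 * <sys/personality.h>): with the LINUX_VERSION_CODE of 4.6 the reported
 * release becomes 2.6.(60 + 6), e.g. "2.6.66".
 */
#if 0	/* illustrative userspace example */
#include <sys/personality.h>
#include <sys/utsname.h>
#include <stdio.h>

static void show_uname26(void)
{
	struct utsname u;

	personality(PER_LINUX | UNAME26);	/* what "setarch --uname-2.6" does */
	if (uname(&u) == 0)
		printf("release=%s\n", u.release);	/* e.g. 2.6.66 on 4.6 */
}
#endif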
1141SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1142{
1143	int errno = 0;
1144
1145	down_read(&uts_sem);
1146	if (copy_to_user(name, utsname(), sizeof *name))
1147		errno = -EFAULT;
1148	up_read(&uts_sem);
1149
1150	if (!errno && override_release(name->release, sizeof(name->release)))
1151		errno = -EFAULT;
1152	if (!errno && override_architecture(name))
1153		errno = -EFAULT;
1154	return errno;
1155}
1156
1157#ifdef __ARCH_WANT_SYS_OLD_UNAME
1158/*
1159 * Old cruft
1160 */
1161SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1162{
1163	int error = 0;
1164
1165	if (!name)
1166		return -EFAULT;
1167
1168	down_read(&uts_sem);
1169	if (copy_to_user(name, utsname(), sizeof(*name)))
1170		error = -EFAULT;
1171	up_read(&uts_sem);
1172
1173	if (!error && override_release(name->release, sizeof(name->release)))
1174		error = -EFAULT;
1175	if (!error && override_architecture(name))
1176		error = -EFAULT;
1177	return error;
1178}
1179
1180SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1181{
1182	int error;
1183
1184	if (!name)
1185		return -EFAULT;
1186	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1187		return -EFAULT;
1188
1189	down_read(&uts_sem);
1190	error = __copy_to_user(&name->sysname, &utsname()->sysname,
1191			       __OLD_UTS_LEN);
1192	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1193	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1194				__OLD_UTS_LEN);
1195	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1196	error |= __copy_to_user(&name->release, &utsname()->release,
1197				__OLD_UTS_LEN);
1198	error |= __put_user(0, name->release + __OLD_UTS_LEN);
1199	error |= __copy_to_user(&name->version, &utsname()->version,
1200				__OLD_UTS_LEN);
1201	error |= __put_user(0, name->version + __OLD_UTS_LEN);
1202	error |= __copy_to_user(&name->machine, &utsname()->machine,
1203				__OLD_UTS_LEN);
1204	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1205	up_read(&uts_sem);
1206
1207	if (!error && override_architecture(name))
1208		error = -EFAULT;
1209	if (!error && override_release(name->release, sizeof(name->release)))
1210		error = -EFAULT;
1211	return error ? -EFAULT : 0;
1212}
1213#endif
1214
1215SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1216{
1217	int errno;
1218	char tmp[__NEW_UTS_LEN];
1219
1220	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1221		return -EPERM;
1222
1223	if (len < 0 || len > __NEW_UTS_LEN)
1224		return -EINVAL;
1225	down_write(&uts_sem);
1226	errno = -EFAULT;
1227	if (!copy_from_user(tmp, name, len)) {
1228		struct new_utsname *u = utsname();
1229
1230		memcpy(u->nodename, tmp, len);
1231		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1232		errno = 0;
1233		uts_proc_notify(UTS_PROC_HOSTNAME);
1234	}
1235	up_write(&uts_sem);
1236	return errno;
1237}
1238
1239#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1240
1241SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1242{
1243	int i, errno;
1244	struct new_utsname *u;
1245
1246	if (len < 0)
1247		return -EINVAL;
1248	down_read(&uts_sem);
1249	u = utsname();
1250	i = 1 + strlen(u->nodename);
1251	if (i > len)
1252		i = len;
1253	errno = 0;
1254	if (copy_to_user(name, u->nodename, i))
1255		errno = -EFAULT;
1256	up_read(&uts_sem);
1257	return errno;
1258}
1259
1260#endif
1261
1262/*
1263 * Only setdomainname; getdomainname can be implemented by calling
1264 * uname()
1265 */
1266SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1267{
1268	int errno;
1269	char tmp[__NEW_UTS_LEN];
1270
1271	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1272		return -EPERM;
1273	if (len < 0 || len > __NEW_UTS_LEN)
1274		return -EINVAL;
1275
1276	down_write(&uts_sem);
1277	errno = -EFAULT;
1278	if (!copy_from_user(tmp, name, len)) {
1279		struct new_utsname *u = utsname();
1280
1281		memcpy(u->domainname, tmp, len);
1282		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1283		errno = 0;
1284		uts_proc_notify(UTS_PROC_DOMAINNAME);
1285	}
1286	up_write(&uts_sem);
1287	return errno;
1288}
1289
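/*
 * Minimal userspace sketch of the remark above (illustrative, not part of
 * this file; the domainname field of struct utsname is a GNU extension):
 * the NIS domain name written by setdomainname() can be read back from
 * uname(), so no getdomainname syscall is required.
 */
#if 0	/* illustrative userspace example */
#define _GNU_SOURCE
#include <sys/utsname.h>
#include <stdio.h>

static void show_domainname(void)
{
	struct utsname u;

	if (uname(&u) == 0)
		printf("domainname=%s\n", u.domainname);
}
#endif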
1290SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1291{
1292	struct rlimit value;
1293	int ret;
1294
1295	ret = do_prlimit(current, resource, NULL, &value);
1296	if (!ret)
1297		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1298
1299	return ret;
1300}
1301
1302#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1303
1304/*
1305 *	Back compatibility for getrlimit. Needed for some apps.
1306 */
1307SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1308		struct rlimit __user *, rlim)
1309{
1310	struct rlimit x;
1311	if (resource >= RLIM_NLIMITS)
1312		return -EINVAL;
1313
1314	task_lock(current->group_leader);
1315	x = current->signal->rlim[resource];
1316	task_unlock(current->group_leader);
1317	if (x.rlim_cur > 0x7FFFFFFF)
1318		x.rlim_cur = 0x7FFFFFFF;
1319	if (x.rlim_max > 0x7FFFFFFF)
1320		x.rlim_max = 0x7FFFFFFF;
1321	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1322}
1323
1324#endif
1325
1326static inline bool rlim64_is_infinity(__u64 rlim64)
1327{
1328#if BITS_PER_LONG < 64
1329	return rlim64 >= ULONG_MAX;
1330#else
1331	return rlim64 == RLIM64_INFINITY;
1332#endif
1333}
1334
1335static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1336{
1337	if (rlim->rlim_cur == RLIM_INFINITY)
1338		rlim64->rlim_cur = RLIM64_INFINITY;
1339	else
1340		rlim64->rlim_cur = rlim->rlim_cur;
1341	if (rlim->rlim_max == RLIM_INFINITY)
1342		rlim64->rlim_max = RLIM64_INFINITY;
1343	else
1344		rlim64->rlim_max = rlim->rlim_max;
1345}
1346
1347static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1348{
1349	if (rlim64_is_infinity(rlim64->rlim_cur))
1350		rlim->rlim_cur = RLIM_INFINITY;
1351	else
1352		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1353	if (rlim64_is_infinity(rlim64->rlim_max))
1354		rlim->rlim_max = RLIM_INFINITY;
1355	else
1356		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1357}
1358
1359/* make sure you are allowed to change @tsk limits before calling this */
1360int do_prlimit(struct task_struct *tsk, unsigned int resource,
1361		struct rlimit *new_rlim, struct rlimit *old_rlim)
1362{
1363	struct rlimit *rlim;
1364	int retval = 0;
1365
1366	if (resource >= RLIM_NLIMITS)
1367		return -EINVAL;
1368	if (new_rlim) {
1369		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1370			return -EINVAL;
1371		if (resource == RLIMIT_NOFILE &&
1372				new_rlim->rlim_max > sysctl_nr_open)
1373			return -EPERM;
1374	}
1375
1376	/* protect tsk->signal and tsk->sighand from disappearing */
1377	read_lock(&tasklist_lock);
1378	if (!tsk->sighand) {
1379		retval = -ESRCH;
1380		goto out;
1381	}
1382
1383	rlim = tsk->signal->rlim + resource;
1384	task_lock(tsk->group_leader);
1385	if (new_rlim) {
1386		/* Keep the capable check against init_user_ns until
1387		   cgroups can contain all limits */
1388		if (new_rlim->rlim_max > rlim->rlim_max &&
1389				!capable(CAP_SYS_RESOURCE))
1390			retval = -EPERM;
1391		if (!retval)
1392			retval = security_task_setrlimit(tsk->group_leader,
1393					resource, new_rlim);
1394		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
1395			/*
1396			 * The caller is asking for an immediate RLIMIT_CPU
1397			 * expiry.  But we use the zero value to mean "it was
1398			 * never set".  So let's cheat and make it one second
1399			 * instead
1400			 */
1401			new_rlim->rlim_cur = 1;
1402		}
1403	}
1404	if (!retval) {
1405		if (old_rlim)
1406			*old_rlim = *rlim;
1407		if (new_rlim)
1408			*rlim = *new_rlim;
1409	}
1410	task_unlock(tsk->group_leader);
1411
1412	/*
1413	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
1414	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
1415	 * very long-standing error, and fixing it now risks breakage of
1416	 * applications, so we live with it
1417	 */
1418	 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1419			 new_rlim->rlim_cur != RLIM_INFINITY)
1420		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1421out:
1422	read_unlock(&tasklist_lock);
1423	return retval;
1424}
1425
1426/* rcu lock must be held */
1427static int check_prlimit_permission(struct task_struct *task)
1428{
1429	const struct cred *cred = current_cred(), *tcred;
1430
1431	if (current == task)
1432		return 0;
1433
1434	tcred = __task_cred(task);
1435	if (uid_eq(cred->uid, tcred->euid) &&
1436	    uid_eq(cred->uid, tcred->suid) &&
1437	    uid_eq(cred->uid, tcred->uid)  &&
1438	    gid_eq(cred->gid, tcred->egid) &&
1439	    gid_eq(cred->gid, tcred->sgid) &&
1440	    gid_eq(cred->gid, tcred->gid))
1441		return 0;
1442	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1443		return 0;
1444
1445	return -EPERM;
1446}
1447
1448SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1449		const struct rlimit64 __user *, new_rlim,
1450		struct rlimit64 __user *, old_rlim)
1451{
1452	struct rlimit64 old64, new64;
1453	struct rlimit old, new;
1454	struct task_struct *tsk;
1455	int ret;
1456
1457	if (new_rlim) {
1458		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1459			return -EFAULT;
1460		rlim64_to_rlim(&new64, &new);
1461	}
1462
1463	rcu_read_lock();
1464	tsk = pid ? find_task_by_vpid(pid) : current;
1465	if (!tsk) {
1466		rcu_read_unlock();
1467		return -ESRCH;
1468	}
1469	ret = check_prlimit_permission(tsk);
1470	if (ret) {
1471		rcu_read_unlock();
1472		return ret;
1473	}
1474	get_task_struct(tsk);
1475	rcu_read_unlock();
1476
1477	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1478			old_rlim ? &old : NULL);
1479
1480	if (!ret && old_rlim) {
1481		rlim_to_rlim64(&old, &old64);
1482		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1483			ret = -EFAULT;
1484	}
1485
1486	put_task_struct(tsk);
1487	return ret;
1488}
1489
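/*
 * Minimal userspace sketch of the syscall above (illustrative, not part
 * of this file; glibc typically exposes it as prlimit()): pid 0 means
 * the calling process, a NULL new limit makes the call a pure read, and
 * the permission rules match check_prlimit_permission() above.
 */
#if 0	/* illustrative userspace example */
#define _GNU_SOURCE
#include <sys/resource.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>

static void raise_nofile(pid_t pid, rlim_t soft, rlim_t hard)
{
	struct rlimit new = { .rlim_cur = soft, .rlim_max = hard };
	struct rlimit old;

	if (prlimit(pid, RLIMIT_NOFILE, &new, &old) == 0)
		printf("previous soft=%ju hard=%ju\n",
		       (uintmax_t)old.rlim_cur, (uintmax_t)old.rlim_max);
}
#endif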
1490SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1491{
1492	struct rlimit new_rlim;
1493
1494	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1495		return -EFAULT;
1496	return do_prlimit(current, resource, &new_rlim, NULL);
1497}
1498
1499/*
1500 * It would make sense to put struct rusage in the task_struct,
1501 * except that would make the task_struct be *really big*.  After
1502 * task_struct gets moved into malloc'ed memory, it would
1503 * make sense to do this.  It will make moving the rest of the information
1504 * a lot simpler!  (Which we're not doing right now because we're not
1505 * measuring them yet).
1506 *
1507 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1508 * races with threads incrementing their own counters.  But since word
1509 * reads are atomic, we either get new values or old values and we don't
1510 * care which for the sums.  We always take the siglock to protect reading
1511 * the c* fields from p->signal from races with exit.c updating those
1512 * fields when reaping, so a sample either gets all the additions of a
1513 * given child after it's reaped, or none so this sample is before reaping.
1514 *
1515 * Locking:
 1516 * We need to take the siglock for CHILDREN, SELF and BOTH
 1517 * for the cases current multithreaded, non-current single threaded and
 1518 * non-current multithreaded.  Thread traversal is now safe with
 1519 * the siglock held.
 1520 * Strictly speaking, we do not need to take the siglock if we are current and
1521 * single threaded,  as no one else can take our signal_struct away, no one
1522 * else can  reap the  children to update signal->c* counters, and no one else
1523 * can race with the signal-> fields. If we do not take any lock, the
1524 * signal-> fields could be read out of order while another thread was just
1525 * exiting. So we should  place a read memory barrier when we avoid the lock.
1526 * On the writer side,  write memory barrier is implied in  __exit_signal
1527 * as __exit_signal releases  the siglock spinlock after updating the signal->
1528 * fields. But we don't do this yet to keep things simple.
1529 *
1530 */
1531
1532static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1533{
1534	r->ru_nvcsw += t->nvcsw;
1535	r->ru_nivcsw += t->nivcsw;
1536	r->ru_minflt += t->min_flt;
1537	r->ru_majflt += t->maj_flt;
1538	r->ru_inblock += task_io_get_inblock(t);
1539	r->ru_oublock += task_io_get_oublock(t);
1540}
1541
1542static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1543{
1544	struct task_struct *t;
1545	unsigned long flags;
1546	cputime_t tgutime, tgstime, utime, stime;
1547	unsigned long maxrss = 0;
1548
1549	memset((char *)r, 0, sizeof (*r));
1550	utime = stime = 0;
1551
1552	if (who == RUSAGE_THREAD) {
1553		task_cputime_adjusted(current, &utime, &stime);
1554		accumulate_thread_rusage(p, r);
1555		maxrss = p->signal->maxrss;
1556		goto out;
1557	}
1558
1559	if (!lock_task_sighand(p, &flags))
1560		return;
1561
1562	switch (who) {
1563	case RUSAGE_BOTH:
1564	case RUSAGE_CHILDREN:
1565		utime = p->signal->cutime;
1566		stime = p->signal->cstime;
1567		r->ru_nvcsw = p->signal->cnvcsw;
1568		r->ru_nivcsw = p->signal->cnivcsw;
1569		r->ru_minflt = p->signal->cmin_flt;
1570		r->ru_majflt = p->signal->cmaj_flt;
1571		r->ru_inblock = p->signal->cinblock;
1572		r->ru_oublock = p->signal->coublock;
1573		maxrss = p->signal->cmaxrss;
1574
1575		if (who == RUSAGE_CHILDREN)
1576			break;
1577
1578	case RUSAGE_SELF:
1579		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1580		utime += tgutime;
1581		stime += tgstime;
1582		r->ru_nvcsw += p->signal->nvcsw;
1583		r->ru_nivcsw += p->signal->nivcsw;
1584		r->ru_minflt += p->signal->min_flt;
1585		r->ru_majflt += p->signal->maj_flt;
1586		r->ru_inblock += p->signal->inblock;
1587		r->ru_oublock += p->signal->oublock;
1588		if (maxrss < p->signal->maxrss)
1589			maxrss = p->signal->maxrss;
1590		t = p;
1591		do {
1592			accumulate_thread_rusage(t, r);
1593		} while_each_thread(p, t);
1594		break;
1595
1596	default:
1597		BUG();
1598	}
1599	unlock_task_sighand(p, &flags);
1600
1601out:
1602	cputime_to_timeval(utime, &r->ru_utime);
1603	cputime_to_timeval(stime, &r->ru_stime);
1604
1605	if (who != RUSAGE_CHILDREN) {
1606		struct mm_struct *mm = get_task_mm(p);
1607
1608		if (mm) {
1609			setmax_mm_hiwater_rss(&maxrss, mm);
1610			mmput(mm);
1611		}
1612	}
1613	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1614}
1615
1616int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1617{
1618	struct rusage r;
1619
1620	k_getrusage(p, who, &r);
1621	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1622}
1623
1624SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1625{
1626	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1627	    who != RUSAGE_THREAD)
1628		return -EINVAL;
1629	return getrusage(current, who, ru);
1630}
1631
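/*
 * Minimal userspace sketch of the three scopes accepted above
 * (illustrative, not part of this file; RUSAGE_THREAD needs _GNU_SOURCE):
 * RUSAGE_SELF sums all threads of the caller, RUSAGE_CHILDREN only
 * waited-for children, and RUSAGE_THREAD just the calling thread.
 * ru_maxrss is reported in kilobytes, as converted at the end of
 * k_getrusage() above.
 */
#if 0	/* illustrative userspace example */
#define _GNU_SOURCE
#include <sys/resource.h>
#include <stdio.h>

static void show_maxrss(void)
{
	struct rusage self, thread;

	if (getrusage(RUSAGE_SELF, &self) == 0 &&
	    getrusage(RUSAGE_THREAD, &thread) == 0)
		printf("maxrss self=%ldkB thread=%ldkB\n",
		       self.ru_maxrss, thread.ru_maxrss);
}
#endif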
1632#ifdef CONFIG_COMPAT
1633COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1634{
1635	struct rusage r;
1636
1637	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1638	    who != RUSAGE_THREAD)
1639		return -EINVAL;
1640
1641	k_getrusage(current, who, &r);
1642	return put_compat_rusage(&r, ru);
1643}
1644#endif
1645
1646SYSCALL_DEFINE1(umask, int, mask)
1647{
1648	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1649	return mask;
1650}
1651
1652static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1653{
1654	struct fd exe;
1655	struct file *old_exe, *exe_file;
1656	struct inode *inode;
1657	int err;
1658
1659	exe = fdget(fd);
1660	if (!exe.file)
1661		return -EBADF;
1662
1663	inode = file_inode(exe.file);
1664
1665	/*
1666	 * Because the original mm->exe_file points to executable file, make
1667	 * sure that this one is executable as well, to avoid breaking an
1668	 * overall picture.
1669	 */
1670	err = -EACCES;
1671	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1672		goto exit;
1673
1674	err = inode_permission(inode, MAY_EXEC);
1675	if (err)
1676		goto exit;
1677
1678	/*
1679	 * Forbid mm->exe_file change if old file still mapped.
1680	 */
1681	exe_file = get_mm_exe_file(mm);
1682	err = -EBUSY;
1683	if (exe_file) {
1684		struct vm_area_struct *vma;
1685
1686		down_read(&mm->mmap_sem);
1687		for (vma = mm->mmap; vma; vma = vma->vm_next) {
1688			if (!vma->vm_file)
1689				continue;
1690			if (path_equal(&vma->vm_file->f_path,
1691				       &exe_file->f_path))
1692				goto exit_err;
1693		}
1694
1695		up_read(&mm->mmap_sem);
1696		fput(exe_file);
1697	}
1698
1699	/*
1700	 * The symlink can be changed only once, just to disallow arbitrary
1701	 * transitions malicious software might bring in. This means one
 1702	 * could take a snapshot of all running processes and monitor
1703	 * /proc/pid/exe changes to notice unusual activity if needed.
1704	 */
1705	err = -EPERM;
1706	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
1707		goto exit;
1708
1709	err = 0;
1710	/* set the new file, lockless */
1711	get_file(exe.file);
1712	old_exe = xchg(&mm->exe_file, exe.file);
1713	if (old_exe)
1714		fput(old_exe);
1715exit:
1716	fdput(exe);
1717	return err;
1718exit_err:
1719	up_read(&mm->mmap_sem);
1720	fput(exe_file);
1721	goto exit;
1722}
1723
1724/*
1725 * WARNING: we don't require any capability here so be very careful
1726 * in what is allowed for modification from userspace.
1727 */
1728static int validate_prctl_map(struct prctl_mm_map *prctl_map)
1729{
1730	unsigned long mmap_max_addr = TASK_SIZE;
1731	struct mm_struct *mm = current->mm;
1732	int error = -EINVAL, i;
1733
1734	static const unsigned char offsets[] = {
1735		offsetof(struct prctl_mm_map, start_code),
1736		offsetof(struct prctl_mm_map, end_code),
1737		offsetof(struct prctl_mm_map, start_data),
1738		offsetof(struct prctl_mm_map, end_data),
1739		offsetof(struct prctl_mm_map, start_brk),
1740		offsetof(struct prctl_mm_map, brk),
1741		offsetof(struct prctl_mm_map, start_stack),
1742		offsetof(struct prctl_mm_map, arg_start),
1743		offsetof(struct prctl_mm_map, arg_end),
1744		offsetof(struct prctl_mm_map, env_start),
1745		offsetof(struct prctl_mm_map, env_end),
1746	};
1747
1748	/*
1749	 * Make sure the members are not somewhere outside
1750	 * of allowed address space.
1751	 */
1752	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1753		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1754
1755		if ((unsigned long)val >= mmap_max_addr ||
1756		    (unsigned long)val < mmap_min_addr)
1757			goto out;
1758	}
1759
1760	/*
1761	 * Make sure the pairs are ordered.
1762	 */
1763#define __prctl_check_order(__m1, __op, __m2)				\
1764	((unsigned long)prctl_map->__m1 __op				\
1765	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1766	error  = __prctl_check_order(start_code, <, end_code);
1767	error |= __prctl_check_order(start_data, <, end_data);
1768	error |= __prctl_check_order(start_brk, <=, brk);
1769	error |= __prctl_check_order(arg_start, <=, arg_end);
1770	error |= __prctl_check_order(env_start, <=, env_end);
1771	if (error)
1772		goto out;
1773#undef __prctl_check_order
1774
1775	error = -EINVAL;
1776
1777	/*
1778	 * @brk should be after @end_data in traditional maps.
1779	 */
1780	if (prctl_map->start_brk <= prctl_map->end_data ||
1781	    prctl_map->brk <= prctl_map->end_data)
1782		goto out;
1783
1784	/*
 1785	 * Nor should we allow overriding limits if they are set.
1786	 */
1787	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1788			      prctl_map->start_brk, prctl_map->end_data,
1789			      prctl_map->start_data))
1790			goto out;
1791
1792	/*
1793	 * Someone is trying to cheat the auxv vector.
1794	 */
1795	if (prctl_map->auxv_size) {
1796		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
1797			goto out;
1798	}
1799
1800	/*
1801	 * Finally, make sure the caller has the rights to
1802	 * change /proc/pid/exe link: only local root should
1803	 * be allowed to.
1804	 */
1805	if (prctl_map->exe_fd != (u32)-1) {
1806		struct user_namespace *ns = current_user_ns();
1807		const struct cred *cred = current_cred();
1808
1809		if (!uid_eq(cred->uid, make_kuid(ns, 0)) ||
1810		    !gid_eq(cred->gid, make_kgid(ns, 0)))
1811			goto out;
1812	}
1813
1814	error = 0;
1815out:
1816	return error;
1817}
1818
1819#ifdef CONFIG_CHECKPOINT_RESTORE
1820static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1821{
1822	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1823	unsigned long user_auxv[AT_VECTOR_SIZE];
1824	struct mm_struct *mm = current->mm;
1825	int error;
1826
1827	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1828	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1829
1830	if (opt == PR_SET_MM_MAP_SIZE)
1831		return put_user((unsigned int)sizeof(prctl_map),
1832				(unsigned int __user *)addr);
1833
1834	if (data_size != sizeof(prctl_map))
1835		return -EINVAL;
1836
1837	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1838		return -EFAULT;
1839
1840	error = validate_prctl_map(&prctl_map);
1841	if (error)
1842		return error;
1843
1844	if (prctl_map.auxv_size) {
1845		memset(user_auxv, 0, sizeof(user_auxv));
1846		if (copy_from_user(user_auxv,
1847				   (const void __user *)prctl_map.auxv,
1848				   prctl_map.auxv_size))
1849			return -EFAULT;
1850
1851		/* Last entry must be AT_NULL as specification requires */
1852		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1853		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
1854	}
1855
1856	if (prctl_map.exe_fd != (u32)-1) {
1857		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
1858		if (error)
1859			return error;
1860	}
1861
1862	down_write(&mm->mmap_sem);
1863
1864	/*
 1865	 * We don't validate whether these members point to
 1866	 * real, present VMAs because the application may already have the
 1867	 * corresponding VMAs unmapped, and the kernel uses these members mostly
 1868	 * for statistics output in procfs, except
 1869	 *
 1870	 *  - @start_brk/@brk, which are used in do_brk; the kernel looks up
 1871	 *    VMAs when updating these members, so anything wrong written
 1872	 *    here causes the kernel to swear at the userspace program but
 1873	 *    won't lead to any problem in the kernel itself
1874	 */
1875
1876	mm->start_code	= prctl_map.start_code;
1877	mm->end_code	= prctl_map.end_code;
1878	mm->start_data	= prctl_map.start_data;
1879	mm->end_data	= prctl_map.end_data;
1880	mm->start_brk	= prctl_map.start_brk;
1881	mm->brk		= prctl_map.brk;
1882	mm->start_stack	= prctl_map.start_stack;
1883	mm->arg_start	= prctl_map.arg_start;
1884	mm->arg_end	= prctl_map.arg_end;
1885	mm->env_start	= prctl_map.env_start;
1886	mm->env_end	= prctl_map.env_end;
1887
1888	/*
1889	 * Note this update of @saved_auxv is lockless thus
1890	 * if someone reads this member in procfs while we're
1891	 * updating -- it may get partly updated results. It's
 1892	 * a known and acceptable trade off: we leave it as is to
1893	 * not introduce additional locks here making the kernel
1894	 * more complex.
1895	 */
1896	if (prctl_map.auxv_size)
1897		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
1898
1899	up_write(&mm->mmap_sem);
1900	return 0;
1901}
1902#endif /* CONFIG_CHECKPOINT_RESTORE */
1903
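/*
 * Minimal userspace sketch of the checkpoint/restore interface above
 * (illustrative, not part of this file; assumes kernel headers that
 * define PR_SET_MM_MAP_SIZE): a restorer first asks how large struct
 * prctl_mm_map is, then submits a fully populated map with
 * PR_SET_MM_MAP in a single call.
 */
#if 0	/* illustrative userspace example */
#include <sys/prctl.h>
#include <stdio.h>

static void probe_mm_map_size(void)
{
	unsigned int size = 0;

	if (prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE,
		  (unsigned long)&size, 0, 0) == 0)
		printf("struct prctl_mm_map is %u bytes\n", size);
}
#endif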
1904static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
1905			  unsigned long len)
1906{
1907	/*
1908	 * This doesn't move the auxiliary vector itself since it's pinned to
1909	 * mm_struct, but it permits filling the vector with new values.  It's
1910	 * up to the caller to provide sane values here, otherwise userspace
1911	 * tools which use this vector might be unhappy.
1912	 */
1913	unsigned long user_auxv[AT_VECTOR_SIZE];
1914
1915	if (len > sizeof(user_auxv))
1916		return -EINVAL;
1917
1918	if (copy_from_user(user_auxv, (const void __user *)addr, len))
1919		return -EFAULT;
1920
1921	/* Make sure the last entry is always AT_NULL */
1922	user_auxv[AT_VECTOR_SIZE - 2] = 0;
1923	user_auxv[AT_VECTOR_SIZE - 1] = 0;
1924
1925	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1926
1927	task_lock(current);
1928	memcpy(mm->saved_auxv, user_auxv, len);
1929	task_unlock(current);
1930
1931	return 0;
1932}
1933
1934static int prctl_set_mm(int opt, unsigned long addr,
1935			unsigned long arg4, unsigned long arg5)
1936{
1937	struct mm_struct *mm = current->mm;
1938	struct prctl_mm_map prctl_map;
1939	struct vm_area_struct *vma;
1940	int error;
1941
1942	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
1943			      opt != PR_SET_MM_MAP &&
1944			      opt != PR_SET_MM_MAP_SIZE)))
1945		return -EINVAL;
1946
1947#ifdef CONFIG_CHECKPOINT_RESTORE
1948	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
1949		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
1950#endif
1951
1952	if (!capable(CAP_SYS_RESOURCE))
1953		return -EPERM;
1954
1955	if (opt == PR_SET_MM_EXE_FILE)
1956		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
1957
1958	if (opt == PR_SET_MM_AUXV)
1959		return prctl_set_auxv(mm, addr, arg4);
1960
1961	if (addr >= TASK_SIZE || addr < mmap_min_addr)
1962		return -EINVAL;
1963
1964	error = -EINVAL;
1965
1966	down_write(&mm->mmap_sem);
1967	vma = find_vma(mm, addr);
1968
1969	prctl_map.start_code	= mm->start_code;
1970	prctl_map.end_code	= mm->end_code;
1971	prctl_map.start_data	= mm->start_data;
1972	prctl_map.end_data	= mm->end_data;
1973	prctl_map.start_brk	= mm->start_brk;
1974	prctl_map.brk		= mm->brk;
1975	prctl_map.start_stack	= mm->start_stack;
1976	prctl_map.arg_start	= mm->arg_start;
1977	prctl_map.arg_end	= mm->arg_end;
1978	prctl_map.env_start	= mm->env_start;
1979	prctl_map.env_end	= mm->env_end;
1980	prctl_map.auxv		= NULL;
1981	prctl_map.auxv_size	= 0;
1982	prctl_map.exe_fd	= -1;
1983
1984	switch (opt) {
1985	case PR_SET_MM_START_CODE:
1986		prctl_map.start_code = addr;
1987		break;
1988	case PR_SET_MM_END_CODE:
1989		prctl_map.end_code = addr;
1990		break;
1991	case PR_SET_MM_START_DATA:
1992		prctl_map.start_data = addr;
1993		break;
1994	case PR_SET_MM_END_DATA:
1995		prctl_map.end_data = addr;
1996		break;
1997	case PR_SET_MM_START_STACK:
1998		prctl_map.start_stack = addr;
1999		break;
2000	case PR_SET_MM_START_BRK:
2001		prctl_map.start_brk = addr;
2002		break;
2003	case PR_SET_MM_BRK:
2004		prctl_map.brk = addr;
2005		break;
2006	case PR_SET_MM_ARG_START:
2007		prctl_map.arg_start = addr;
2008		break;
2009	case PR_SET_MM_ARG_END:
2010		prctl_map.arg_end = addr;
2011		break;
2012	case PR_SET_MM_ENV_START:
2013		prctl_map.env_start = addr;
2014		break;
2015	case PR_SET_MM_ENV_END:
2016		prctl_map.env_end = addr;
2017		break;
2018	default:
2019		goto out;
2020	}
2021
2022	error = validate_prctl_map(&prctl_map);
2023	if (error)
2024		goto out;
2025
2026	switch (opt) {
2027	/*
2028	 * If the command line arguments and environment
2029	 * are placed somewhere else on the stack, we can
2030	 * set them up here: ARG_START/END to set up the
2031	 * command line arguments and ENV_START/END
2032	 * for the environment.
2033	 */
2034	case PR_SET_MM_START_STACK:
2035	case PR_SET_MM_ARG_START:
2036	case PR_SET_MM_ARG_END:
2037	case PR_SET_MM_ENV_START:
2038	case PR_SET_MM_ENV_END:
2039		if (!vma) {
2040			error = -EFAULT;
2041			goto out;
2042		}
2043	}
2044
2045	mm->start_code	= prctl_map.start_code;
2046	mm->end_code	= prctl_map.end_code;
2047	mm->start_data	= prctl_map.start_data;
2048	mm->end_data	= prctl_map.end_data;
2049	mm->start_brk	= prctl_map.start_brk;
2050	mm->brk		= prctl_map.brk;
2051	mm->start_stack	= prctl_map.start_stack;
2052	mm->arg_start	= prctl_map.arg_start;
2053	mm->arg_end	= prctl_map.arg_end;
2054	mm->env_start	= prctl_map.env_start;
2055	mm->env_end	= prctl_map.env_end;
2056
2057	error = 0;
2058out:
2059	up_write(&mm->mmap_sem);
2060	return error;
2061}
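
/*
 * Editor's example (not part of the kernel source): a hedged sketch of the
 * single-field form of PR_SET_MM, e.g. after relocating the command-line
 * range.  The helper name is illustrative; the caller needs CAP_SYS_RESOURCE
 * and, per the find_vma() check above, the addresses must fall inside an
 * existing mapping below TASK_SIZE.
 */
#include <sys/prctl.h>

static int example_move_arg_range(unsigned long start, unsigned long end)
{
	if (prctl(PR_SET_MM, PR_SET_MM_ARG_START, start, 0, 0))
		return -1;
	return prctl(PR_SET_MM, PR_SET_MM_ARG_END, end, 0, 0);
}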
2062
2063#ifdef CONFIG_CHECKPOINT_RESTORE
2064static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2065{
2066	return put_user(me->clear_child_tid, tid_addr);
2067}
2068#else
2069static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2070{
2071	return -EINVAL;
2072}
2073#endif
2074
2075SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2076		unsigned long, arg4, unsigned long, arg5)
2077{
2078	struct task_struct *me = current;
2079	unsigned char comm[sizeof(me->comm)];
2080	long error;
2081
2082	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2083	if (error != -ENOSYS)
2084		return error;
2085
2086	error = 0;
2087	switch (option) {
2088	case PR_SET_PDEATHSIG:
2089		if (!valid_signal(arg2)) {
2090			error = -EINVAL;
2091			break;
2092		}
2093		me->pdeath_signal = arg2;
2094		break;
2095	case PR_GET_PDEATHSIG:
2096		error = put_user(me->pdeath_signal, (int __user *)arg2);
2097		break;
2098	case PR_GET_DUMPABLE:
2099		error = get_dumpable(me->mm);
2100		break;
2101	case PR_SET_DUMPABLE:
2102		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2103			error = -EINVAL;
2104			break;
2105		}
2106		set_dumpable(me->mm, arg2);
2107		break;
2108
2109	case PR_SET_UNALIGN:
2110		error = SET_UNALIGN_CTL(me, arg2);
2111		break;
2112	case PR_GET_UNALIGN:
2113		error = GET_UNALIGN_CTL(me, arg2);
2114		break;
2115	case PR_SET_FPEMU:
2116		error = SET_FPEMU_CTL(me, arg2);
2117		break;
2118	case PR_GET_FPEMU:
2119		error = GET_FPEMU_CTL(me, arg2);
2120		break;
2121	case PR_SET_FPEXC:
2122		error = SET_FPEXC_CTL(me, arg2);
2123		break;
2124	case PR_GET_FPEXC:
2125		error = GET_FPEXC_CTL(me, arg2);
2126		break;
2127	case PR_GET_TIMING:
2128		error = PR_TIMING_STATISTICAL;
2129		break;
2130	case PR_SET_TIMING:
2131		if (arg2 != PR_TIMING_STATISTICAL)
2132			error = -EINVAL;
2133		break;
2134	case PR_SET_NAME:
2135		comm[sizeof(me->comm) - 1] = 0;
2136		if (strncpy_from_user(comm, (char __user *)arg2,
2137				      sizeof(me->comm) - 1) < 0)
2138			return -EFAULT;
2139		set_task_comm(me, comm);
2140		proc_comm_connector(me);
2141		break;
2142	case PR_GET_NAME:
2143		get_task_comm(comm, me);
2144		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2145			return -EFAULT;
2146		break;
2147	case PR_GET_ENDIAN:
2148		error = GET_ENDIAN(me, arg2);
2149		break;
2150	case PR_SET_ENDIAN:
2151		error = SET_ENDIAN(me, arg2);
2152		break;
2153	case PR_GET_SECCOMP:
2154		error = prctl_get_seccomp();
2155		break;
2156	case PR_SET_SECCOMP:
2157		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2158		break;
2159	case PR_GET_TSC:
2160		error = GET_TSC_CTL(arg2);
2161		break;
2162	case PR_SET_TSC:
2163		error = SET_TSC_CTL(arg2);
2164		break;
2165	case PR_TASK_PERF_EVENTS_DISABLE:
2166		error = perf_event_task_disable();
2167		break;
2168	case PR_TASK_PERF_EVENTS_ENABLE:
2169		error = perf_event_task_enable();
2170		break;
2171	case PR_GET_TIMERSLACK:
2172		if (current->timer_slack_ns > ULONG_MAX)
2173			error = ULONG_MAX;
2174		else
2175			error = current->timer_slack_ns;
2176		break;
2177	case PR_SET_TIMERSLACK:
2178		if (arg2 <= 0)
2179			current->timer_slack_ns =
2180					current->default_timer_slack_ns;
2181		else
2182			current->timer_slack_ns = arg2;
2183		break;
2184	case PR_MCE_KILL:
2185		if (arg4 | arg5)
2186			return -EINVAL;
2187		switch (arg2) {
2188		case PR_MCE_KILL_CLEAR:
2189			if (arg3 != 0)
2190				return -EINVAL;
2191			current->flags &= ~PF_MCE_PROCESS;
2192			break;
2193		case PR_MCE_KILL_SET:
2194			current->flags |= PF_MCE_PROCESS;
2195			if (arg3 == PR_MCE_KILL_EARLY)
2196				current->flags |= PF_MCE_EARLY;
2197			else if (arg3 == PR_MCE_KILL_LATE)
2198				current->flags &= ~PF_MCE_EARLY;
2199			else if (arg3 == PR_MCE_KILL_DEFAULT)
2200				current->flags &=
2201						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2202			else
2203				return -EINVAL;
2204			break;
2205		default:
2206			return -EINVAL;
2207		}
2208		break;
2209	case PR_MCE_KILL_GET:
2210		if (arg2 | arg3 | arg4 | arg5)
2211			return -EINVAL;
2212		if (current->flags & PF_MCE_PROCESS)
2213			error = (current->flags & PF_MCE_EARLY) ?
2214				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2215		else
2216			error = PR_MCE_KILL_DEFAULT;
2217		break;
2218	case PR_SET_MM:
2219		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2220		break;
2221	case PR_GET_TID_ADDRESS:
2222		error = prctl_get_tid_address(me, (int __user **)arg2);
2223		break;
2224	case PR_SET_CHILD_SUBREAPER:
2225		me->signal->is_child_subreaper = !!arg2;
2226		break;
2227	case PR_GET_CHILD_SUBREAPER:
2228		error = put_user(me->signal->is_child_subreaper,
2229				 (int __user *)arg2);
2230		break;
2231	case PR_SET_NO_NEW_PRIVS:
2232		if (arg2 != 1 || arg3 || arg4 || arg5)
2233			return -EINVAL;
2234
2235		task_set_no_new_privs(current);
2236		break;
2237	case PR_GET_NO_NEW_PRIVS:
2238		if (arg2 || arg3 || arg4 || arg5)
2239			return -EINVAL;
2240		return task_no_new_privs(current) ? 1 : 0;
2241	case PR_GET_THP_DISABLE:
2242		if (arg2 || arg3 || arg4 || arg5)
2243			return -EINVAL;
2244		error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
2245		break;
2246	case PR_SET_THP_DISABLE:
2247		if (arg3 || arg4 || arg5)
2248			return -EINVAL;
2249		down_write(&me->mm->mmap_sem);
2250		if (arg2)
2251			me->mm->def_flags |= VM_NOHUGEPAGE;
2252		else
2253			me->mm->def_flags &= ~VM_NOHUGEPAGE;
2254		up_write(&me->mm->mmap_sem);
2255		break;
2256	case PR_MPX_ENABLE_MANAGEMENT:
2257		if (arg2 || arg3 || arg4 || arg5)
2258			return -EINVAL;
2259		error = MPX_ENABLE_MANAGEMENT();
2260		break;
2261	case PR_MPX_DISABLE_MANAGEMENT:
2262		if (arg2 || arg3 || arg4 || arg5)
2263			return -EINVAL;
2264		error = MPX_DISABLE_MANAGEMENT();
2265		break;
2266	case PR_SET_FP_MODE:
2267		error = SET_FP_MODE(me, arg2);
2268		break;
2269	case PR_GET_FP_MODE:
2270		error = GET_FP_MODE(me);
2271		break;
2272	default:
2273		error = -EINVAL;
2274		break;
2275	}
2276	return error;
2277}
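
/*
 * Editor's example (not part of the kernel source): two of the most common
 * prctl() requests dispatched by the switch above -- naming the calling
 * thread and asking for a signal when the parent exits.  A hedged userspace
 * sketch; the helper name is illustrative.
 */
#include <signal.h>
#include <sys/prctl.h>

static void example_worker_setup(void)
{
	char name[16];	/* sizeof(task->comm): 15 chars + NUL */

	prctl(PR_SET_NAME, "worker", 0, 0, 0);
	prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0);
	prctl(PR_GET_NAME, name, 0, 0, 0);	/* reads the name back */
}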
2278
2279SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2280		struct getcpu_cache __user *, unused)
2281{
2282	int err = 0;
2283	int cpu = raw_smp_processor_id();
2284
2285	if (cpup)
2286		err |= put_user(cpu, cpup);
2287	if (nodep)
2288		err |= put_user(cpu_to_node(cpu), nodep);
2289	return err ? -EFAULT : 0;
2290}
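
/*
 * Editor's example (not part of the kernel source): querying the current CPU
 * and NUMA node through the raw syscall above (glibc also offers the
 * sched_getcpu() convenience wrapper).  The helper name is illustrative; the
 * third "cache" argument has been ignored by the kernel since 2.6.24.
 */
#include <unistd.h>
#include <sys/syscall.h>

static long example_where_am_i(unsigned int *cpu, unsigned int *node)
{
	return syscall(SYS_getcpu, cpu, node, NULL);
}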
2291
2292/**
2293 * do_sysinfo - fill in sysinfo struct
2294 * @info: pointer to buffer to fill
2295 */
2296static int do_sysinfo(struct sysinfo *info)
2297{
2298	unsigned long mem_total, sav_total;
2299	unsigned int mem_unit, bitcount;
2300	struct timespec tp;
2301
2302	memset(info, 0, sizeof(struct sysinfo));
2303
2304	get_monotonic_boottime(&tp);
2305	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2306
2307	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2308
2309	info->procs = nr_threads;
2310
2311	si_meminfo(info);
2312	si_swapinfo(info);
2313
2314	/*
2315	 * If the sum of all the available memory (i.e. ram + swap)
2316	 * is less than can be stored in a 32 bit unsigned long then
2317	 * we can be binary compatible with 2.2.x kernels.  If not,
2318	 * well, in that case 2.2.x was broken anyways...
2319	 *
2320	 *  -Erik Andersen <andersee@debian.org>
2321	 */
2322
2323	mem_total = info->totalram + info->totalswap;
2324	if (mem_total < info->totalram || mem_total < info->totalswap)
2325		goto out;
2326	bitcount = 0;
2327	mem_unit = info->mem_unit;
2328	while (mem_unit > 1) {
2329		bitcount++;
2330		mem_unit >>= 1;
2331		sav_total = mem_total;
2332		mem_total <<= 1;
2333		if (mem_total < sav_total)
2334			goto out;
2335	}
2336
2337	/*
2338	 * If mem_total did not overflow, multiply all memory values by
2339	 * info->mem_unit and set it to 1.  This leaves things compatible
2340	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2341	 * kernels...
2342	 */
2343
2344	info->mem_unit = 1;
2345	info->totalram <<= bitcount;
2346	info->freeram <<= bitcount;
2347	info->sharedram <<= bitcount;
2348	info->bufferram <<= bitcount;
2349	info->totalswap <<= bitcount;
2350	info->freeswap <<= bitcount;
2351	info->totalhigh <<= bitcount;
2352	info->freehigh <<= bitcount;
2353
2354out:
2355	return 0;
2356}
2357
2358SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2359{
2360	struct sysinfo val;
2361
2362	do_sysinfo(&val);
2363
2364	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2365		return -EFAULT;
2366
2367	return 0;
2368}
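
/*
 * Editor's example (not part of the kernel source): because of the mem_unit
 * scaling in do_sysinfo() above, userspace must multiply the ram/swap fields
 * by mem_unit to get bytes (mem_unit stays 1 unless the totals would
 * overflow an unsigned long).  Hedged sketch; the helper name is illustrative.
 */
#include <stdio.h>
#include <sys/sysinfo.h>

static void example_print_total_ram(void)
{
	struct sysinfo si;

	if (sysinfo(&si) == 0)
		printf("total ram: %llu bytes\n",
		       (unsigned long long)si.totalram * si.mem_unit);
}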
2369
2370#ifdef CONFIG_COMPAT
2371struct compat_sysinfo {
2372	s32 uptime;
2373	u32 loads[3];
2374	u32 totalram;
2375	u32 freeram;
2376	u32 sharedram;
2377	u32 bufferram;
2378	u32 totalswap;
2379	u32 freeswap;
2380	u16 procs;
2381	u16 pad;
2382	u32 totalhigh;
2383	u32 freehigh;
2384	u32 mem_unit;
2385	char _f[20-2*sizeof(u32)-sizeof(int)];
2386};
2387
2388COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2389{
2390	struct sysinfo s;
2391
2392	do_sysinfo(&s);
2393
2394	/* Check to see if any memory value is too large for 32-bit and scale
2395	 * down if needed.
2396	 */
2397	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2398		int bitcount = 0;
2399
2400		while (s.mem_unit < PAGE_SIZE) {
2401			s.mem_unit <<= 1;
2402			bitcount++;
2403		}
2404
2405		s.totalram >>= bitcount;
2406		s.freeram >>= bitcount;
2407		s.sharedram >>= bitcount;
2408		s.bufferram >>= bitcount;
2409		s.totalswap >>= bitcount;
2410		s.freeswap >>= bitcount;
2411		s.totalhigh >>= bitcount;
2412		s.freehigh >>= bitcount;
2413	}
2414
2415	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
2416	    __put_user(s.uptime, &info->uptime) ||
2417	    __put_user(s.loads[0], &info->loads[0]) ||
2418	    __put_user(s.loads[1], &info->loads[1]) ||
2419	    __put_user(s.loads[2], &info->loads[2]) ||
2420	    __put_user(s.totalram, &info->totalram) ||
2421	    __put_user(s.freeram, &info->freeram) ||
2422	    __put_user(s.sharedram, &info->sharedram) ||
2423	    __put_user(s.bufferram, &info->bufferram) ||
2424	    __put_user(s.totalswap, &info->totalswap) ||
2425	    __put_user(s.freeswap, &info->freeswap) ||
2426	    __put_user(s.procs, &info->procs) ||
2427	    __put_user(s.totalhigh, &info->totalhigh) ||
2428	    __put_user(s.freehigh, &info->freehigh) ||
2429	    __put_user(s.mem_unit, &info->mem_unit))
2430		return -EFAULT;
2431
2432	return 0;
2433}
2434#endif /* CONFIG_COMPAT */
v3.5.6
   1/*
   2 *  linux/kernel/sys.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 */
   6
   7#include <linux/export.h>
   8#include <linux/mm.h>
   9#include <linux/utsname.h>
  10#include <linux/mman.h>
  11#include <linux/reboot.h>
  12#include <linux/prctl.h>
  13#include <linux/highuid.h>
  14#include <linux/fs.h>
  15#include <linux/kmod.h>
  16#include <linux/perf_event.h>
  17#include <linux/resource.h>
  18#include <linux/kernel.h>
  19#include <linux/kexec.h>
  20#include <linux/workqueue.h>
  21#include <linux/capability.h>
  22#include <linux/device.h>
  23#include <linux/key.h>
  24#include <linux/times.h>
  25#include <linux/posix-timers.h>
  26#include <linux/security.h>
  27#include <linux/dcookies.h>
  28#include <linux/suspend.h>
  29#include <linux/tty.h>
  30#include <linux/signal.h>
  31#include <linux/cn_proc.h>
  32#include <linux/getcpu.h>
  33#include <linux/task_io_accounting_ops.h>
  34#include <linux/seccomp.h>
  35#include <linux/cpu.h>
  36#include <linux/personality.h>
  37#include <linux/ptrace.h>
  38#include <linux/fs_struct.h>
  39#include <linux/file.h>
  40#include <linux/mount.h>
  41#include <linux/gfp.h>
  42#include <linux/syscore_ops.h>
  43#include <linux/version.h>
  44#include <linux/ctype.h>
  45
  46#include <linux/compat.h>
  47#include <linux/syscalls.h>
  48#include <linux/kprobes.h>
  49#include <linux/user_namespace.h>
  50
  51#include <linux/kmsg_dump.h>
  52/* Move somewhere else to avoid recompiling? */
  53#include <generated/utsrelease.h>
  54
  55#include <asm/uaccess.h>
  56#include <asm/io.h>
  57#include <asm/unistd.h>
  58
  59#ifndef SET_UNALIGN_CTL
  60# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
  61#endif
  62#ifndef GET_UNALIGN_CTL
  63# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
  64#endif
  65#ifndef SET_FPEMU_CTL
  66# define SET_FPEMU_CTL(a,b)	(-EINVAL)
  67#endif
  68#ifndef GET_FPEMU_CTL
  69# define GET_FPEMU_CTL(a,b)	(-EINVAL)
  70#endif
  71#ifndef SET_FPEXC_CTL
  72# define SET_FPEXC_CTL(a,b)	(-EINVAL)
  73#endif
  74#ifndef GET_FPEXC_CTL
  75# define GET_FPEXC_CTL(a,b)	(-EINVAL)
  76#endif
  77#ifndef GET_ENDIAN
  78# define GET_ENDIAN(a,b)	(-EINVAL)
  79#endif
  80#ifndef SET_ENDIAN
  81# define SET_ENDIAN(a,b)	(-EINVAL)
  82#endif
  83#ifndef GET_TSC_CTL
  84# define GET_TSC_CTL(a)		(-EINVAL)
  85#endif
  86#ifndef SET_TSC_CTL
  87# define SET_TSC_CTL(a)		(-EINVAL)
  88#endif
  89
  90/*
  91 * this is where the system-wide overflow UID and GID are defined, for
  92 * architectures that now have 32-bit UID/GID but didn't in the past
  93 */
  94
  95int overflowuid = DEFAULT_OVERFLOWUID;
  96int overflowgid = DEFAULT_OVERFLOWGID;
  97
  98EXPORT_SYMBOL(overflowuid);
  99EXPORT_SYMBOL(overflowgid);
 100
 101/*
 102 * the same as above, but for filesystems which can only store a 16-bit
 103 * UID and GID. as such, this is needed on all architectures
 104 */
 105
 106int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
 107int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;
 108
 109EXPORT_SYMBOL(fs_overflowuid);
 110EXPORT_SYMBOL(fs_overflowgid);
 111
 112/*
 113 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 114 */
 115
 116int C_A_D = 1;
 117struct pid *cad_pid;
 118EXPORT_SYMBOL(cad_pid);
 119
 120/*
 121 * If set, this is used for preparing the system to power off.
 122 */
 123
 124void (*pm_power_off_prepare)(void);
 125
 126/*
 127 * Returns true if current's euid is the same as p's uid or euid,
 128 * or if current has CAP_SYS_NICE in p's user_ns.
 129 *
 130 * Called with rcu_read_lock held, so the creds are safe.
 131 */
 132static bool set_one_prio_perm(struct task_struct *p)
 133{
 134	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
 135
 136	if (uid_eq(pcred->uid,  cred->euid) ||
 137	    uid_eq(pcred->euid, cred->euid))
 138		return true;
 139	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
 140		return true;
 141	return false;
 142}
 143
 144/*
 145 * set the priority of a task
 146 * - the caller must hold the RCU read lock
 147 */
 148static int set_one_prio(struct task_struct *p, int niceval, int error)
 149{
 150	int no_nice;
 151
 152	if (!set_one_prio_perm(p)) {
 153		error = -EPERM;
 154		goto out;
 155	}
 156	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
 157		error = -EACCES;
 158		goto out;
 159	}
 160	no_nice = security_task_setnice(p, niceval);
 161	if (no_nice) {
 162		error = no_nice;
 163		goto out;
 164	}
 165	if (error == -ESRCH)
 166		error = 0;
 167	set_user_nice(p, niceval);
 168out:
 169	return error;
 170}
 171
 172SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 173{
 174	struct task_struct *g, *p;
 175	struct user_struct *user;
 176	const struct cred *cred = current_cred();
 177	int error = -EINVAL;
 178	struct pid *pgrp;
 179	kuid_t uid;
 180
 181	if (which > PRIO_USER || which < PRIO_PROCESS)
 182		goto out;
 183
 184	/* normalize: avoid signed division (rounding problems) */
 185	error = -ESRCH;
 186	if (niceval < -20)
 187		niceval = -20;
 188	if (niceval > 19)
 189		niceval = 19;
 190
 191	rcu_read_lock();
 192	read_lock(&tasklist_lock);
 193	switch (which) {
 194		case PRIO_PROCESS:
 195			if (who)
 196				p = find_task_by_vpid(who);
 197			else
 198				p = current;
 199			if (p)
 200				error = set_one_prio(p, niceval, error);
 201			break;
 202		case PRIO_PGRP:
 203			if (who)
 204				pgrp = find_vpid(who);
 205			else
 206				pgrp = task_pgrp(current);
 207			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 208				error = set_one_prio(p, niceval, error);
 209			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 210			break;
 211		case PRIO_USER:
 212			uid = make_kuid(cred->user_ns, who);
 213			user = cred->user;
 214			if (!who)
 215				uid = cred->uid;
 216			else if (!uid_eq(uid, cred->uid) &&
 217				 !(user = find_user(uid)))
 218				goto out_unlock;	/* No processes for this user */
 219
 220			do_each_thread(g, p) {
 221				if (uid_eq(task_uid(p), uid))
 222					error = set_one_prio(p, niceval, error);
 223			} while_each_thread(g, p);
 224			if (!uid_eq(uid, cred->uid))
 225				free_uid(user);		/* For find_user() */
 226			break;
 227	}
 228out_unlock:
 229	read_unlock(&tasklist_lock);
 230	rcu_read_unlock();
 231out:
 232	return error;
 233}
 234
 235/*
 236 * Ugh. To avoid negative return values, "getpriority()" will
 237 * not return the normal nice-value, but a negated value that
 238 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 239 * to stay compatible.
 240 */
 241SYSCALL_DEFINE2(getpriority, int, which, int, who)
 242{
 243	struct task_struct *g, *p;
 244	struct user_struct *user;
 245	const struct cred *cred = current_cred();
 246	long niceval, retval = -ESRCH;
 247	struct pid *pgrp;
 248	kuid_t uid;
 249
 250	if (which > PRIO_USER || which < PRIO_PROCESS)
 251		return -EINVAL;
 252
 253	rcu_read_lock();
 254	read_lock(&tasklist_lock);
 255	switch (which) {
 256		case PRIO_PROCESS:
 257			if (who)
 258				p = find_task_by_vpid(who);
 259			else
 260				p = current;
 261			if (p) {
 262				niceval = 20 - task_nice(p);
 263				if (niceval > retval)
 264					retval = niceval;
 265			}
 266			break;
 267		case PRIO_PGRP:
 268			if (who)
 269				pgrp = find_vpid(who);
 270			else
 271				pgrp = task_pgrp(current);
 272			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 273				niceval = 20 - task_nice(p);
 274				if (niceval > retval)
 275					retval = niceval;
 276			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 277			break;
 278		case PRIO_USER:
 279			uid = make_kuid(cred->user_ns, who);
 280			user = cred->user;
 281			if (!who)
 282				uid = cred->uid;
 283			else if (!uid_eq(uid, cred->uid) &&
 284				 !(user = find_user(uid)))
 285				goto out_unlock;	/* No processes for this user */
 286
 287			do_each_thread(g, p) {
 288				if (uid_eq(task_uid(p), uid)) {
 289					niceval = 20 - task_nice(p);
 290					if (niceval > retval)
 291						retval = niceval;
 292				}
 293			} while_each_thread(g, p);
 294			if (!uid_eq(uid, cred->uid))
 295				free_uid(user);		/* for find_user() */
 296			break;
 297	}
 298out_unlock:
 299	read_unlock(&tasklist_lock);
 300	rcu_read_unlock();
 301
 302	return retval;
 303}
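
/*
 * Editor's example (not part of the kernel source): the raw syscall above
 * returns 20 - nice, i.e. 40 for nice -20 and 1 for nice 19, so it is never
 * negative.  The glibc wrapper undoes the offset, which is why callers must
 * clear errno to tell a legitimate -1 from an error.  Hedged sketch; the
 * helper name is illustrative.
 */
#include <errno.h>
#include <sys/types.h>
#include <sys/resource.h>

static int example_read_nice(pid_t pid, int *nice_out)
{
	errno = 0;
	*nice_out = getpriority(PRIO_PROCESS, pid);	/* back in -20..19 */
	return (*nice_out == -1 && errno != 0) ? -1 : 0;
}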
 304
 305/**
 306 *	emergency_restart - reboot the system
 307 *
 308 *	Without shutting down any hardware or taking any locks
 309 *	reboot the system.  This is called when we know we are in
 310 *	trouble so this is our best effort to reboot.  This is
 311 *	safe to call in interrupt context.
 312 */
 313void emergency_restart(void)
 314{
 315	kmsg_dump(KMSG_DUMP_EMERG);
 316	machine_emergency_restart();
 317}
 318EXPORT_SYMBOL_GPL(emergency_restart);
 319
 320void kernel_restart_prepare(char *cmd)
 321{
 322	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 323	system_state = SYSTEM_RESTART;
 324	usermodehelper_disable();
 325	device_shutdown();
 326	syscore_shutdown();
 327}
 328
 329/**
 330 *	register_reboot_notifier - Register function to be called at reboot time
 331 *	@nb: Info about notifier function to be called
 332 *
 333 *	Registers a function with the list of functions
 334 *	to be called at reboot time.
 335 *
 336 *	Currently always returns zero, as blocking_notifier_chain_register()
 337 *	always returns zero.
 338 */
 339int register_reboot_notifier(struct notifier_block *nb)
 340{
 341	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
 342}
 343EXPORT_SYMBOL(register_reboot_notifier);
 344
 345/**
 346 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 347 *	@nb: Hook to be unregistered
 348 *
 349 *	Unregisters a previously registered reboot
 350 *	notifier function.
 351 *
 352 *	Returns zero on success, or %-ENOENT on failure.
 353 */
 354int unregister_reboot_notifier(struct notifier_block *nb)
 355{
 356	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
 357}
 358EXPORT_SYMBOL(unregister_reboot_notifier);
 359
 360/**
 361 *	kernel_restart - reboot the system
 362 *	@cmd: pointer to buffer containing command to execute for restart
 363 *		or %NULL
 364 *
 365 *	Shutdown everything and perform a clean reboot.
 366 *	This is not safe to call in interrupt context.
 367 */
 368void kernel_restart(char *cmd)
 369{
 370	kernel_restart_prepare(cmd);
 371	if (!cmd)
 372		printk(KERN_EMERG "Restarting system.\n");
 373	else
 374		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
 375	kmsg_dump(KMSG_DUMP_RESTART);
 376	machine_restart(cmd);
 377}
 378EXPORT_SYMBOL_GPL(kernel_restart);
 379
 380static void kernel_shutdown_prepare(enum system_states state)
 381{
 382	blocking_notifier_call_chain(&reboot_notifier_list,
 383		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
 384	system_state = state;
 385	usermodehelper_disable();
 386	device_shutdown();
 387}
 388/**
 389 *	kernel_halt - halt the system
 390 *
 391 *	Shutdown everything and perform a clean system halt.
 392 */
 393void kernel_halt(void)
 394{
 395	kernel_shutdown_prepare(SYSTEM_HALT);
 396	syscore_shutdown();
 397	printk(KERN_EMERG "System halted.\n");
 398	kmsg_dump(KMSG_DUMP_HALT);
 399	machine_halt();
 400}
 401
 402EXPORT_SYMBOL_GPL(kernel_halt);
 403
 404/**
 405 *	kernel_power_off - power_off the system
 406 *
 407 *	Shutdown everything and perform a clean system power_off.
 408 */
 409void kernel_power_off(void)
 410{
 411	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
 412	if (pm_power_off_prepare)
 413		pm_power_off_prepare();
 414	disable_nonboot_cpus();
 415	syscore_shutdown();
 416	printk(KERN_EMERG "Power down.\n");
 417	kmsg_dump(KMSG_DUMP_POWEROFF);
 418	machine_power_off();
 419}
 420EXPORT_SYMBOL_GPL(kernel_power_off);
 421
 422static DEFINE_MUTEX(reboot_mutex);
 423
 424/*
 425 * Reboot system call: for obvious reasons only root may call it,
 426 * and even root needs to set up some magic numbers in the registers
 427 * so that some mistake won't make this reboot the whole machine.
 428 * You can also set the meaning of the ctrl-alt-del-key here.
 429 *
 430 * reboot doesn't sync: do that yourself before calling this.
 431 */
 432SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 433		void __user *, arg)
 434{
 435	char buffer[256];
 436	int ret = 0;
 437
 438	/* We only trust the superuser with rebooting the system. */
 439	if (!capable(CAP_SYS_BOOT))
 440		return -EPERM;
 441
 442	/* For safety, we require "magic" arguments. */
 443	if (magic1 != LINUX_REBOOT_MAGIC1 ||
 444	    (magic2 != LINUX_REBOOT_MAGIC2 &&
 445	                magic2 != LINUX_REBOOT_MAGIC2A &&
 446			magic2 != LINUX_REBOOT_MAGIC2B &&
 447	                magic2 != LINUX_REBOOT_MAGIC2C))
 448		return -EINVAL;
 449
 450	/*
 451	 * If pid namespaces are enabled and the current task is in a child
 452	 * pid_namespace, the command is handled by reboot_pid_ns() which will
 453	 * call do_exit().
 454	 */
 455	ret = reboot_pid_ns(task_active_pid_ns(current), cmd);
 456	if (ret)
 457		return ret;
 458
 459	/* Instead of trying to make the power_off code look like
 460	 * halt when pm_power_off is not set, do it the easy way.
 461	 */
 462	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
 463		cmd = LINUX_REBOOT_CMD_HALT;
 464
 465	mutex_lock(&reboot_mutex);
 466	switch (cmd) {
 467	case LINUX_REBOOT_CMD_RESTART:
 468		kernel_restart(NULL);
 469		break;
 470
 471	case LINUX_REBOOT_CMD_CAD_ON:
 472		C_A_D = 1;
 473		break;
 474
 475	case LINUX_REBOOT_CMD_CAD_OFF:
 476		C_A_D = 0;
 477		break;
 478
 479	case LINUX_REBOOT_CMD_HALT:
 480		kernel_halt();
 481		do_exit(0);
 482		panic("cannot halt");
 483
 484	case LINUX_REBOOT_CMD_POWER_OFF:
 485		kernel_power_off();
 486		do_exit(0);
 487		break;
 488
 489	case LINUX_REBOOT_CMD_RESTART2:
 490		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
 491			ret = -EFAULT;
 492			break;
 493		}
 494		buffer[sizeof(buffer) - 1] = '\0';
 495
 496		kernel_restart(buffer);
 497		break;
 498
 499#ifdef CONFIG_KEXEC
 500	case LINUX_REBOOT_CMD_KEXEC:
 501		ret = kernel_kexec();
 502		break;
 503#endif
 504
 505#ifdef CONFIG_HIBERNATION
 506	case LINUX_REBOOT_CMD_SW_SUSPEND:
 507		ret = hibernate();
 508		break;
 509#endif
 510
 511	default:
 512		ret = -EINVAL;
 513		break;
 514	}
 515	mutex_unlock(&reboot_mutex);
 516	return ret;
 517}
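
/*
 * Editor's example (not part of the kernel source): as the comment above
 * notes, this syscall does not sync, so a privileged caller is expected to
 * flush filesystems itself first.  Hedged sketch of a clean power-off using
 * the glibc wrapper, which supplies the magic numbers; the helper name is
 * illustrative.
 */
#include <unistd.h>
#include <sys/reboot.h>

static int example_power_off(void)
{
	sync();
	return reboot(RB_POWER_OFF);	/* needs CAP_SYS_BOOT */
}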
 518
 519static void deferred_cad(struct work_struct *dummy)
 520{
 521	kernel_restart(NULL);
 522}
 523
 524/*
 525 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 526 * As it's called within an interrupt, it may NOT sync: the only choice
 527 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 528 */
 529void ctrl_alt_del(void)
 530{
 531	static DECLARE_WORK(cad_work, deferred_cad);
 532
 533	if (C_A_D)
 534		schedule_work(&cad_work);
 535	else
 536		kill_cad_pid(SIGINT, 1);
 537}
 538	
 539/*
 540 * Unprivileged users may change the real gid to the effective gid
 541 * or vice versa.  (BSD-style)
 542 *
 543 * If you set the real gid at all, or set the effective gid to a value not
 544 * equal to the real gid, then the saved gid is set to the new effective gid.
 545 *
 546 * This makes it possible for a setgid program to completely drop its
 547 * privileges, which is often a useful assertion to make when you are doing
 548 * a security audit of a program.
 549 *
 550 * The general idea is that a program which uses just setregid() will be
 551 * 100% compatible with BSD.  A program which uses just setgid() will be
 552 * 100% compatible with POSIX with saved IDs.
 553 *
 554 * SMP: There are no races; the GIDs are checked only by filesystem
 555 *      operations (as far as semantic preservation is concerned).
 556 */
 557SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 558{
 559	struct user_namespace *ns = current_user_ns();
 560	const struct cred *old;
 561	struct cred *new;
 562	int retval;
 563	kgid_t krgid, kegid;
 564
 565	krgid = make_kgid(ns, rgid);
 566	kegid = make_kgid(ns, egid);
 567
 568	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 569		return -EINVAL;
 570	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 571		return -EINVAL;
 572
 573	new = prepare_creds();
 574	if (!new)
 575		return -ENOMEM;
 576	old = current_cred();
 577
 578	retval = -EPERM;
 579	if (rgid != (gid_t) -1) {
 580		if (gid_eq(old->gid, krgid) ||
 581		    gid_eq(old->egid, krgid) ||
 582		    nsown_capable(CAP_SETGID))
 583			new->gid = krgid;
 584		else
 585			goto error;
 586	}
 587	if (egid != (gid_t) -1) {
 588		if (gid_eq(old->gid, kegid) ||
 589		    gid_eq(old->egid, kegid) ||
 590		    gid_eq(old->sgid, kegid) ||
 591		    nsown_capable(CAP_SETGID))
 592			new->egid = kegid;
 593		else
 594			goto error;
 595	}
 596
 597	if (rgid != (gid_t) -1 ||
 598	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
 599		new->sgid = new->egid;
 600	new->fsgid = new->egid;
 601
 602	return commit_creds(new);
 603
 604error:
 605	abort_creds(new);
 606	return retval;
 607}
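
/*
 * Editor's example (not part of the kernel source): following the comment
 * above, a setgid program can permanently drop its extra group privilege by
 * setting the real gid, which also rewrites the saved gid.  Hedged sketch;
 * the helper name is illustrative.
 */
#include <unistd.h>

static int example_drop_setgid(void)
{
	gid_t rgid = getgid();

	/* rgid is set explicitly, so egid and sgid both become rgid too */
	return setregid(rgid, rgid);
}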
 608
 609/*
 610 * setgid() is implemented like SysV w/ SAVED_IDS 
 611 *
 612 * SMP: Same implicit races as above.
 613 */
 614SYSCALL_DEFINE1(setgid, gid_t, gid)
 615{
 616	struct user_namespace *ns = current_user_ns();
 617	const struct cred *old;
 618	struct cred *new;
 619	int retval;
 620	kgid_t kgid;
 621
 622	kgid = make_kgid(ns, gid);
 623	if (!gid_valid(kgid))
 624		return -EINVAL;
 625
 626	new = prepare_creds();
 627	if (!new)
 628		return -ENOMEM;
 629	old = current_cred();
 630
 631	retval = -EPERM;
 632	if (nsown_capable(CAP_SETGID))
 633		new->gid = new->egid = new->sgid = new->fsgid = kgid;
 634	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
 635		new->egid = new->fsgid = kgid;
 636	else
 637		goto error;
 638
 639	return commit_creds(new);
 640
 641error:
 642	abort_creds(new);
 643	return retval;
 644}
 645
 646/*
 647 * change the user struct in a credentials set to match the new UID
 648 */
 649static int set_user(struct cred *new)
 650{
 651	struct user_struct *new_user;
 652
 653	new_user = alloc_uid(new->uid);
 654	if (!new_user)
 655		return -EAGAIN;
 656
 657	/*
 658	 * We don't fail in case of NPROC limit excess here because too many
 659	 * poorly written programs don't check set*uid() return code, assuming
 660	 * it never fails if called by root.  We may still enforce NPROC limit
 661	 * for programs doing set*uid()+execve() by harmlessly deferring the
 662	 * failure to the execve() stage.
 663	 */
 664	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
 665			new_user != INIT_USER)
 666		current->flags |= PF_NPROC_EXCEEDED;
 667	else
 668		current->flags &= ~PF_NPROC_EXCEEDED;
 669
 670	free_uid(new->user);
 671	new->user = new_user;
 672	return 0;
 673}
 674
 675/*
 676 * Unprivileged users may change the real uid to the effective uid
 677 * or vice versa.  (BSD-style)
 678 *
 679 * If you set the real uid at all, or set the effective uid to a value not
 680 * equal to the real uid, then the saved uid is set to the new effective uid.
 681 *
 682 * This makes it possible for a setuid program to completely drop its
 683 * privileges, which is often a useful assertion to make when you are doing
 684 * a security audit of a program.
 685 *
 686 * The general idea is that a program which uses just setreuid() will be
 687 * 100% compatible with BSD.  A program which uses just setuid() will be
 688 * 100% compatible with POSIX with saved IDs.
 689 */
 690SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 691{
 692	struct user_namespace *ns = current_user_ns();
 693	const struct cred *old;
 694	struct cred *new;
 695	int retval;
 696	kuid_t kruid, keuid;
 697
 698	kruid = make_kuid(ns, ruid);
 699	keuid = make_kuid(ns, euid);
 700
 701	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 702		return -EINVAL;
 703	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 704		return -EINVAL;
 705
 706	new = prepare_creds();
 707	if (!new)
 708		return -ENOMEM;
 709	old = current_cred();
 710
 711	retval = -EPERM;
 712	if (ruid != (uid_t) -1) {
 713		new->uid = kruid;
 714		if (!uid_eq(old->uid, kruid) &&
 715		    !uid_eq(old->euid, kruid) &&
 716		    !nsown_capable(CAP_SETUID))
 717			goto error;
 718	}
 719
 720	if (euid != (uid_t) -1) {
 721		new->euid = keuid;
 722		if (!uid_eq(old->uid, keuid) &&
 723		    !uid_eq(old->euid, keuid) &&
 724		    !uid_eq(old->suid, keuid) &&
 725		    !nsown_capable(CAP_SETUID))
 726			goto error;
 727	}
 728
 729	if (!uid_eq(new->uid, old->uid)) {
 730		retval = set_user(new);
 731		if (retval < 0)
 732			goto error;
 733	}
 734	if (ruid != (uid_t) -1 ||
 735	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
 736		new->suid = new->euid;
 737	new->fsuid = new->euid;
 738
 739	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
 740	if (retval < 0)
 741		goto error;
 742
 743	return commit_creds(new);
 744
 745error:
 746	abort_creds(new);
 747	return retval;
 748}
 749		
 750/*
 751 * setuid() is implemented like SysV with SAVED_IDS 
 752 * 
 753 * Note that SAVED_IDS is deficient in that a setuid root program
 754 * like sendmail, for example, cannot set its uid to be a normal 
 755 * user and then switch back, because if you're root, setuid() sets
 756 * the saved uid too.  If you don't like this, blame the bright people
 757 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 758 * will allow a root program to temporarily drop privileges and be able to
 759 * regain them by swapping the real and effective uid.  
 760 */
 761SYSCALL_DEFINE1(setuid, uid_t, uid)
 762{
 763	struct user_namespace *ns = current_user_ns();
 764	const struct cred *old;
 765	struct cred *new;
 766	int retval;
 767	kuid_t kuid;
 768
 769	kuid = make_kuid(ns, uid);
 770	if (!uid_valid(kuid))
 771		return -EINVAL;
 772
 773	new = prepare_creds();
 774	if (!new)
 775		return -ENOMEM;
 776	old = current_cred();
 777
 778	retval = -EPERM;
 779	if (nsown_capable(CAP_SETUID)) {
 780		new->suid = new->uid = kuid;
 781		if (!uid_eq(kuid, old->uid)) {
 782			retval = set_user(new);
 783			if (retval < 0)
 784				goto error;
 785		}
 786	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
 787		goto error;
 788	}
 789
 790	new->fsuid = new->euid = kuid;
 791
 792	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
 793	if (retval < 0)
 794		goto error;
 795
 796	return commit_creds(new);
 797
 798error:
 799	abort_creds(new);
 800	return retval;
 801}
 802
 803
 804/*
 805 * This function implements a generic ability to update ruid, euid,
 806 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 807 */
 808SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 809{
 810	struct user_namespace *ns = current_user_ns();
 811	const struct cred *old;
 812	struct cred *new;
 813	int retval;
 814	kuid_t kruid, keuid, ksuid;
 815
 816	kruid = make_kuid(ns, ruid);
 817	keuid = make_kuid(ns, euid);
 818	ksuid = make_kuid(ns, suid);
 819
 820	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 821		return -EINVAL;
 822
 823	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 824		return -EINVAL;
 825
 826	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
 827		return -EINVAL;
 828
 829	new = prepare_creds();
 830	if (!new)
 831		return -ENOMEM;
 832
 833	old = current_cred();
 834
 835	retval = -EPERM;
 836	if (!nsown_capable(CAP_SETUID)) {
 837		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
 838		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
 839			goto error;
 840		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
 841		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
 842			goto error;
 843		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
 844		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
 845			goto error;
 846	}
 847
 848	if (ruid != (uid_t) -1) {
 849		new->uid = kruid;
 850		if (!uid_eq(kruid, old->uid)) {
 851			retval = set_user(new);
 852			if (retval < 0)
 853				goto error;
 854		}
 855	}
 856	if (euid != (uid_t) -1)
 857		new->euid = keuid;
 858	if (suid != (uid_t) -1)
 859		new->suid = ksuid;
 860	new->fsuid = new->euid;
 861
 862	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
 863	if (retval < 0)
 864		goto error;
 865
 866	return commit_creds(new);
 867
 868error:
 869	abort_creds(new);
 870	return retval;
 871}
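
/*
 * Editor's example (not part of the kernel source): setresuid() is the least
 * ambiguous way to drop root for good, since all three uids are named
 * explicitly; checking with getresuid() afterwards is cheap insurance.
 * Hedged sketch; the helper name is illustrative.
 */
#define _GNU_SOURCE
#include <unistd.h>

static int example_drop_root(uid_t uid)
{
	uid_t r, e, s;

	if (setresuid(uid, uid, uid) != 0)
		return -1;
	if (getresuid(&r, &e, &s) != 0 || r != uid || e != uid || s != uid)
		return -1;
	return 0;
}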
 872
 873SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
 874{
 875	const struct cred *cred = current_cred();
 876	int retval;
 877	uid_t ruid, euid, suid;
 878
 879	ruid = from_kuid_munged(cred->user_ns, cred->uid);
 880	euid = from_kuid_munged(cred->user_ns, cred->euid);
 881	suid = from_kuid_munged(cred->user_ns, cred->suid);
 882
 883	if (!(retval   = put_user(ruid, ruidp)) &&
 884	    !(retval   = put_user(euid, euidp)))
 885		retval = put_user(suid, suidp);
 886
 887	return retval;
 888}
 889
 890/*
 891 * Same as above, but for rgid, egid, sgid.
 892 */
 893SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 894{
 895	struct user_namespace *ns = current_user_ns();
 896	const struct cred *old;
 897	struct cred *new;
 898	int retval;
 899	kgid_t krgid, kegid, ksgid;
 900
 901	krgid = make_kgid(ns, rgid);
 902	kegid = make_kgid(ns, egid);
 903	ksgid = make_kgid(ns, sgid);
 904
 905	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 906		return -EINVAL;
 907	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 908		return -EINVAL;
 909	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
 910		return -EINVAL;
 911
 912	new = prepare_creds();
 913	if (!new)
 914		return -ENOMEM;
 915	old = current_cred();
 916
 917	retval = -EPERM;
 918	if (!nsown_capable(CAP_SETGID)) {
 919		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
 920		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
 921			goto error;
 922		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
 923		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
 924			goto error;
 925		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
 926		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
 927			goto error;
 928	}
 929
 930	if (rgid != (gid_t) -1)
 931		new->gid = krgid;
 932	if (egid != (gid_t) -1)
 933		new->egid = kegid;
 934	if (sgid != (gid_t) -1)
 935		new->sgid = ksgid;
 936	new->fsgid = new->egid;
 937
 938	return commit_creds(new);
 939
 940error:
 941	abort_creds(new);
 942	return retval;
 943}
 944
 945SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
 946{
 947	const struct cred *cred = current_cred();
 948	int retval;
 949	gid_t rgid, egid, sgid;
 950
 951	rgid = from_kgid_munged(cred->user_ns, cred->gid);
 952	egid = from_kgid_munged(cred->user_ns, cred->egid);
 953	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
 954
 955	if (!(retval   = put_user(rgid, rgidp)) &&
 956	    !(retval   = put_user(egid, egidp)))
 957		retval = put_user(sgid, sgidp);
 958
 959	return retval;
 960}
 961
 962
 963/*
 964 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 965 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 966 * whatever uid it wants to). It normally shadows "euid", except when
 967 * explicitly set by setfsuid() or for access().
 968 */
 969SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 970{
 971	const struct cred *old;
 972	struct cred *new;
 973	uid_t old_fsuid;
 974	kuid_t kuid;
 975
 976	old = current_cred();
 977	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
 978
 979	kuid = make_kuid(old->user_ns, uid);
 980	if (!uid_valid(kuid))
 981		return old_fsuid;
 982
 983	new = prepare_creds();
 984	if (!new)
 985		return old_fsuid;
 986
 987	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
 988	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
 989	    nsown_capable(CAP_SETUID)) {
 990		if (!uid_eq(kuid, old->fsuid)) {
 991			new->fsuid = kuid;
 992			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 993				goto change_okay;
 994		}
 995	}
 996
 997	abort_creds(new);
 998	return old_fsuid;
 999
1000change_okay:
1001	commit_creds(new);
1002	return old_fsuid;
1003}
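
/*
 * Editor's example (not part of the kernel source): as the code above shows,
 * setfsuid() returns the previous fsuid whether or not the change was
 * permitted, so the only way to detect failure is to call it a second time
 * and compare.  Hedged sketch; the helper name is illustrative.
 */
#include <sys/types.h>
#include <sys/fsuid.h>

static int example_set_fsuid_checked(uid_t fsuid)
{
	setfsuid(fsuid);
	return ((uid_t)setfsuid(fsuid) == fsuid) ? 0 : -1;
}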
1004
1005/*
1006 * The same as above, but for the filesystem gid (fsgid).
1007 */
1008SYSCALL_DEFINE1(setfsgid, gid_t, gid)
1009{
1010	const struct cred *old;
1011	struct cred *new;
1012	gid_t old_fsgid;
1013	kgid_t kgid;
1014
1015	old = current_cred();
1016	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
1017
1018	kgid = make_kgid(old->user_ns, gid);
1019	if (!gid_valid(kgid))
1020		return old_fsgid;
1021
1022	new = prepare_creds();
1023	if (!new)
1024		return old_fsgid;
1025
1026	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
1027	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
1028	    nsown_capable(CAP_SETGID)) {
1029		if (!gid_eq(kgid, old->fsgid)) {
1030			new->fsgid = kgid;
1031			goto change_okay;
1032		}
1033	}
1034
1035	abort_creds(new);
1036	return old_fsgid;
1037
1038change_okay:
1039	commit_creds(new);
1040	return old_fsgid;
1041}
1042
1043void do_sys_times(struct tms *tms)
1044{
1045	cputime_t tgutime, tgstime, cutime, cstime;
1046
1047	spin_lock_irq(&current->sighand->siglock);
1048	thread_group_times(current, &tgutime, &tgstime);
1049	cutime = current->signal->cutime;
1050	cstime = current->signal->cstime;
1051	spin_unlock_irq(&current->sighand->siglock);
1052	tms->tms_utime = cputime_to_clock_t(tgutime);
1053	tms->tms_stime = cputime_to_clock_t(tgstime);
1054	tms->tms_cutime = cputime_to_clock_t(cutime);
1055	tms->tms_cstime = cputime_to_clock_t(cstime);
1056}
1057
1058SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
1059{
1060	if (tbuf) {
1061		struct tms tmp;
1062
1063		do_sys_times(&tmp);
1064		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
1065			return -EFAULT;
1066	}
1067	force_successful_syscall_return();
1068	return (long) jiffies_64_to_clock_t(get_jiffies_64());
1069}
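
/*
 * Editor's example (not part of the kernel source): the return value and the
 * struct tms fields above are in clock ticks, so userspace divides by
 * sysconf(_SC_CLK_TCK) to get seconds.  Hedged sketch; the helper name is
 * illustrative.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/times.h>

static void example_report_cpu_time(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK);

	if (times(&t) != (clock_t)-1 && hz > 0)
		printf("user %.2fs, system %.2fs\n",
		       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
}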
1070
1071/*
1072 * This needs some heavy checking ...
1073 * I just haven't the stomach for it. I also don't fully
1074 * understand sessions/pgrp etc. Let somebody who does explain it.
1075 *
1076 * OK, I think I have the protection semantics right.... this is really
1077 * only important on a multi-user system anyway, to make sure one user
1078 * can't send a signal to a process owned by another.  -TYT, 12/12/91
1079 *
1080 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
1081 * LBT 04.03.94
1082 */
1083SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1084{
1085	struct task_struct *p;
1086	struct task_struct *group_leader = current->group_leader;
1087	struct pid *pgrp;
1088	int err;
1089
1090	if (!pid)
1091		pid = task_pid_vnr(group_leader);
1092	if (!pgid)
1093		pgid = pid;
1094	if (pgid < 0)
1095		return -EINVAL;
1096	rcu_read_lock();
1097
1098	/* From this point forward we keep holding onto the tasklist lock
1099	 * so that our parent does not change from under us. -DaveM
1100	 */
1101	write_lock_irq(&tasklist_lock);
1102
1103	err = -ESRCH;
1104	p = find_task_by_vpid(pid);
1105	if (!p)
1106		goto out;
1107
1108	err = -EINVAL;
1109	if (!thread_group_leader(p))
1110		goto out;
1111
1112	if (same_thread_group(p->real_parent, group_leader)) {
1113		err = -EPERM;
1114		if (task_session(p) != task_session(group_leader))
1115			goto out;
1116		err = -EACCES;
1117		if (p->did_exec)
1118			goto out;
1119	} else {
1120		err = -ESRCH;
1121		if (p != group_leader)
1122			goto out;
1123	}
1124
1125	err = -EPERM;
1126	if (p->signal->leader)
1127		goto out;
1128
1129	pgrp = task_pid(p);
1130	if (pgid != pid) {
1131		struct task_struct *g;
1132
1133		pgrp = find_vpid(pgid);
1134		g = pid_task(pgrp, PIDTYPE_PGID);
1135		if (!g || task_session(g) != task_session(group_leader))
1136			goto out;
1137	}
1138
1139	err = security_task_setpgid(p, pgid);
1140	if (err)
1141		goto out;
1142
1143	if (task_pgrp(p) != pgrp)
1144		change_pid(p, PIDTYPE_PGID, pgrp);
1145
1146	err = 0;
1147out:
1148	/* All paths lead to here, thus we are safe. -DaveM */
1149	write_unlock_irq(&tasklist_lock);
1150	rcu_read_unlock();
1151	return err;
1152}
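
/*
 * Editor's example (not part of the kernel source): the classic shell-style
 * use of setpgid(), where parent and child both move the child into its own
 * process group to avoid a race -- exactly the window the did_exec and
 * same-session checks above police.  Hedged sketch; the helper name is
 * illustrative.
 */
#include <unistd.h>

static pid_t example_spawn_in_own_group(void)
{
	pid_t pid = fork();

	if (pid == 0) {			/* child */
		setpgid(0, 0);
		/* exec the job here */
		_exit(0);
	}
	if (pid > 0)
		setpgid(pid, pid);	/* EACCES here means the child exec'd first */
	return pid;
}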
1153
1154SYSCALL_DEFINE1(getpgid, pid_t, pid)
1155{
1156	struct task_struct *p;
1157	struct pid *grp;
1158	int retval;
1159
1160	rcu_read_lock();
1161	if (!pid)
1162		grp = task_pgrp(current);
1163	else {
1164		retval = -ESRCH;
1165		p = find_task_by_vpid(pid);
1166		if (!p)
1167			goto out;
1168		grp = task_pgrp(p);
1169		if (!grp)
1170			goto out;
1171
1172		retval = security_task_getpgid(p);
1173		if (retval)
1174			goto out;
1175	}
1176	retval = pid_vnr(grp);
1177out:
1178	rcu_read_unlock();
1179	return retval;
1180}
1181
1182#ifdef __ARCH_WANT_SYS_GETPGRP
1183
1184SYSCALL_DEFINE0(getpgrp)
1185{
1186	return sys_getpgid(0);
1187}
1188
1189#endif
1190
1191SYSCALL_DEFINE1(getsid, pid_t, pid)
1192{
1193	struct task_struct *p;
1194	struct pid *sid;
1195	int retval;
1196
1197	rcu_read_lock();
1198	if (!pid)
1199		sid = task_session(current);
1200	else {
1201		retval = -ESRCH;
1202		p = find_task_by_vpid(pid);
1203		if (!p)
1204			goto out;
1205		sid = task_session(p);
1206		if (!sid)
1207			goto out;
1208
1209		retval = security_task_getsid(p);
1210		if (retval)
1211			goto out;
1212	}
1213	retval = pid_vnr(sid);
1214out:
1215	rcu_read_unlock();
1216	return retval;
1217}
1218
1219SYSCALL_DEFINE0(setsid)
1220{
1221	struct task_struct *group_leader = current->group_leader;
1222	struct pid *sid = task_pid(group_leader);
1223	pid_t session = pid_vnr(sid);
1224	int err = -EPERM;
1225
1226	write_lock_irq(&tasklist_lock);
1227	/* Fail if I am already a session leader */
1228	if (group_leader->signal->leader)
1229		goto out;
1230
1231	/* Fail if a process group id already exists that equals the
1232	 * proposed session id.
1233	 */
1234	if (pid_task(sid, PIDTYPE_PGID))
1235		goto out;
1236
1237	group_leader->signal->leader = 1;
1238	__set_special_pids(sid);
1239
1240	proc_clear_tty(group_leader);
1241
1242	err = session;
1243out:
1244	write_unlock_irq(&tasklist_lock);
1245	if (err > 0) {
1246		proc_sid_connector(group_leader);
1247		sched_autogroup_create_attach(group_leader);
1248	}
1249	return err;
1250}
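
/*
 * Editor's example (not part of the kernel source): setsid() fails with EPERM
 * when the caller is already a process-group leader, which is why daemons
 * fork once before calling it.  Hedged sketch; the helper name is
 * illustrative.
 */
#include <stdlib.h>
#include <unistd.h>

static void example_detach(void)
{
	if (fork() > 0)
		exit(0);	/* parent exits; the child is not a group leader */
	if (setsid() < 0)
		_exit(1);	/* child is now session and group leader, no ctty */
}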
1251
1252DECLARE_RWSEM(uts_sem);
1253
1254#ifdef COMPAT_UTS_MACHINE
1255#define override_architecture(name) \
1256	(personality(current->personality) == PER_LINUX32 && \
1257	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1258		      sizeof(COMPAT_UTS_MACHINE)))
1259#else
1260#define override_architecture(name)	0
1261#endif
1262
1263/*
1264 * Work around broken programs that cannot handle "Linux 3.0".
1265 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1266 */
1267static int override_release(char __user *release, int len)
1268{
1269	int ret = 0;
1270	char buf[65];
1271
1272	if (current->personality & UNAME26) {
1273		char *rest = UTS_RELEASE;
1274		int ndots = 0;
1275		unsigned v;
1276
1277		while (*rest) {
1278			if (*rest == '.' && ++ndots >= 3)
1279				break;
1280			if (!isdigit(*rest) && *rest != '.')
1281				break;
1282			rest++;
1283		}
1284		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
1285		snprintf(buf, len, "2.6.%u%s", v, rest);
1286		ret = copy_to_user(release, buf, len);
1287	}
1288	return ret;
1289}
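
/*
 * Editor's example (not part of the kernel source): a worked instance of the
 * mapping above.  For a "3.5.6" release string, (LINUX_VERSION_CODE >> 8) &
 * 0xff is 5, so v is 45 and a task running with the UNAME26 personality sees
 * "2.6.45" (plus any non-numeric suffix) from uname(2).  The flag is usually
 * set from userspace as below, assuming a glibc recent enough to define
 * UNAME26 (util-linux exposes the same thing as "setarch --uname-2.6").
 */
#include <sys/personality.h>

static int example_pretend_2_6(void)
{
	/* UNAME26 is a flag OR'ed into the base personality, not a persona */
	return personality(PER_LINUX | UNAME26);
}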
1290
1291SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1292{
1293	int errno = 0;
1294
1295	down_read(&uts_sem);
1296	if (copy_to_user(name, utsname(), sizeof *name))
1297		errno = -EFAULT;
1298	up_read(&uts_sem);
1299
1300	if (!errno && override_release(name->release, sizeof(name->release)))
1301		errno = -EFAULT;
1302	if (!errno && override_architecture(name))
1303		errno = -EFAULT;
1304	return errno;
1305}
1306
1307#ifdef __ARCH_WANT_SYS_OLD_UNAME
1308/*
1309 * Old cruft
1310 */
1311SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1312{
1313	int error = 0;
1314
1315	if (!name)
1316		return -EFAULT;
1317
1318	down_read(&uts_sem);
1319	if (copy_to_user(name, utsname(), sizeof(*name)))
1320		error = -EFAULT;
1321	up_read(&uts_sem);
1322
1323	if (!error && override_release(name->release, sizeof(name->release)))
1324		error = -EFAULT;
1325	if (!error && override_architecture(name))
1326		error = -EFAULT;
1327	return error;
1328}
1329
1330SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1331{
1332	int error;
1333
1334	if (!name)
1335		return -EFAULT;
1336	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1337		return -EFAULT;
1338
1339	down_read(&uts_sem);
1340	error = __copy_to_user(&name->sysname, &utsname()->sysname,
1341			       __OLD_UTS_LEN);
1342	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1343	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1344				__OLD_UTS_LEN);
1345	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1346	error |= __copy_to_user(&name->release, &utsname()->release,
1347				__OLD_UTS_LEN);
1348	error |= __put_user(0, name->release + __OLD_UTS_LEN);
1349	error |= __copy_to_user(&name->version, &utsname()->version,
1350				__OLD_UTS_LEN);
1351	error |= __put_user(0, name->version + __OLD_UTS_LEN);
1352	error |= __copy_to_user(&name->machine, &utsname()->machine,
1353				__OLD_UTS_LEN);
1354	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1355	up_read(&uts_sem);
1356
1357	if (!error && override_architecture(name))
1358		error = -EFAULT;
1359	if (!error && override_release(name->release, sizeof(name->release)))
1360		error = -EFAULT;
1361	return error ? -EFAULT : 0;
1362}
1363#endif
1364
1365SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1366{
1367	int errno;
1368	char tmp[__NEW_UTS_LEN];
1369
1370	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1371		return -EPERM;
1372
1373	if (len < 0 || len > __NEW_UTS_LEN)
1374		return -EINVAL;
1375	down_write(&uts_sem);
1376	errno = -EFAULT;
1377	if (!copy_from_user(tmp, name, len)) {
1378		struct new_utsname *u = utsname();
1379
1380		memcpy(u->nodename, tmp, len);
1381		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1382		errno = 0;
1383		uts_proc_notify(UTS_PROC_HOSTNAME);
1384	}
1385	up_write(&uts_sem);
1386	return errno;
1387}
1388
1389#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1390
1391SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1392{
1393	int i, errno;
1394	struct new_utsname *u;
1395
1396	if (len < 0)
1397		return -EINVAL;
1398	down_read(&uts_sem);
1399	u = utsname();
1400	i = 1 + strlen(u->nodename);
1401	if (i > len)
1402		i = len;
1403	errno = 0;
1404	if (copy_to_user(name, u->nodename, i))
1405		errno = -EFAULT;
1406	up_read(&uts_sem);
1407	return errno;
1408}
1409
1410#endif
1411
1412/*
1413 * Only setdomainname; getdomainname can be implemented by calling
1414 * uname()
1415 */
1416SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1417{
1418	int errno;
1419	char tmp[__NEW_UTS_LEN];
1420
1421	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1422		return -EPERM;
1423	if (len < 0 || len > __NEW_UTS_LEN)
1424		return -EINVAL;
1425
1426	down_write(&uts_sem);
1427	errno = -EFAULT;
1428	if (!copy_from_user(tmp, name, len)) {
1429		struct new_utsname *u = utsname();
1430
1431		memcpy(u->domainname, tmp, len);
1432		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1433		errno = 0;
1434		uts_proc_notify(UTS_PROC_DOMAINNAME);
1435	}
1436	up_write(&uts_sem);
1437	return errno;
1438}
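
/*
 * Editor's example (not part of the kernel source): the comment above points
 * out that there is no getdomainname syscall here; userspace recovers the
 * NIS domain from uname(2) instead (the domainname member is a GNU
 * extension).  Hedged sketch; the helper name is illustrative.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/utsname.h>

static int example_get_domainname(char *buf, size_t len)
{
	struct utsname u;

	if (uname(&u) != 0)
		return -1;
	snprintf(buf, len, "%s", u.domainname);
	return 0;
}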
1439
1440SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1441{
1442	struct rlimit value;
1443	int ret;
1444
1445	ret = do_prlimit(current, resource, NULL, &value);
1446	if (!ret)
1447		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1448
1449	return ret;
1450}
1451
1452#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1453
1454/*
1455 *	Back compatibility for getrlimit. Needed for some apps.
1456 */
1457 
1458SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1459		struct rlimit __user *, rlim)
1460{
1461	struct rlimit x;
1462	if (resource >= RLIM_NLIMITS)
1463		return -EINVAL;
1464
1465	task_lock(current->group_leader);
1466	x = current->signal->rlim[resource];
1467	task_unlock(current->group_leader);
1468	if (x.rlim_cur > 0x7FFFFFFF)
1469		x.rlim_cur = 0x7FFFFFFF;
1470	if (x.rlim_max > 0x7FFFFFFF)
1471		x.rlim_max = 0x7FFFFFFF;
1472	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
1473}
1474
1475#endif
1476
1477static inline bool rlim64_is_infinity(__u64 rlim64)
1478{
1479#if BITS_PER_LONG < 64
1480	return rlim64 >= ULONG_MAX;
1481#else
1482	return rlim64 == RLIM64_INFINITY;
1483#endif
1484}
1485
1486static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1487{
1488	if (rlim->rlim_cur == RLIM_INFINITY)
1489		rlim64->rlim_cur = RLIM64_INFINITY;
1490	else
1491		rlim64->rlim_cur = rlim->rlim_cur;
1492	if (rlim->rlim_max == RLIM_INFINITY)
1493		rlim64->rlim_max = RLIM64_INFINITY;
1494	else
1495		rlim64->rlim_max = rlim->rlim_max;
1496}
1497
1498static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1499{
1500	if (rlim64_is_infinity(rlim64->rlim_cur))
1501		rlim->rlim_cur = RLIM_INFINITY;
1502	else
1503		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1504	if (rlim64_is_infinity(rlim64->rlim_max))
1505		rlim->rlim_max = RLIM_INFINITY;
1506	else
1507		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1508}
1509
1510/* make sure you are allowed to change @tsk limits before calling this */
1511int do_prlimit(struct task_struct *tsk, unsigned int resource,
1512		struct rlimit *new_rlim, struct rlimit *old_rlim)
1513{
1514	struct rlimit *rlim;
1515	int retval = 0;
1516
1517	if (resource >= RLIM_NLIMITS)
1518		return -EINVAL;
1519	if (new_rlim) {
1520		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1521			return -EINVAL;
1522		if (resource == RLIMIT_NOFILE &&
1523				new_rlim->rlim_max > sysctl_nr_open)
1524			return -EPERM;
1525	}
1526
1527	/* protect tsk->signal and tsk->sighand from disappearing */
1528	read_lock(&tasklist_lock);
1529	if (!tsk->sighand) {
1530		retval = -ESRCH;
1531		goto out;
1532	}
1533
1534	rlim = tsk->signal->rlim + resource;
1535	task_lock(tsk->group_leader);
1536	if (new_rlim) {
1537		/* Keep the capable check against init_user_ns until
1538		   cgroups can contain all limits */
1539		if (new_rlim->rlim_max > rlim->rlim_max &&
1540				!capable(CAP_SYS_RESOURCE))
1541			retval = -EPERM;
1542		if (!retval)
1543			retval = security_task_setrlimit(tsk->group_leader,
1544					resource, new_rlim);
1545		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
1546			/*
1547			 * The caller is asking for an immediate RLIMIT_CPU
1548			 * expiry.  But we use the zero value to mean "it was
1549			 * never set".  So let's cheat and make it one second
1550			 * instead
1551			 */
1552			new_rlim->rlim_cur = 1;
1553		}
1554	}
1555	if (!retval) {
1556		if (old_rlim)
1557			*old_rlim = *rlim;
1558		if (new_rlim)
1559			*rlim = *new_rlim;
1560	}
1561	task_unlock(tsk->group_leader);
1562
1563	/*
1564	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
1565	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
1566	 * very long-standing error, and fixing it now risks breakage of
1567	 * applications, so we live with it
1568	 */
1569	 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1570			 new_rlim->rlim_cur != RLIM_INFINITY)
1571		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1572out:
1573	read_unlock(&tasklist_lock);
1574	return retval;
1575}
1576
1577/* rcu lock must be held */
1578static int check_prlimit_permission(struct task_struct *task)
1579{
1580	const struct cred *cred = current_cred(), *tcred;
1581
1582	if (current == task)
1583		return 0;
1584
1585	tcred = __task_cred(task);
1586	if (uid_eq(cred->uid, tcred->euid) &&
1587	    uid_eq(cred->uid, tcred->suid) &&
1588	    uid_eq(cred->uid, tcred->uid)  &&
1589	    gid_eq(cred->gid, tcred->egid) &&
1590	    gid_eq(cred->gid, tcred->sgid) &&
1591	    gid_eq(cred->gid, tcred->gid))
1592		return 0;
1593	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1594		return 0;
1595
1596	return -EPERM;
1597}
1598
1599SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1600		const struct rlimit64 __user *, new_rlim,
1601		struct rlimit64 __user *, old_rlim)
1602{
1603	struct rlimit64 old64, new64;
1604	struct rlimit old, new;
1605	struct task_struct *tsk;
1606	int ret;
1607
1608	if (new_rlim) {
1609		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1610			return -EFAULT;
1611		rlim64_to_rlim(&new64, &new);
1612	}
1613
1614	rcu_read_lock();
1615	tsk = pid ? find_task_by_vpid(pid) : current;
1616	if (!tsk) {
1617		rcu_read_unlock();
1618		return -ESRCH;
1619	}
1620	ret = check_prlimit_permission(tsk);
1621	if (ret) {
1622		rcu_read_unlock();
1623		return ret;
1624	}
1625	get_task_struct(tsk);
1626	rcu_read_unlock();
1627
1628	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1629			old_rlim ? &old : NULL);
1630
1631	if (!ret && old_rlim) {
1632		rlim_to_rlim64(&old, &old64);
1633		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1634			ret = -EFAULT;
1635	}
1636
1637	put_task_struct(tsk);
1638	return ret;
1639}
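/*
 * Illustrative userspace sketch (not part of this file, names are
 * placeholders): reaching the prlimit64 syscall above through glibc's
 * prlimit() wrapper to set a target process' RLIMIT_NOFILE while reading
 * back the old limits.  Assumes _GNU_SOURCE and a pid the caller may
 * adjust (matching credentials or CAP_SYS_RESOURCE, as checked in
 * check_prlimit_permission()).
 *
 *	#define _GNU_SOURCE
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int raise_nofile(pid_t pid)
 *	{
 *		struct rlimit new_lim = { .rlim_cur = 4096, .rlim_max = 4096 };
 *		struct rlimit old_lim;
 *
 *		if (prlimit(pid, RLIMIT_NOFILE, &new_lim, &old_lim) != 0) {
 *			perror("prlimit");
 *			return -1;
 *		}
 *		printf("old soft=%llu hard=%llu\n",
 *		       (unsigned long long)old_lim.rlim_cur,
 *		       (unsigned long long)old_lim.rlim_max);
 *		return 0;
 *	}
 */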
1640
1641SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1642{
1643	struct rlimit new_rlim;
1644
1645	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1646		return -EFAULT;
1647	return do_prlimit(current, resource, &new_rlim, NULL);
1648}
1649
1650/*
1651 * It would make sense to put struct rusage in the task_struct,
1652 * except that would make the task_struct be *really big*.  After
1653 * task_struct gets moved into malloc'ed memory, it would
1654 * make sense to do this.  It will make moving the rest of the information
1655 * a lot simpler!  (Which we're not doing right now because we're not
1656 * measuring them yet).
1657 *
1658 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1659 * races with threads incrementing their own counters.  But since word
1660 * reads are atomic, we either get new values or old values and we don't
1661 * care which for the sums.  We always take the siglock to protect reading
1662 * the c* fields from p->signal from races with exit.c updating those
1663 * fields when reaping, so a sample either gets all the additions of a
1664 * given child after it's reaped, or none, so the sample is from before reaping.
1665 *
1666 * Locking:
1667 * We need to take the siglock for CHILDREN, SELF and BOTH
1668 * for the cases current multithreaded, non-current single threaded
1669 * non-current multithreaded.  Thread traversal is now safe with
1670 * the siglock held.
1671 * Strictly speaking, we do not need to take the siglock if we are current and
1672 * single threaded, as no one else can take our signal_struct away, no one
1673 * else can reap the children to update signal->c* counters, and no one else
1674 * can race with the signal-> fields. If we do not take any lock, the
1675 * signal-> fields could be read out of order while another thread was just
1676 * exiting. So we should place a read memory barrier when we avoid the lock.
1677 * On the writer side, a write memory barrier is implied in __exit_signal,
1678 * as __exit_signal releases the siglock spinlock after updating the signal->
1679 * fields. But we don't do this yet to keep things simple.
1680 *
1681 */
1682
1683static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1684{
1685	r->ru_nvcsw += t->nvcsw;
1686	r->ru_nivcsw += t->nivcsw;
1687	r->ru_minflt += t->min_flt;
1688	r->ru_majflt += t->maj_flt;
1689	r->ru_inblock += task_io_get_inblock(t);
1690	r->ru_oublock += task_io_get_oublock(t);
1691}
1692
1693static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1694{
1695	struct task_struct *t;
1696	unsigned long flags;
1697	cputime_t tgutime, tgstime, utime, stime;
1698	unsigned long maxrss = 0;
1699
1700	memset((char *) r, 0, sizeof *r);
1701	utime = stime = 0;
1702
1703	if (who == RUSAGE_THREAD) {
1704		task_times(current, &utime, &stime);
1705		accumulate_thread_rusage(p, r);
1706		maxrss = p->signal->maxrss;
1707		goto out;
1708	}
1709
1710	if (!lock_task_sighand(p, &flags))
1711		return;
1712
1713	switch (who) {
1714		case RUSAGE_BOTH:
1715		case RUSAGE_CHILDREN:
1716			utime = p->signal->cutime;
1717			stime = p->signal->cstime;
1718			r->ru_nvcsw = p->signal->cnvcsw;
1719			r->ru_nivcsw = p->signal->cnivcsw;
1720			r->ru_minflt = p->signal->cmin_flt;
1721			r->ru_majflt = p->signal->cmaj_flt;
1722			r->ru_inblock = p->signal->cinblock;
1723			r->ru_oublock = p->signal->coublock;
1724			maxrss = p->signal->cmaxrss;
1725
1726			if (who == RUSAGE_CHILDREN)
1727				break;
1728
1729		case RUSAGE_SELF:
1730			thread_group_times(p, &tgutime, &tgstime);
1731			utime += tgutime;
1732			stime += tgstime;
1733			r->ru_nvcsw += p->signal->nvcsw;
1734			r->ru_nivcsw += p->signal->nivcsw;
1735			r->ru_minflt += p->signal->min_flt;
1736			r->ru_majflt += p->signal->maj_flt;
1737			r->ru_inblock += p->signal->inblock;
1738			r->ru_oublock += p->signal->oublock;
1739			if (maxrss < p->signal->maxrss)
1740				maxrss = p->signal->maxrss;
1741			t = p;
1742			do {
1743				accumulate_thread_rusage(t, r);
1744				t = next_thread(t);
1745			} while (t != p);
1746			break;
1747
1748		default:
1749			BUG();
1750	}
1751	unlock_task_sighand(p, &flags);
1752
1753out:
1754	cputime_to_timeval(utime, &r->ru_utime);
1755	cputime_to_timeval(stime, &r->ru_stime);
1756
1757	if (who != RUSAGE_CHILDREN) {
1758		struct mm_struct *mm = get_task_mm(p);
1759		if (mm) {
1760			setmax_mm_hiwater_rss(&maxrss, mm);
1761			mmput(mm);
1762		}
1763	}
1764	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1765}
1766
1767int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1768{
1769	struct rusage r;
1770	k_getrusage(p, who, &r);
1771	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1772}
1773
1774SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1775{
1776	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1777	    who != RUSAGE_THREAD)
1778		return -EINVAL;
1779	return getrusage(current, who, ru);
1780}
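/*
 * Illustrative userspace sketch (not part of this file, names are
 * placeholders): the RUSAGE_* selectors accepted by the syscall above.
 * RUSAGE_THREAD is Linux-specific and needs _GNU_SOURCE; ru_maxrss is
 * reported in kilobytes, matching the PAGE_SIZE/1024 conversion in
 * k_getrusage().  Error handling is omitted for brevity.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	void show_usage(void)
 *	{
 *		struct rusage self, thread;
 *
 *		getrusage(RUSAGE_SELF, &self);     // whole thread group
 *		getrusage(RUSAGE_THREAD, &thread); // calling thread only
 *		printf("maxrss=%ld kB, minflt=%ld, majflt=%ld\n",
 *		       self.ru_maxrss, self.ru_minflt, self.ru_majflt);
 *		printf("thread voluntary ctxt switches: %ld\n",
 *		       thread.ru_nvcsw);
 *	}
 */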
1781
1782SYSCALL_DEFINE1(umask, int, mask)
1783{
1784	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1785	return mask;
1786}
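/*
 * Illustrative userspace sketch (not part of this file, function name is
 * a placeholder): umask() swaps in the new mask and returns the previous
 * one, and only the permission bits (S_IRWXUGO == 0777) are kept,
 * mirroring the xchg() above.
 *
 *	#include <sys/stat.h>
 *
 *	mode_t tighten_umask(void)
 *	{
 *		mode_t old = umask(077);	// new files: owner-only
 *		return old;			// previous mask, e.g. 022
 *	}
 */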
1787
1788#ifdef CONFIG_CHECKPOINT_RESTORE
1789static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1790{
1791	struct file *exe_file;
1792	struct dentry *dentry;
1793	int err;
1794
1795	exe_file = fget(fd);
1796	if (!exe_file)
1797		return -EBADF;
1798
1799	dentry = exe_file->f_path.dentry;
1800
1801	/*
1802	 * Because the original mm->exe_file points to an executable file,
1803	 * make sure that this one is executable as well, to keep the
1804	 * overall picture consistent.
1805	 */
1806	err = -EACCES;
1807	if (!S_ISREG(dentry->d_inode->i_mode)	||
1808	    exe_file->f_path.mnt->mnt_flags & MNT_NOEXEC)
1809		goto exit;
1810
1811	err = inode_permission(dentry->d_inode, MAY_EXEC);
1812	if (err)
1813		goto exit;
1814
1815	down_write(&mm->mmap_sem);
1816
1817	/*
1818	 * Forbid mm->exe_file change if old file still mapped.
1819	 */
1820	err = -EBUSY;
1821	if (mm->exe_file) {
1822		struct vm_area_struct *vma;
1823
1824		for (vma = mm->mmap; vma; vma = vma->vm_next)
1825			if (vma->vm_file &&
1826			    path_equal(&vma->vm_file->f_path,
1827				       &mm->exe_file->f_path))
1828				goto exit_unlock;
1829	}
1830
1831	/*
1832	 * The symlink can be changed only once, to disallow the arbitrary
1833	 * transitions malicious software might bring in. This means one
1834	 * could take a snapshot of all running processes and monitor
1835	 * /proc/pid/exe changes to notice unusual activity if needed.
1836	 */
1837	err = -EPERM;
1838	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
1839		goto exit_unlock;
1840
1841	err = 0;
1842	set_mm_exe_file(mm, exe_file);
1843exit_unlock:
1844	up_write(&mm->mmap_sem);
1845
1846exit:
1847	fput(exe_file);
1848	return err;
1849}
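/*
 * Illustrative userspace sketch (not part of this file, function name is
 * a placeholder): how a checkpoint/restore tool would ask for the
 * /proc/pid/exe symlink to be re-pointed, using the PR_SET_MM_EXE_FILE
 * option handled above.  The caller needs CAP_SYS_RESOURCE, the fd must
 * refer to an executable regular file, and the change is allowed only
 * once per mm.
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int restore_exe_link(const char *path)
 *	{
 *		int fd = open(path, O_RDONLY);
 *		int ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, fd, 0, 0);
 *		close(fd);		// the kernel keeps its own reference
 *		return ret;
 *	}
 */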
1850
1851static int prctl_set_mm(int opt, unsigned long addr,
1852			unsigned long arg4, unsigned long arg5)
1853{
1854	unsigned long rlim = rlimit(RLIMIT_DATA);
1855	struct mm_struct *mm = current->mm;
1856	struct vm_area_struct *vma;
1857	int error;
1858
1859	if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
1860		return -EINVAL;
1861
1862	if (!capable(CAP_SYS_RESOURCE))
1863		return -EPERM;
1864
1865	if (opt == PR_SET_MM_EXE_FILE)
1866		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
1867
1868	if (addr >= TASK_SIZE || addr < mmap_min_addr)
1869		return -EINVAL;
1870
1871	error = -EINVAL;
1872
1873	down_read(&mm->mmap_sem);
1874	vma = find_vma(mm, addr);
1875
1876	switch (opt) {
1877	case PR_SET_MM_START_CODE:
1878		mm->start_code = addr;
1879		break;
1880	case PR_SET_MM_END_CODE:
1881		mm->end_code = addr;
1882		break;
1883	case PR_SET_MM_START_DATA:
1884		mm->start_data = addr;
1885		break;
1886	case PR_SET_MM_END_DATA:
1887		mm->end_data = addr;
1888		break;
1889
1890	case PR_SET_MM_START_BRK:
1891		if (addr <= mm->end_data)
1892			goto out;
1893
1894		if (rlim < RLIM_INFINITY &&
1895		    (mm->brk - addr) +
1896		    (mm->end_data - mm->start_data) > rlim)
1897			goto out;
1898
1899		mm->start_brk = addr;
1900		break;
1901
1902	case PR_SET_MM_BRK:
1903		if (addr <= mm->end_data)
1904			goto out;
1905
1906		if (rlim < RLIM_INFINITY &&
1907		    (addr - mm->start_brk) +
1908		    (mm->end_data - mm->start_data) > rlim)
1909			goto out;
1910
1911		mm->brk = addr;
1912		break;
1913
1914	/*
1915	 * If the command line arguments and environment
1916	 * are placed somewhere else on the stack, we can
1917	 * set them up here: ARG_START/END to set up the
1918	 * command line arguments and ENV_START/END
1919	 * for the environment.
1920	 */
1921	case PR_SET_MM_START_STACK:
1922	case PR_SET_MM_ARG_START:
1923	case PR_SET_MM_ARG_END:
1924	case PR_SET_MM_ENV_START:
1925	case PR_SET_MM_ENV_END:
1926		if (!vma) {
1927			error = -EFAULT;
1928			goto out;
1929		}
1930		if (opt == PR_SET_MM_START_STACK)
1931			mm->start_stack = addr;
1932		else if (opt == PR_SET_MM_ARG_START)
1933			mm->arg_start = addr;
1934		else if (opt == PR_SET_MM_ARG_END)
1935			mm->arg_end = addr;
1936		else if (opt == PR_SET_MM_ENV_START)
1937			mm->env_start = addr;
1938		else if (opt == PR_SET_MM_ENV_END)
1939			mm->env_end = addr;
1940		break;
1941
1942	/*
1943	 * This doesn't move the auxiliary vector itself
1944	 * since it's pinned to the mm_struct, but it allows
1945	 * the vector to be filled with new values. It's up
1946	 * to the caller to provide sane values here,
1947	 * otherwise user space tools which use this
1948	 * vector might be unhappy.
1949	 */
1950	case PR_SET_MM_AUXV: {
1951		unsigned long user_auxv[AT_VECTOR_SIZE];
1952
1953		if (arg4 > sizeof(user_auxv))
1954			goto out;
1955		up_read(&mm->mmap_sem);
1956
1957		if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
1958			return -EFAULT;
1959
1960		/* Make sure the last entry is always AT_NULL */
1961		user_auxv[AT_VECTOR_SIZE - 2] = 0;
1962		user_auxv[AT_VECTOR_SIZE - 1] = 0;
1963
1964		BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1965
1966		task_lock(current);
1967		memcpy(mm->saved_auxv, user_auxv, arg4);
1968		task_unlock(current);
1969
1970		return 0;
1971	}
1972	default:
1973		goto out;
1974	}
1975
1976	error = 0;
1977out:
1978	up_read(&mm->mmap_sem);
1979	return error;
1980}
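/*
 * Illustrative userspace sketch (not part of this file, function names
 * are placeholders): updating the recorded brk range and the saved
 * auxiliary vector through the same PR_SET_MM prctl interface handled
 * above.  Addresses must lie within [mmap_min_addr, TASK_SIZE) and the
 * caller needs CAP_SYS_RESOURCE.
 *
 *	#include <stddef.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int restore_brk(unsigned long start_brk, unsigned long brk)
 *	{
 *		if (prctl(PR_SET_MM, PR_SET_MM_START_BRK, start_brk, 0, 0))
 *			return -1;
 *		return prctl(PR_SET_MM, PR_SET_MM_BRK, brk, 0, 0);
 *	}
 *
 *	int restore_auxv(const unsigned long *auxv, size_t len)
 *	{
 *		// len is in bytes; it may not exceed sizeof(mm->saved_auxv)
 *		return prctl(PR_SET_MM, PR_SET_MM_AUXV,
 *			     (unsigned long)auxv, len, 0);
 *	}
 */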
1981
1982static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
1983{
1984	return put_user(me->clear_child_tid, tid_addr);
1985}
1986
1987#else /* CONFIG_CHECKPOINT_RESTORE */
1988static int prctl_set_mm(int opt, unsigned long addr,
1989			unsigned long arg4, unsigned long arg5)
1990{
1991	return -EINVAL;
1992}
1993static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
1994{
1995	return -EINVAL;
1996}
1997#endif
1998
1999SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2000		unsigned long, arg4, unsigned long, arg5)
2001{
2002	struct task_struct *me = current;
2003	unsigned char comm[sizeof(me->comm)];
2004	long error;
2005
2006	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2007	if (error != -ENOSYS)
2008		return error;
2009
2010	error = 0;
2011	switch (option) {
2012		case PR_SET_PDEATHSIG:
2013			if (!valid_signal(arg2)) {
2014				error = -EINVAL;
2015				break;
2016			}
2017			me->pdeath_signal = arg2;
2018			error = 0;
2019			break;
2020		case PR_GET_PDEATHSIG:
2021			error = put_user(me->pdeath_signal, (int __user *)arg2);
2022			break;
2023		case PR_GET_DUMPABLE:
2024			error = get_dumpable(me->mm);
2025			break;
2026		case PR_SET_DUMPABLE:
2027			if (arg2 < 0 || arg2 > 1) {
2028				error = -EINVAL;
2029				break;
2030			}
2031			set_dumpable(me->mm, arg2);
2032			error = 0;
2033			break;
2034
2035		case PR_SET_UNALIGN:
2036			error = SET_UNALIGN_CTL(me, arg2);
2037			break;
2038		case PR_GET_UNALIGN:
2039			error = GET_UNALIGN_CTL(me, arg2);
2040			break;
2041		case PR_SET_FPEMU:
2042			error = SET_FPEMU_CTL(me, arg2);
2043			break;
2044		case PR_GET_FPEMU:
2045			error = GET_FPEMU_CTL(me, arg2);
2046			break;
2047		case PR_SET_FPEXC:
2048			error = SET_FPEXC_CTL(me, arg2);
2049			break;
2050		case PR_GET_FPEXC:
2051			error = GET_FPEXC_CTL(me, arg2);
2052			break;
2053		case PR_GET_TIMING:
2054			error = PR_TIMING_STATISTICAL;
2055			break;
2056		case PR_SET_TIMING:
2057			if (arg2 != PR_TIMING_STATISTICAL)
2058				error = -EINVAL;
2059			else
2060				error = 0;
2061			break;
2062
2063		case PR_SET_NAME:
2064			comm[sizeof(me->comm)-1] = 0;
2065			if (strncpy_from_user(comm, (char __user *)arg2,
2066					      sizeof(me->comm) - 1) < 0)
2067				return -EFAULT;
2068			set_task_comm(me, comm);
2069			proc_comm_connector(me);
2070			return 0;
2071		case PR_GET_NAME:
2072			get_task_comm(comm, me);
2073			if (copy_to_user((char __user *)arg2, comm,
2074					 sizeof(comm)))
2075				return -EFAULT;
2076			return 0;
2077		case PR_GET_ENDIAN:
2078			error = GET_ENDIAN(me, arg2);
2079			break;
2080		case PR_SET_ENDIAN:
2081			error = SET_ENDIAN(me, arg2);
2082			break;
2083
2084		case PR_GET_SECCOMP:
2085			error = prctl_get_seccomp();
2086			break;
2087		case PR_SET_SECCOMP:
2088			error = prctl_set_seccomp(arg2, (char __user *)arg3);
2089			break;
2090		case PR_GET_TSC:
2091			error = GET_TSC_CTL(arg2);
2092			break;
2093		case PR_SET_TSC:
2094			error = SET_TSC_CTL(arg2);
2095			break;
2096		case PR_TASK_PERF_EVENTS_DISABLE:
2097			error = perf_event_task_disable();
2098			break;
2099		case PR_TASK_PERF_EVENTS_ENABLE:
2100			error = perf_event_task_enable();
2101			break;
2102		case PR_GET_TIMERSLACK:
2103			error = current->timer_slack_ns;
2104			break;
2105		case PR_SET_TIMERSLACK:
2106			if (arg2 <= 0)
2107				current->timer_slack_ns =
2108					current->default_timer_slack_ns;
2109			else
2110				current->timer_slack_ns = arg2;
2111			error = 0;
2112			break;
2113		case PR_MCE_KILL:
2114			if (arg4 | arg5)
2115				return -EINVAL;
2116			switch (arg2) {
2117			case PR_MCE_KILL_CLEAR:
2118				if (arg3 != 0)
2119					return -EINVAL;
2120				current->flags &= ~PF_MCE_PROCESS;
2121				break;
2122			case PR_MCE_KILL_SET:
2123				current->flags |= PF_MCE_PROCESS;
2124				if (arg3 == PR_MCE_KILL_EARLY)
2125					current->flags |= PF_MCE_EARLY;
2126				else if (arg3 == PR_MCE_KILL_LATE)
2127					current->flags &= ~PF_MCE_EARLY;
2128				else if (arg3 == PR_MCE_KILL_DEFAULT)
2129					current->flags &=
2130						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2131				else
2132					return -EINVAL;
2133				break;
2134			default:
2135				return -EINVAL;
2136			}
2137			error = 0;
2138			break;
2139		case PR_MCE_KILL_GET:
2140			if (arg2 | arg3 | arg4 | arg5)
2141				return -EINVAL;
2142			if (current->flags & PF_MCE_PROCESS)
2143				error = (current->flags & PF_MCE_EARLY) ?
2144					PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2145			else
2146				error = PR_MCE_KILL_DEFAULT;
2147			break;
2148		case PR_SET_MM:
2149			error = prctl_set_mm(arg2, arg3, arg4, arg5);
2150			break;
2151		case PR_GET_TID_ADDRESS:
2152			error = prctl_get_tid_address(me, (int __user **)arg2);
2153			break;
2154		case PR_SET_CHILD_SUBREAPER:
2155			me->signal->is_child_subreaper = !!arg2;
2156			error = 0;
2157			break;
2158		case PR_GET_CHILD_SUBREAPER:
2159			error = put_user(me->signal->is_child_subreaper,
2160					 (int __user *) arg2);
2161			break;
2162		case PR_SET_NO_NEW_PRIVS:
2163			if (arg2 != 1 || arg3 || arg4 || arg5)
2164				return -EINVAL;
2165
2166			current->no_new_privs = 1;
2167			break;
2168		case PR_GET_NO_NEW_PRIVS:
2169			if (arg2 || arg3 || arg4 || arg5)
2170				return -EINVAL;
2171			return current->no_new_privs ? 1 : 0;
2172		default:
2173			error = -EINVAL;
2174			break;
2175	}
2176	return error;
2177}
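/*
 * Illustrative userspace sketch (not part of this file, function name is
 * a placeholder): a few of the simpler options dispatched above.
 * PR_SET_NAME/PR_GET_NAME operate on the 16-byte task comm, and
 * PR_SET_NO_NEW_PRIVS with arg2 == 1 is one-way.
 *
 *	#include <signal.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	void setup_worker_thread(void)
 *	{
 *		char name[16];
 *
 *		prctl(PR_SET_NAME, "worker", 0, 0, 0);
 *		prctl(PR_GET_NAME, name, 0, 0, 0);	// reads back "worker"
 *		prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0);
 *		prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	}
 */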
2178
2179SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2180		struct getcpu_cache __user *, unused)
2181{
2182	int err = 0;
2183	int cpu = raw_smp_processor_id();
2184	if (cpup)
2185		err |= put_user(cpu, cpup);
2186	if (nodep)
2187		err |= put_user(cpu_to_node(cpu), nodep);
2188	return err ? -EFAULT : 0;
2189}
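/*
 * Illustrative userspace sketch (not part of this file, function name is
 * a placeholder): the third argument of the getcpu syscall above is
 * ignored, so callers can pass NULL; glibc also exposes sched_getcpu()
 * for the common "which CPU am I on" case.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <stdio.h>
 *
 *	void where_am_i(void)
 *	{
 *		unsigned int cpu, node;
 *
 *		if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *			printf("running on cpu %u, node %u\n", cpu, node);
 *	}
 */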
2190
2191char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
2192
2193static void argv_cleanup(struct subprocess_info *info)
2194{
2195	argv_free(info->argv);
2196}
2197
2198/**
2199 * orderly_poweroff - Trigger an orderly system poweroff
2200 * @force: force poweroff if command execution fails
2201 *
2202 * This may be called from any context to trigger a system shutdown.
2203 * If the orderly shutdown fails and @force is set, an immediate shutdown is forced.
2204 */
2205int orderly_poweroff(bool force)
2206{
2207	int argc;
2208	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
2209	static char *envp[] = {
2210		"HOME=/",
2211		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
2212		NULL
2213	};
2214	int ret = -ENOMEM;
2215
2216	if (argv == NULL) {
2217		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
2218		       __func__, poweroff_cmd);
2219		goto out;
2220	}
2221
2222	ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_NO_WAIT,
2223				      NULL, argv_cleanup, NULL);
2224out:
2225	if (likely(!ret))
2226		return 0;
2227
2228	if (ret == -ENOMEM)
2229		argv_free(argv);
2230
2231	if (force) {
2232		printk(KERN_WARNING "Failed to start orderly shutdown: "
2233		       "forcing the issue\n");
2234
2235		/* I guess this should try to kick off some daemon to
2236		   sync and poweroff asap.  Or not even bother syncing
2237		   if we're doing an emergency shutdown? */
2238		emergency_sync();
2239		kernel_power_off();
2240	}
2241
2242	return ret;
2243}
2244EXPORT_SYMBOL_GPL(orderly_poweroff);
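/*
 * Illustrative in-kernel sketch (not from this file, function name is a
 * hypothetical placeholder): how a driver might use the exported helper,
 * e.g. reacting to a critical condition by first attempting the
 * userspace poweroff_cmd and falling back to a forced power-off.
 *
 *	static void example_critical_temp_trip(void)
 *	{
 *		pr_crit("critical temperature reached, shutting down\n");
 *		orderly_poweroff(true);	// force if the helper fails
 *	}
 */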