   1/*
   2 *  linux/kernel/sys.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 */
   6
   7#include <linux/export.h>
   8#include <linux/mm.h>
   9#include <linux/utsname.h>
  10#include <linux/mman.h>
  11#include <linux/reboot.h>
  12#include <linux/prctl.h>
  13#include <linux/highuid.h>
  14#include <linux/fs.h>
  15#include <linux/kmod.h>
  16#include <linux/perf_event.h>
  17#include <linux/resource.h>
  18#include <linux/kernel.h>
  19#include <linux/workqueue.h>
  20#include <linux/capability.h>
  21#include <linux/device.h>
  22#include <linux/key.h>
  23#include <linux/times.h>
  24#include <linux/posix-timers.h>
  25#include <linux/security.h>
  26#include <linux/dcookies.h>
  27#include <linux/suspend.h>
  28#include <linux/tty.h>
  29#include <linux/signal.h>
  30#include <linux/cn_proc.h>
  31#include <linux/getcpu.h>
  32#include <linux/task_io_accounting_ops.h>
  33#include <linux/seccomp.h>
  34#include <linux/cpu.h>
  35#include <linux/personality.h>
  36#include <linux/ptrace.h>
  37#include <linux/fs_struct.h>
  38#include <linux/file.h>
  39#include <linux/mount.h>
  40#include <linux/gfp.h>
  41#include <linux/syscore_ops.h>
  42#include <linux/version.h>
  43#include <linux/ctype.h>
  44
  45#include <linux/compat.h>
  46#include <linux/syscalls.h>
  47#include <linux/kprobes.h>
  48#include <linux/user_namespace.h>
  49#include <linux/binfmts.h>
  50
  51#include <linux/sched.h>
  52#include <linux/rcupdate.h>
  53#include <linux/uidgid.h>
  54#include <linux/cred.h>
  55
  56#include <linux/kmsg_dump.h>
  57/* Move somewhere else to avoid recompiling? */
  58#include <generated/utsrelease.h>
  59
  60#include <linux/uaccess.h>
  61#include <asm/io.h>
  62#include <asm/unistd.h>
  63
  64#ifndef SET_UNALIGN_CTL
  65# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
  66#endif
  67#ifndef GET_UNALIGN_CTL
  68# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
  69#endif
  70#ifndef SET_FPEMU_CTL
  71# define SET_FPEMU_CTL(a, b)	(-EINVAL)
  72#endif
  73#ifndef GET_FPEMU_CTL
  74# define GET_FPEMU_CTL(a, b)	(-EINVAL)
  75#endif
  76#ifndef SET_FPEXC_CTL
  77# define SET_FPEXC_CTL(a, b)	(-EINVAL)
  78#endif
  79#ifndef GET_FPEXC_CTL
  80# define GET_FPEXC_CTL(a, b)	(-EINVAL)
  81#endif
  82#ifndef GET_ENDIAN
  83# define GET_ENDIAN(a, b)	(-EINVAL)
  84#endif
  85#ifndef SET_ENDIAN
  86# define SET_ENDIAN(a, b)	(-EINVAL)
  87#endif
  88#ifndef GET_TSC_CTL
  89# define GET_TSC_CTL(a)		(-EINVAL)
  90#endif
  91#ifndef SET_TSC_CTL
  92# define SET_TSC_CTL(a)		(-EINVAL)
  93#endif
  94#ifndef MPX_ENABLE_MANAGEMENT
  95# define MPX_ENABLE_MANAGEMENT()	(-EINVAL)
  96#endif
  97#ifndef MPX_DISABLE_MANAGEMENT
  98# define MPX_DISABLE_MANAGEMENT()	(-EINVAL)
  99#endif
 100#ifndef GET_FP_MODE
 101# define GET_FP_MODE(a)		(-EINVAL)
 102#endif
 103#ifndef SET_FP_MODE
 104# define SET_FP_MODE(a,b)	(-EINVAL)
 105#endif
 106
 107/*
 108 * this is where the system-wide overflow UID and GID are defined, for
 109 * architectures that now have 32-bit UID/GID but didn't in the past
 110 */
 111
 112int overflowuid = DEFAULT_OVERFLOWUID;
 113int overflowgid = DEFAULT_OVERFLOWGID;
 114
 115EXPORT_SYMBOL(overflowuid);
 116EXPORT_SYMBOL(overflowgid);
 117
 118/*
 119 * the same as above, but for filesystems which can only store a 16-bit
  120 * UID and GID. As such, this is needed on all architectures
 121 */
 122
 123int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
  124int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
 125
 126EXPORT_SYMBOL(fs_overflowuid);
 127EXPORT_SYMBOL(fs_overflowgid);
 128
 129/*
 130 * Returns true if current's euid is same as p's uid or euid,
 131 * or has CAP_SYS_NICE to p's user_ns.
 132 *
 133 * Called with rcu_read_lock, creds are safe
 134 */
 135static bool set_one_prio_perm(struct task_struct *p)
 136{
 137	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
 138
 139	if (uid_eq(pcred->uid,  cred->euid) ||
 140	    uid_eq(pcred->euid, cred->euid))
 141		return true;
 142	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
 143		return true;
 144	return false;
 145}
 146
 147/*
 148 * set the priority of a task
 149 * - the caller must hold the RCU read lock
 150 */
 151static int set_one_prio(struct task_struct *p, int niceval, int error)
 152{
 153	int no_nice;
 154
 155	if (!set_one_prio_perm(p)) {
 156		error = -EPERM;
 157		goto out;
 158	}
 159	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
 160		error = -EACCES;
 161		goto out;
 162	}
 163	no_nice = security_task_setnice(p, niceval);
 164	if (no_nice) {
 165		error = no_nice;
 166		goto out;
 167	}
 168	if (error == -ESRCH)
 169		error = 0;
 170	set_user_nice(p, niceval);
 171out:
 172	return error;
 173}
 174
 175SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 176{
 177	struct task_struct *g, *p;
 178	struct user_struct *user;
 179	const struct cred *cred = current_cred();
 180	int error = -EINVAL;
 181	struct pid *pgrp;
 182	kuid_t uid;
 183
 184	if (which > PRIO_USER || which < PRIO_PROCESS)
 185		goto out;
 186
 187	/* normalize: avoid signed division (rounding problems) */
 188	error = -ESRCH;
 189	if (niceval < MIN_NICE)
 190		niceval = MIN_NICE;
 191	if (niceval > MAX_NICE)
 192		niceval = MAX_NICE;
 193
 194	rcu_read_lock();
 195	read_lock(&tasklist_lock);
 196	switch (which) {
 197	case PRIO_PROCESS:
 198		if (who)
 199			p = find_task_by_vpid(who);
 200		else
 201			p = current;
 202		if (p)
 203			error = set_one_prio(p, niceval, error);
 204		break;
 205	case PRIO_PGRP:
 206		if (who)
 207			pgrp = find_vpid(who);
 208		else
 209			pgrp = task_pgrp(current);
 210		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 211			error = set_one_prio(p, niceval, error);
 212		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 213		break;
 214	case PRIO_USER:
 215		uid = make_kuid(cred->user_ns, who);
 216		user = cred->user;
 217		if (!who)
 218			uid = cred->uid;
 219		else if (!uid_eq(uid, cred->uid)) {
 220			user = find_user(uid);
 221			if (!user)
 222				goto out_unlock;	/* No processes for this user */
 223		}
 224		do_each_thread(g, p) {
 225			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
 226				error = set_one_prio(p, niceval, error);
 227		} while_each_thread(g, p);
 228		if (!uid_eq(uid, cred->uid))
 229			free_uid(user);		/* For find_user() */
 230		break;
 231	}
 232out_unlock:
 233	read_unlock(&tasklist_lock);
 234	rcu_read_unlock();
 235out:
 236	return error;
 237}
 238
 239/*
 240 * Ugh. To avoid negative return values, "getpriority()" will
 241 * not return the normal nice-value, but a negated value that
 242 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 243 * to stay compatible.
 244 */
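/*
 * Illustrative sketch, not part of the original source: because of the
 * offset described above, the raw syscall value differs from what the C
 * library hands back.  Assuming glibc's getpriority() wrapper (which
 * subtracts the kernel's result from 20 to recover the -20..19 range),
 * a caller still has to distinguish a genuine -1 from an error via errno:
 *
 *	errno = 0;
 *	int nice_val = getpriority(PRIO_PROCESS, 0);
 *	if (nice_val == -1 && errno != 0)
 *		perror("getpriority");
 */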
 245SYSCALL_DEFINE2(getpriority, int, which, int, who)
 246{
 247	struct task_struct *g, *p;
 248	struct user_struct *user;
 249	const struct cred *cred = current_cred();
 250	long niceval, retval = -ESRCH;
 251	struct pid *pgrp;
 252	kuid_t uid;
 253
 254	if (which > PRIO_USER || which < PRIO_PROCESS)
 255		return -EINVAL;
 256
 257	rcu_read_lock();
 258	read_lock(&tasklist_lock);
 259	switch (which) {
 260	case PRIO_PROCESS:
 261		if (who)
 262			p = find_task_by_vpid(who);
 263		else
 264			p = current;
 265		if (p) {
 266			niceval = nice_to_rlimit(task_nice(p));
 267			if (niceval > retval)
 268				retval = niceval;
 269		}
 270		break;
 271	case PRIO_PGRP:
 272		if (who)
 273			pgrp = find_vpid(who);
 274		else
 275			pgrp = task_pgrp(current);
 276		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 277			niceval = nice_to_rlimit(task_nice(p));
 278			if (niceval > retval)
 279				retval = niceval;
 280		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 281		break;
 282	case PRIO_USER:
 283		uid = make_kuid(cred->user_ns, who);
 284		user = cred->user;
 285		if (!who)
 286			uid = cred->uid;
 287		else if (!uid_eq(uid, cred->uid)) {
 288			user = find_user(uid);
 289			if (!user)
 290				goto out_unlock;	/* No processes for this user */
 291		}
 292		do_each_thread(g, p) {
 293			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
 294				niceval = nice_to_rlimit(task_nice(p));
 295				if (niceval > retval)
 296					retval = niceval;
 297			}
 298		} while_each_thread(g, p);
 299		if (!uid_eq(uid, cred->uid))
 300			free_uid(user);		/* for find_user() */
 301		break;
 302	}
 303out_unlock:
 304	read_unlock(&tasklist_lock);
 305	rcu_read_unlock();
 306
 307	return retval;
 308}
 309
 310/*
 311 * Unprivileged users may change the real gid to the effective gid
 312 * or vice versa.  (BSD-style)
 313 *
 314 * If you set the real gid at all, or set the effective gid to a value not
 315 * equal to the real gid, then the saved gid is set to the new effective gid.
 316 *
 317 * This makes it possible for a setgid program to completely drop its
 318 * privileges, which is often a useful assertion to make when you are doing
 319 * a security audit over a program.
 320 *
 321 * The general idea is that a program which uses just setregid() will be
 322 * 100% compatible with BSD.  A program which uses just setgid() will be
 323 * 100% compatible with POSIX with saved IDs.
 324 *
  325 * SMP: There are no races; the GIDs are checked only by filesystem
 326 *      operations (as far as semantic preservation is concerned).
 327 */
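/*
 * Illustrative userspace sketch, not part of the original source: the
 * "completely drop its privileges" case described above.  A setgid helper
 * that wants to shed its elevated group irrevocably can do:
 *
 *	if (setregid(getgid(), getgid()) != 0)
 *		exit(EXIT_FAILURE);	// refuse to run half-privileged
 *
 * Because the real gid is (re)set here, the saved gid follows the new
 * effective gid, so the original elevated group cannot be reacquired.
 */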
 328#ifdef CONFIG_MULTIUSER
 329SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 330{
 331	struct user_namespace *ns = current_user_ns();
 332	const struct cred *old;
 333	struct cred *new;
 334	int retval;
 335	kgid_t krgid, kegid;
 336
 337	krgid = make_kgid(ns, rgid);
 338	kegid = make_kgid(ns, egid);
 339
 340	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 341		return -EINVAL;
 342	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 343		return -EINVAL;
 344
 345	new = prepare_creds();
 346	if (!new)
 347		return -ENOMEM;
 348	old = current_cred();
 349
 350	retval = -EPERM;
 351	if (rgid != (gid_t) -1) {
 352		if (gid_eq(old->gid, krgid) ||
 353		    gid_eq(old->egid, krgid) ||
 354		    ns_capable(old->user_ns, CAP_SETGID))
 355			new->gid = krgid;
 356		else
 357			goto error;
 358	}
 359	if (egid != (gid_t) -1) {
 360		if (gid_eq(old->gid, kegid) ||
 361		    gid_eq(old->egid, kegid) ||
 362		    gid_eq(old->sgid, kegid) ||
 363		    ns_capable(old->user_ns, CAP_SETGID))
 364			new->egid = kegid;
 365		else
 366			goto error;
 367	}
 368
 369	if (rgid != (gid_t) -1 ||
 370	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
 371		new->sgid = new->egid;
 372	new->fsgid = new->egid;
 373
 374	return commit_creds(new);
 375
 376error:
 377	abort_creds(new);
 378	return retval;
 379}
 380
 381/*
 382 * setgid() is implemented like SysV w/ SAVED_IDS
 383 *
 384 * SMP: Same implicit races as above.
 385 */
 386SYSCALL_DEFINE1(setgid, gid_t, gid)
 387{
 388	struct user_namespace *ns = current_user_ns();
 389	const struct cred *old;
 390	struct cred *new;
 391	int retval;
 392	kgid_t kgid;
 393
 394	kgid = make_kgid(ns, gid);
 395	if (!gid_valid(kgid))
 396		return -EINVAL;
 397
 398	new = prepare_creds();
 399	if (!new)
 400		return -ENOMEM;
 401	old = current_cred();
 402
 403	retval = -EPERM;
 404	if (ns_capable(old->user_ns, CAP_SETGID))
 405		new->gid = new->egid = new->sgid = new->fsgid = kgid;
 406	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
 407		new->egid = new->fsgid = kgid;
 408	else
 409		goto error;
 410
 411	return commit_creds(new);
 412
 413error:
 414	abort_creds(new);
 415	return retval;
 416}
 417
 418/*
 419 * change the user struct in a credentials set to match the new UID
 420 */
 421static int set_user(struct cred *new)
 422{
 423	struct user_struct *new_user;
 424
 425	new_user = alloc_uid(new->uid);
 426	if (!new_user)
 427		return -EAGAIN;
 428
 429	/*
 430	 * We don't fail in case of NPROC limit excess here because too many
 431	 * poorly written programs don't check set*uid() return code, assuming
 432	 * it never fails if called by root.  We may still enforce NPROC limit
 433	 * for programs doing set*uid()+execve() by harmlessly deferring the
 434	 * failure to the execve() stage.
 435	 */
 436	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
 437			new_user != INIT_USER)
 438		current->flags |= PF_NPROC_EXCEEDED;
 439	else
 440		current->flags &= ~PF_NPROC_EXCEEDED;
 441
 442	free_uid(new->user);
 443	new->user = new_user;
 444	return 0;
 445}
 446
 447/*
 448 * Unprivileged users may change the real uid to the effective uid
 449 * or vice versa.  (BSD-style)
 450 *
 451 * If you set the real uid at all, or set the effective uid to a value not
 452 * equal to the real uid, then the saved uid is set to the new effective uid.
 453 *
 454 * This makes it possible for a setuid program to completely drop its
 455 * privileges, which is often a useful assertion to make when you are doing
 456 * a security audit over a program.
 457 *
 458 * The general idea is that a program which uses just setreuid() will be
 459 * 100% compatible with BSD.  A program which uses just setuid() will be
 460 * 100% compatible with POSIX with saved IDs.
 461 */
 462SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 463{
 464	struct user_namespace *ns = current_user_ns();
 465	const struct cred *old;
 466	struct cred *new;
 467	int retval;
 468	kuid_t kruid, keuid;
 469
 470	kruid = make_kuid(ns, ruid);
 471	keuid = make_kuid(ns, euid);
 472
 473	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 474		return -EINVAL;
 475	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 476		return -EINVAL;
 477
 478	new = prepare_creds();
 479	if (!new)
 480		return -ENOMEM;
 481	old = current_cred();
 482
 483	retval = -EPERM;
 484	if (ruid != (uid_t) -1) {
 485		new->uid = kruid;
 486		if (!uid_eq(old->uid, kruid) &&
 487		    !uid_eq(old->euid, kruid) &&
 488		    !ns_capable(old->user_ns, CAP_SETUID))
 489			goto error;
 490	}
 491
 492	if (euid != (uid_t) -1) {
 493		new->euid = keuid;
 494		if (!uid_eq(old->uid, keuid) &&
 495		    !uid_eq(old->euid, keuid) &&
 496		    !uid_eq(old->suid, keuid) &&
 497		    !ns_capable(old->user_ns, CAP_SETUID))
 498			goto error;
 499	}
 500
 501	if (!uid_eq(new->uid, old->uid)) {
 502		retval = set_user(new);
 503		if (retval < 0)
 504			goto error;
 505	}
 506	if (ruid != (uid_t) -1 ||
 507	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
 508		new->suid = new->euid;
 509	new->fsuid = new->euid;
 510
 511	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
 512	if (retval < 0)
 513		goto error;
 514
 515	return commit_creds(new);
 516
 517error:
 518	abort_creds(new);
 519	return retval;
 520}
 521
 522/*
 523 * setuid() is implemented like SysV with SAVED_IDS
 524 *
  525 * Note that SAVED_IDS is deficient in that a setuid root program
 526 * like sendmail, for example, cannot set its uid to be a normal
 527 * user and then switch back, because if you're root, setuid() sets
 528 * the saved uid too.  If you don't like this, blame the bright people
 529 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 530 * will allow a root program to temporarily drop privileges and be able to
 531 * regain them by swapping the real and effective uid.
 532 */
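/*
 * Illustrative sketch, not part of the original source: the temporary
 * drop-and-regain pattern the comment above contrasts with setuid().
 * In a setuid-root program (real uid = invoking user, effective uid = 0):
 *
 *	setreuid(geteuid(), getuid());	// swap: euid becomes the user
 *	// ... do work without root's effective uid ...
 *	setreuid(geteuid(), getuid());	// swap back: euid is 0 again
 *
 * whereas calling setuid(uid) while euid == 0 would also overwrite the
 * saved uid and make the drop permanent.
 */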
 533SYSCALL_DEFINE1(setuid, uid_t, uid)
 534{
 535	struct user_namespace *ns = current_user_ns();
 536	const struct cred *old;
 537	struct cred *new;
 538	int retval;
 539	kuid_t kuid;
 540
 541	kuid = make_kuid(ns, uid);
 542	if (!uid_valid(kuid))
 543		return -EINVAL;
 544
 545	new = prepare_creds();
 546	if (!new)
 547		return -ENOMEM;
 548	old = current_cred();
 549
 550	retval = -EPERM;
 551	if (ns_capable(old->user_ns, CAP_SETUID)) {
 552		new->suid = new->uid = kuid;
 553		if (!uid_eq(kuid, old->uid)) {
 554			retval = set_user(new);
 555			if (retval < 0)
 556				goto error;
 557		}
 558	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
 559		goto error;
 560	}
 561
 562	new->fsuid = new->euid = kuid;
 563
 564	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
 565	if (retval < 0)
 566		goto error;
 567
 568	return commit_creds(new);
 569
 570error:
 571	abort_creds(new);
 572	return retval;
 573}
 574
 575
 576/*
 577 * This function implements a generic ability to update ruid, euid,
 578 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 579 */
 580SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 581{
 582	struct user_namespace *ns = current_user_ns();
 583	const struct cred *old;
 584	struct cred *new;
 585	int retval;
 586	kuid_t kruid, keuid, ksuid;
 587
 588	kruid = make_kuid(ns, ruid);
 589	keuid = make_kuid(ns, euid);
 590	ksuid = make_kuid(ns, suid);
 591
 592	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 593		return -EINVAL;
 594
 595	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 596		return -EINVAL;
 597
 598	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
 599		return -EINVAL;
 600
 601	new = prepare_creds();
 602	if (!new)
 603		return -ENOMEM;
 604
 605	old = current_cred();
 606
 607	retval = -EPERM;
 608	if (!ns_capable(old->user_ns, CAP_SETUID)) {
 609		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
 610		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
 611			goto error;
 612		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
 613		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
 614			goto error;
 615		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
 616		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
 617			goto error;
 618	}
 619
 620	if (ruid != (uid_t) -1) {
 621		new->uid = kruid;
 622		if (!uid_eq(kruid, old->uid)) {
 623			retval = set_user(new);
 624			if (retval < 0)
 625				goto error;
 626		}
 627	}
 628	if (euid != (uid_t) -1)
 629		new->euid = keuid;
 630	if (suid != (uid_t) -1)
 631		new->suid = ksuid;
 632	new->fsuid = new->euid;
 633
 634	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
 635	if (retval < 0)
 636		goto error;
 637
 638	return commit_creds(new);
 639
 640error:
 641	abort_creds(new);
 642	return retval;
 643}
 644
 645SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
 646{
 647	const struct cred *cred = current_cred();
 648	int retval;
 649	uid_t ruid, euid, suid;
 650
 651	ruid = from_kuid_munged(cred->user_ns, cred->uid);
 652	euid = from_kuid_munged(cred->user_ns, cred->euid);
 653	suid = from_kuid_munged(cred->user_ns, cred->suid);
 654
 655	retval = put_user(ruid, ruidp);
 656	if (!retval) {
 657		retval = put_user(euid, euidp);
 658		if (!retval)
 659			return put_user(suid, suidp);
 660	}
 661	return retval;
 662}
 663
 664/*
 665 * Same as above, but for rgid, egid, sgid.
 666 */
 667SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 668{
 669	struct user_namespace *ns = current_user_ns();
 670	const struct cred *old;
 671	struct cred *new;
 672	int retval;
 673	kgid_t krgid, kegid, ksgid;
 674
 675	krgid = make_kgid(ns, rgid);
 676	kegid = make_kgid(ns, egid);
 677	ksgid = make_kgid(ns, sgid);
 678
 679	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 680		return -EINVAL;
 681	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 682		return -EINVAL;
 683	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
 684		return -EINVAL;
 685
 686	new = prepare_creds();
 687	if (!new)
 688		return -ENOMEM;
 689	old = current_cred();
 690
 691	retval = -EPERM;
 692	if (!ns_capable(old->user_ns, CAP_SETGID)) {
 693		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
 694		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
 695			goto error;
 696		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
 697		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
 698			goto error;
 699		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
 700		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
 701			goto error;
 702	}
 703
 704	if (rgid != (gid_t) -1)
 705		new->gid = krgid;
 706	if (egid != (gid_t) -1)
 707		new->egid = kegid;
 708	if (sgid != (gid_t) -1)
 709		new->sgid = ksgid;
 710	new->fsgid = new->egid;
 711
 712	return commit_creds(new);
 713
 714error:
 715	abort_creds(new);
 716	return retval;
 717}
 718
 719SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
 720{
 721	const struct cred *cred = current_cred();
 722	int retval;
 723	gid_t rgid, egid, sgid;
 724
 725	rgid = from_kgid_munged(cred->user_ns, cred->gid);
 726	egid = from_kgid_munged(cred->user_ns, cred->egid);
 727	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
 728
 729	retval = put_user(rgid, rgidp);
 730	if (!retval) {
 731		retval = put_user(egid, egidp);
 732		if (!retval)
 733			retval = put_user(sgid, sgidp);
 734	}
 735
 736	return retval;
 737}
 738
 739
 740/*
 741 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 742 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 743 * whatever uid it wants to). It normally shadows "euid", except when
 744 * explicitly set by setfsuid() or for access..
 745 */
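/*
 * Illustrative sketch, not part of the original source: a privileged
 * userspace file server could bracket per-client filesystem access with
 *
 *	int old = setfsuid(client_uid);	// only fs permission checks change
 *	int fd = open(path, O_RDONLY);
 *	setfsuid(old);			// restore the previous fsuid
 *
 * (client_uid and path are placeholders.)  setfsuid() reports failure only
 * through its return value -- the previous fsuid -- which is why the
 * implementation below returns old_fsuid on every path.
 */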
 746SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 747{
 748	const struct cred *old;
 749	struct cred *new;
 750	uid_t old_fsuid;
 751	kuid_t kuid;
 752
 753	old = current_cred();
 754	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
 755
 756	kuid = make_kuid(old->user_ns, uid);
 757	if (!uid_valid(kuid))
 758		return old_fsuid;
 759
 760	new = prepare_creds();
 761	if (!new)
 762		return old_fsuid;
 763
 764	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
 765	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
 766	    ns_capable(old->user_ns, CAP_SETUID)) {
 767		if (!uid_eq(kuid, old->fsuid)) {
 768			new->fsuid = kuid;
 769			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 770				goto change_okay;
 771		}
 772	}
 773
 774	abort_creds(new);
 775	return old_fsuid;
 776
 777change_okay:
 778	commit_creds(new);
 779	return old_fsuid;
 780}
 781
 782/*
  783 * Samma på svenska.. ("same thing in Swedish" -- the fsgid counterpart of setfsuid() above)
 784 */
 785SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 786{
 787	const struct cred *old;
 788	struct cred *new;
 789	gid_t old_fsgid;
 790	kgid_t kgid;
 791
 792	old = current_cred();
 793	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
 794
 795	kgid = make_kgid(old->user_ns, gid);
 796	if (!gid_valid(kgid))
 797		return old_fsgid;
 798
 799	new = prepare_creds();
 800	if (!new)
 801		return old_fsgid;
 802
 803	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
 804	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
 805	    ns_capable(old->user_ns, CAP_SETGID)) {
 806		if (!gid_eq(kgid, old->fsgid)) {
 807			new->fsgid = kgid;
 808			goto change_okay;
 809		}
 810	}
 811
 812	abort_creds(new);
 813	return old_fsgid;
 814
 815change_okay:
 816	commit_creds(new);
 817	return old_fsgid;
 818}
 819#endif /* CONFIG_MULTIUSER */
 820
 821/**
 822 * sys_getpid - return the thread group id of the current process
 823 *
 824 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 825 * the pid are identical unless CLONE_THREAD was specified on clone() in
 826 * which case the tgid is the same in all threads of the same group.
 827 *
 828 * This is SMP safe as current->tgid does not change.
 829 */
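/*
 * Illustrative sketch, not part of the original source: the tgid/pid
 * distinction described above is visible from a multi-threaded program,
 * e.g. (gettid has no glibc wrapper in older releases, hence syscall()):
 *
 *	printf("pid=%d tid=%ld\n", getpid(), (long)syscall(SYS_gettid));
 *
 * Every thread prints the same pid (the tgid), while the tid differs per
 * thread; for the thread group leader the two values are equal.
 */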
 830SYSCALL_DEFINE0(getpid)
 831{
 832	return task_tgid_vnr(current);
 833}
 834
 835/* Thread ID - the internal kernel "pid" */
 836SYSCALL_DEFINE0(gettid)
 837{
 838	return task_pid_vnr(current);
 839}
 840
 841/*
 842 * Accessing ->real_parent is not SMP-safe, it could
 843 * change from under us. However, we can use a stale
 844 * value of ->real_parent under rcu_read_lock(), see
 845 * release_task()->call_rcu(delayed_put_task_struct).
 846 */
 847SYSCALL_DEFINE0(getppid)
 848{
 849	int pid;
 850
 851	rcu_read_lock();
 852	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
 853	rcu_read_unlock();
 854
 855	return pid;
 856}
 857
 858SYSCALL_DEFINE0(getuid)
 859{
 860	/* Only we change this so SMP safe */
 861	return from_kuid_munged(current_user_ns(), current_uid());
 862}
 863
 864SYSCALL_DEFINE0(geteuid)
 865{
 866	/* Only we change this so SMP safe */
 867	return from_kuid_munged(current_user_ns(), current_euid());
 868}
 869
 870SYSCALL_DEFINE0(getgid)
 871{
 872	/* Only we change this so SMP safe */
 873	return from_kgid_munged(current_user_ns(), current_gid());
 874}
 875
 876SYSCALL_DEFINE0(getegid)
 877{
 878	/* Only we change this so SMP safe */
 879	return from_kgid_munged(current_user_ns(), current_egid());
 880}
 881
 882void do_sys_times(struct tms *tms)
 883{
 884	cputime_t tgutime, tgstime, cutime, cstime;
 885
 886	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
 887	cutime = current->signal->cutime;
 888	cstime = current->signal->cstime;
 889	tms->tms_utime = cputime_to_clock_t(tgutime);
 890	tms->tms_stime = cputime_to_clock_t(tgstime);
 891	tms->tms_cutime = cputime_to_clock_t(cutime);
 892	tms->tms_cstime = cputime_to_clock_t(cstime);
 893}
 894
 895SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
 896{
 897	if (tbuf) {
 898		struct tms tmp;
 899
 900		do_sys_times(&tmp);
 901		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
 902			return -EFAULT;
 903	}
 904	force_successful_syscall_return();
 905	return (long) jiffies_64_to_clock_t(get_jiffies_64());
 906}
 907
 908/*
 909 * This needs some heavy checking ...
 910 * I just haven't the stomach for it. I also don't fully
 911 * understand sessions/pgrp etc. Let somebody who does explain it.
 912 *
 913 * OK, I think I have the protection semantics right.... this is really
 914 * only important on a multi-user system anyway, to make sure one user
 915 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 916 *
  917 * The !PF_FORKNOEXEC check is there to conform completely to POSIX.
 918 */
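/*
 * Illustrative userspace sketch, not part of the original source: the
 * usual job-control sequence in a shell, where both parent and child call
 * setpgid() so the group exists no matter which side runs first:
 *
 *	pid_t child = fork();
 *	if (child == 0) {
 *		setpgid(0, 0);		// child: new group with pgid == pid
 *		execvp(cmd, argv);
 *		_exit(127);
 *	}
 *	setpgid(child, child);		// parent: EACCES here just means the
 *					// child has already exec'd
 *
 * The !PF_FORKNOEXEC check below is what turns the parent's late call
 * into -EACCES once the child has exec'd.
 */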
 919SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 920{
 921	struct task_struct *p;
 922	struct task_struct *group_leader = current->group_leader;
 923	struct pid *pgrp;
 924	int err;
 925
 926	if (!pid)
 927		pid = task_pid_vnr(group_leader);
 928	if (!pgid)
 929		pgid = pid;
 930	if (pgid < 0)
 931		return -EINVAL;
 932	rcu_read_lock();
 933
 934	/* From this point forward we keep holding onto the tasklist lock
 935	 * so that our parent does not change from under us. -DaveM
 936	 */
 937	write_lock_irq(&tasklist_lock);
 938
 939	err = -ESRCH;
 940	p = find_task_by_vpid(pid);
 941	if (!p)
 942		goto out;
 943
 944	err = -EINVAL;
 945	if (!thread_group_leader(p))
 946		goto out;
 947
 948	if (same_thread_group(p->real_parent, group_leader)) {
 949		err = -EPERM;
 950		if (task_session(p) != task_session(group_leader))
 951			goto out;
 952		err = -EACCES;
 953		if (!(p->flags & PF_FORKNOEXEC))
 954			goto out;
 955	} else {
 956		err = -ESRCH;
 957		if (p != group_leader)
 958			goto out;
 959	}
 960
 961	err = -EPERM;
 962	if (p->signal->leader)
 963		goto out;
 964
 965	pgrp = task_pid(p);
 966	if (pgid != pid) {
 967		struct task_struct *g;
 968
 969		pgrp = find_vpid(pgid);
 970		g = pid_task(pgrp, PIDTYPE_PGID);
 971		if (!g || task_session(g) != task_session(group_leader))
 972			goto out;
 973	}
 974
 975	err = security_task_setpgid(p, pgid);
 976	if (err)
 977		goto out;
 978
 979	if (task_pgrp(p) != pgrp)
 980		change_pid(p, PIDTYPE_PGID, pgrp);
 981
 982	err = 0;
 983out:
 984	/* All paths lead to here, thus we are safe. -DaveM */
 985	write_unlock_irq(&tasklist_lock);
 986	rcu_read_unlock();
 987	return err;
 988}
 989
 990SYSCALL_DEFINE1(getpgid, pid_t, pid)
 991{
 992	struct task_struct *p;
 993	struct pid *grp;
 994	int retval;
 995
 996	rcu_read_lock();
 997	if (!pid)
 998		grp = task_pgrp(current);
 999	else {
1000		retval = -ESRCH;
1001		p = find_task_by_vpid(pid);
1002		if (!p)
1003			goto out;
1004		grp = task_pgrp(p);
1005		if (!grp)
1006			goto out;
1007
1008		retval = security_task_getpgid(p);
1009		if (retval)
1010			goto out;
1011	}
1012	retval = pid_vnr(grp);
1013out:
1014	rcu_read_unlock();
1015	return retval;
1016}
1017
1018#ifdef __ARCH_WANT_SYS_GETPGRP
1019
1020SYSCALL_DEFINE0(getpgrp)
1021{
1022	return sys_getpgid(0);
1023}
1024
1025#endif
1026
1027SYSCALL_DEFINE1(getsid, pid_t, pid)
1028{
1029	struct task_struct *p;
1030	struct pid *sid;
1031	int retval;
1032
1033	rcu_read_lock();
1034	if (!pid)
1035		sid = task_session(current);
1036	else {
1037		retval = -ESRCH;
1038		p = find_task_by_vpid(pid);
1039		if (!p)
1040			goto out;
1041		sid = task_session(p);
1042		if (!sid)
1043			goto out;
1044
1045		retval = security_task_getsid(p);
1046		if (retval)
1047			goto out;
1048	}
1049	retval = pid_vnr(sid);
1050out:
1051	rcu_read_unlock();
1052	return retval;
1053}
1054
1055static void set_special_pids(struct pid *pid)
1056{
1057	struct task_struct *curr = current->group_leader;
1058
1059	if (task_session(curr) != pid)
1060		change_pid(curr, PIDTYPE_SID, pid);
1061
1062	if (task_pgrp(curr) != pid)
1063		change_pid(curr, PIDTYPE_PGID, pid);
1064}
1065
1066SYSCALL_DEFINE0(setsid)
1067{
1068	struct task_struct *group_leader = current->group_leader;
1069	struct pid *sid = task_pid(group_leader);
1070	pid_t session = pid_vnr(sid);
1071	int err = -EPERM;
1072
1073	write_lock_irq(&tasklist_lock);
1074	/* Fail if I am already a session leader */
1075	if (group_leader->signal->leader)
1076		goto out;
1077
1078	/* Fail if a process group id already exists that equals the
1079	 * proposed session id.
1080	 */
1081	if (pid_task(sid, PIDTYPE_PGID))
1082		goto out;
1083
1084	group_leader->signal->leader = 1;
1085	set_special_pids(sid);
1086
1087	proc_clear_tty(group_leader);
1088
1089	err = session;
1090out:
1091	write_unlock_irq(&tasklist_lock);
1092	if (err > 0) {
1093		proc_sid_connector(group_leader);
1094		sched_autogroup_create_attach(group_leader);
1095	}
1096	return err;
1097}
1098
1099DECLARE_RWSEM(uts_sem);
1100
1101#ifdef COMPAT_UTS_MACHINE
1102#define override_architecture(name) \
1103	(personality(current->personality) == PER_LINUX32 && \
1104	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1105		      sizeof(COMPAT_UTS_MACHINE)))
1106#else
1107#define override_architecture(name)	0
1108#endif
1109
1110/*
1111 * Work around broken programs that cannot handle "Linux 3.0".
 1112 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40,
 1113 * and we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
 1114 */
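/*
 * Illustrative sketch, not part of the original source: the mapping above
 * only kicks in for callers that opted in via the UNAME26 personality bit,
 * e.g.
 *
 *	personality(PER_LINUX | UNAME26);
 *	uname(&buf);	// buf.release now reads like "2.6.6x" on a 4.x kernel
 *
 * which is what e.g. util-linux's setarch --uname-2.6 option relies on.
 */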
1115static int override_release(char __user *release, size_t len)
1116{
1117	int ret = 0;
1118
1119	if (current->personality & UNAME26) {
1120		const char *rest = UTS_RELEASE;
1121		char buf[65] = { 0 };
1122		int ndots = 0;
1123		unsigned v;
1124		size_t copy;
1125
1126		while (*rest) {
1127			if (*rest == '.' && ++ndots >= 3)
1128				break;
1129			if (!isdigit(*rest) && *rest != '.')
1130				break;
1131			rest++;
1132		}
1133		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1134		copy = clamp_t(size_t, len, 1, sizeof(buf));
1135		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1136		ret = copy_to_user(release, buf, copy + 1);
1137	}
1138	return ret;
1139}
1140
1141SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1142{
1143	int errno = 0;
1144
1145	down_read(&uts_sem);
1146	if (copy_to_user(name, utsname(), sizeof *name))
1147		errno = -EFAULT;
1148	up_read(&uts_sem);
1149
1150	if (!errno && override_release(name->release, sizeof(name->release)))
1151		errno = -EFAULT;
1152	if (!errno && override_architecture(name))
1153		errno = -EFAULT;
1154	return errno;
1155}
1156
1157#ifdef __ARCH_WANT_SYS_OLD_UNAME
1158/*
1159 * Old cruft
1160 */
1161SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1162{
1163	int error = 0;
1164
1165	if (!name)
1166		return -EFAULT;
1167
1168	down_read(&uts_sem);
1169	if (copy_to_user(name, utsname(), sizeof(*name)))
1170		error = -EFAULT;
1171	up_read(&uts_sem);
1172
1173	if (!error && override_release(name->release, sizeof(name->release)))
1174		error = -EFAULT;
1175	if (!error && override_architecture(name))
1176		error = -EFAULT;
1177	return error;
1178}
1179
1180SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1181{
1182	int error;
1183
1184	if (!name)
1185		return -EFAULT;
1186	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1187		return -EFAULT;
1188
1189	down_read(&uts_sem);
1190	error = __copy_to_user(&name->sysname, &utsname()->sysname,
1191			       __OLD_UTS_LEN);
1192	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1193	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1194				__OLD_UTS_LEN);
1195	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1196	error |= __copy_to_user(&name->release, &utsname()->release,
1197				__OLD_UTS_LEN);
1198	error |= __put_user(0, name->release + __OLD_UTS_LEN);
1199	error |= __copy_to_user(&name->version, &utsname()->version,
1200				__OLD_UTS_LEN);
1201	error |= __put_user(0, name->version + __OLD_UTS_LEN);
1202	error |= __copy_to_user(&name->machine, &utsname()->machine,
1203				__OLD_UTS_LEN);
1204	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1205	up_read(&uts_sem);
1206
1207	if (!error && override_architecture(name))
1208		error = -EFAULT;
1209	if (!error && override_release(name->release, sizeof(name->release)))
1210		error = -EFAULT;
1211	return error ? -EFAULT : 0;
1212}
1213#endif
1214
1215SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1216{
1217	int errno;
1218	char tmp[__NEW_UTS_LEN];
1219
1220	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1221		return -EPERM;
1222
1223	if (len < 0 || len > __NEW_UTS_LEN)
1224		return -EINVAL;
1225	down_write(&uts_sem);
1226	errno = -EFAULT;
1227	if (!copy_from_user(tmp, name, len)) {
1228		struct new_utsname *u = utsname();
1229
1230		memcpy(u->nodename, tmp, len);
1231		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1232		errno = 0;
1233		uts_proc_notify(UTS_PROC_HOSTNAME);
1234	}
1235	up_write(&uts_sem);
1236	return errno;
1237}
1238
1239#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1240
1241SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1242{
1243	int i, errno;
1244	struct new_utsname *u;
1245
1246	if (len < 0)
1247		return -EINVAL;
1248	down_read(&uts_sem);
1249	u = utsname();
1250	i = 1 + strlen(u->nodename);
1251	if (i > len)
1252		i = len;
1253	errno = 0;
1254	if (copy_to_user(name, u->nodename, i))
1255		errno = -EFAULT;
1256	up_read(&uts_sem);
1257	return errno;
1258}
1259
1260#endif
1261
1262/*
1263 * Only setdomainname; getdomainname can be implemented by calling
1264 * uname()
1265 */
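/*
 * Illustrative sketch, not part of the original source: the getdomainname
 * route mentioned above is simply
 *
 *	struct utsname u;
 *	uname(&u);
 *	// u.domainname (a _GNU_SOURCE extension member) holds the NIS domain
 */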
1266SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1267{
1268	int errno;
1269	char tmp[__NEW_UTS_LEN];
1270
1271	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1272		return -EPERM;
1273	if (len < 0 || len > __NEW_UTS_LEN)
1274		return -EINVAL;
1275
1276	down_write(&uts_sem);
1277	errno = -EFAULT;
1278	if (!copy_from_user(tmp, name, len)) {
1279		struct new_utsname *u = utsname();
1280
1281		memcpy(u->domainname, tmp, len);
1282		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1283		errno = 0;
1284		uts_proc_notify(UTS_PROC_DOMAINNAME);
1285	}
1286	up_write(&uts_sem);
1287	return errno;
1288}
1289
1290SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1291{
1292	struct rlimit value;
1293	int ret;
1294
1295	ret = do_prlimit(current, resource, NULL, &value);
1296	if (!ret)
1297		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1298
1299	return ret;
1300}
1301
1302#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1303
1304/*
1305 *	Back compatibility for getrlimit. Needed for some apps.
1306 */
1307SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1308		struct rlimit __user *, rlim)
1309{
1310	struct rlimit x;
1311	if (resource >= RLIM_NLIMITS)
1312		return -EINVAL;
1313
1314	task_lock(current->group_leader);
1315	x = current->signal->rlim[resource];
1316	task_unlock(current->group_leader);
1317	if (x.rlim_cur > 0x7FFFFFFF)
1318		x.rlim_cur = 0x7FFFFFFF;
1319	if (x.rlim_max > 0x7FFFFFFF)
1320		x.rlim_max = 0x7FFFFFFF;
1321	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1322}
1323
1324#endif
1325
1326static inline bool rlim64_is_infinity(__u64 rlim64)
1327{
1328#if BITS_PER_LONG < 64
1329	return rlim64 >= ULONG_MAX;
1330#else
1331	return rlim64 == RLIM64_INFINITY;
1332#endif
1333}
1334
1335static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1336{
1337	if (rlim->rlim_cur == RLIM_INFINITY)
1338		rlim64->rlim_cur = RLIM64_INFINITY;
1339	else
1340		rlim64->rlim_cur = rlim->rlim_cur;
1341	if (rlim->rlim_max == RLIM_INFINITY)
1342		rlim64->rlim_max = RLIM64_INFINITY;
1343	else
1344		rlim64->rlim_max = rlim->rlim_max;
1345}
1346
1347static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1348{
1349	if (rlim64_is_infinity(rlim64->rlim_cur))
1350		rlim->rlim_cur = RLIM_INFINITY;
1351	else
1352		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1353	if (rlim64_is_infinity(rlim64->rlim_max))
1354		rlim->rlim_max = RLIM_INFINITY;
1355	else
1356		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1357}
1358
1359/* make sure you are allowed to change @tsk limits before calling this */
1360int do_prlimit(struct task_struct *tsk, unsigned int resource,
1361		struct rlimit *new_rlim, struct rlimit *old_rlim)
1362{
1363	struct rlimit *rlim;
1364	int retval = 0;
1365
1366	if (resource >= RLIM_NLIMITS)
1367		return -EINVAL;
1368	if (new_rlim) {
1369		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1370			return -EINVAL;
1371		if (resource == RLIMIT_NOFILE &&
1372				new_rlim->rlim_max > sysctl_nr_open)
1373			return -EPERM;
1374	}
1375
1376	/* protect tsk->signal and tsk->sighand from disappearing */
1377	read_lock(&tasklist_lock);
1378	if (!tsk->sighand) {
1379		retval = -ESRCH;
1380		goto out;
1381	}
1382
1383	rlim = tsk->signal->rlim + resource;
1384	task_lock(tsk->group_leader);
1385	if (new_rlim) {
1386		/* Keep the capable check against init_user_ns until
1387		   cgroups can contain all limits */
1388		if (new_rlim->rlim_max > rlim->rlim_max &&
1389				!capable(CAP_SYS_RESOURCE))
1390			retval = -EPERM;
1391		if (!retval)
1392			retval = security_task_setrlimit(tsk->group_leader,
1393					resource, new_rlim);
1394		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
1395			/*
1396			 * The caller is asking for an immediate RLIMIT_CPU
1397			 * expiry.  But we use the zero value to mean "it was
1398			 * never set".  So let's cheat and make it one second
1399			 * instead
1400			 */
1401			new_rlim->rlim_cur = 1;
1402		}
1403	}
1404	if (!retval) {
1405		if (old_rlim)
1406			*old_rlim = *rlim;
1407		if (new_rlim)
1408			*rlim = *new_rlim;
1409	}
1410	task_unlock(tsk->group_leader);
1411
1412	/*
1413	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
1414	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
1415	 * very long-standing error, and fixing it now risks breakage of
1416	 * applications, so we live with it
1417	 */
1418	 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1419	     new_rlim->rlim_cur != RLIM_INFINITY &&
1420	     IS_ENABLED(CONFIG_POSIX_TIMERS))
1421		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1422out:
1423	read_unlock(&tasklist_lock);
1424	return retval;
1425}
1426
1427/* rcu lock must be held */
1428static int check_prlimit_permission(struct task_struct *task)
1429{
1430	const struct cred *cred = current_cred(), *tcred;
1431
1432	if (current == task)
1433		return 0;
1434
1435	tcred = __task_cred(task);
1436	if (uid_eq(cred->uid, tcred->euid) &&
1437	    uid_eq(cred->uid, tcred->suid) &&
1438	    uid_eq(cred->uid, tcred->uid)  &&
1439	    gid_eq(cred->gid, tcred->egid) &&
1440	    gid_eq(cred->gid, tcred->sgid) &&
1441	    gid_eq(cred->gid, tcred->gid))
1442		return 0;
1443	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1444		return 0;
1445
1446	return -EPERM;
1447}
1448
1449SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1450		const struct rlimit64 __user *, new_rlim,
1451		struct rlimit64 __user *, old_rlim)
1452{
1453	struct rlimit64 old64, new64;
1454	struct rlimit old, new;
1455	struct task_struct *tsk;
1456	int ret;
1457
1458	if (new_rlim) {
1459		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1460			return -EFAULT;
1461		rlim64_to_rlim(&new64, &new);
1462	}
1463
1464	rcu_read_lock();
1465	tsk = pid ? find_task_by_vpid(pid) : current;
1466	if (!tsk) {
1467		rcu_read_unlock();
1468		return -ESRCH;
1469	}
1470	ret = check_prlimit_permission(tsk);
1471	if (ret) {
1472		rcu_read_unlock();
1473		return ret;
1474	}
1475	get_task_struct(tsk);
1476	rcu_read_unlock();
1477
1478	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1479			old_rlim ? &old : NULL);
1480
1481	if (!ret && old_rlim) {
1482		rlim_to_rlim64(&old, &old64);
1483		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1484			ret = -EFAULT;
1485	}
1486
1487	put_task_struct(tsk);
1488	return ret;
1489}
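/*
 * Illustrative sketch, not part of the original source: this syscall backs
 * glibc's prlimit()/prlimit64() and the prlimit(1) utility.  Raising the
 * open-file limit of another process (subject to check_prlimit_permission()
 * above) looks like:
 *
 *	struct rlimit lim = { .rlim_cur = 4096, .rlim_max = 4096 };
 *	if (prlimit(target_pid, RLIMIT_NOFILE, &lim, NULL) != 0)
 *		perror("prlimit");
 */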
1490
1491SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1492{
1493	struct rlimit new_rlim;
1494
1495	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1496		return -EFAULT;
1497	return do_prlimit(current, resource, &new_rlim, NULL);
1498}
1499
1500/*
1501 * It would make sense to put struct rusage in the task_struct,
1502 * except that would make the task_struct be *really big*.  After
1503 * task_struct gets moved into malloc'ed memory, it would
1504 * make sense to do this.  It will make moving the rest of the information
1505 * a lot simpler!  (Which we're not doing right now because we're not
1506 * measuring them yet).
1507 *
1508 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1509 * races with threads incrementing their own counters.  But since word
1510 * reads are atomic, we either get new values or old values and we don't
1511 * care which for the sums.  We always take the siglock to protect reading
1512 * the c* fields from p->signal from races with exit.c updating those
1513 * fields when reaping, so a sample either gets all the additions of a
1514 * given child after it's reaped, or none so this sample is before reaping.
1515 *
1516 * Locking:
 1517 * We need to take the siglock for CHILDREN, SELF and BOTH
 1518 * for the cases current multithreaded, non-current single threaded and
 1519 * non-current multithreaded.  Thread traversal is now safe with
 1520 * the siglock held.
 1521 * Strictly speaking, we do not need to take the siglock if we are current and
 1522 * single threaded, as no one else can take our signal_struct away, no one
 1523 * else can reap the children to update signal->c* counters, and no one else
 1524 * can race with the signal-> fields. If we do not take any lock, the
 1525 * signal-> fields could be read out of order while another thread was just
 1526 * exiting. So we should place a read memory barrier when we avoid the lock.
 1527 * On the writer side, a write memory barrier is implied in __exit_signal,
 1528 * as __exit_signal releases the siglock spinlock after updating the signal->
 1529 * fields. But we don't do this yet to keep things simple.
1530 *
1531 */
1532
1533static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1534{
1535	r->ru_nvcsw += t->nvcsw;
1536	r->ru_nivcsw += t->nivcsw;
1537	r->ru_minflt += t->min_flt;
1538	r->ru_majflt += t->maj_flt;
1539	r->ru_inblock += task_io_get_inblock(t);
1540	r->ru_oublock += task_io_get_oublock(t);
1541}
1542
1543static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1544{
1545	struct task_struct *t;
1546	unsigned long flags;
1547	cputime_t tgutime, tgstime, utime, stime;
1548	unsigned long maxrss = 0;
1549
1550	memset((char *)r, 0, sizeof (*r));
1551	utime = stime = 0;
1552
1553	if (who == RUSAGE_THREAD) {
1554		task_cputime_adjusted(current, &utime, &stime);
1555		accumulate_thread_rusage(p, r);
1556		maxrss = p->signal->maxrss;
1557		goto out;
1558	}
1559
1560	if (!lock_task_sighand(p, &flags))
1561		return;
1562
1563	switch (who) {
1564	case RUSAGE_BOTH:
1565	case RUSAGE_CHILDREN:
1566		utime = p->signal->cutime;
1567		stime = p->signal->cstime;
1568		r->ru_nvcsw = p->signal->cnvcsw;
1569		r->ru_nivcsw = p->signal->cnivcsw;
1570		r->ru_minflt = p->signal->cmin_flt;
1571		r->ru_majflt = p->signal->cmaj_flt;
1572		r->ru_inblock = p->signal->cinblock;
1573		r->ru_oublock = p->signal->coublock;
1574		maxrss = p->signal->cmaxrss;
1575
1576		if (who == RUSAGE_CHILDREN)
1577			break;
1578
1579	case RUSAGE_SELF:
1580		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1581		utime += tgutime;
1582		stime += tgstime;
1583		r->ru_nvcsw += p->signal->nvcsw;
1584		r->ru_nivcsw += p->signal->nivcsw;
1585		r->ru_minflt += p->signal->min_flt;
1586		r->ru_majflt += p->signal->maj_flt;
1587		r->ru_inblock += p->signal->inblock;
1588		r->ru_oublock += p->signal->oublock;
1589		if (maxrss < p->signal->maxrss)
1590			maxrss = p->signal->maxrss;
1591		t = p;
1592		do {
1593			accumulate_thread_rusage(t, r);
1594		} while_each_thread(p, t);
1595		break;
1596
1597	default:
1598		BUG();
1599	}
1600	unlock_task_sighand(p, &flags);
1601
1602out:
1603	cputime_to_timeval(utime, &r->ru_utime);
1604	cputime_to_timeval(stime, &r->ru_stime);
1605
1606	if (who != RUSAGE_CHILDREN) {
1607		struct mm_struct *mm = get_task_mm(p);
1608
1609		if (mm) {
1610			setmax_mm_hiwater_rss(&maxrss, mm);
1611			mmput(mm);
1612		}
1613	}
1614	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1615}
1616
1617int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1618{
1619	struct rusage r;
1620
1621	k_getrusage(p, who, &r);
1622	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1623}
1624
1625SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1626{
1627	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1628	    who != RUSAGE_THREAD)
1629		return -EINVAL;
1630	return getrusage(current, who, ru);
1631}
1632
1633#ifdef CONFIG_COMPAT
1634COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1635{
1636	struct rusage r;
1637
1638	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1639	    who != RUSAGE_THREAD)
1640		return -EINVAL;
1641
1642	k_getrusage(current, who, &r);
1643	return put_compat_rusage(&r, ru);
1644}
1645#endif
1646
1647SYSCALL_DEFINE1(umask, int, mask)
1648{
1649	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1650	return mask;
1651}
1652
1653static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1654{
1655	struct fd exe;
1656	struct file *old_exe, *exe_file;
1657	struct inode *inode;
1658	int err;
1659
1660	exe = fdget(fd);
1661	if (!exe.file)
1662		return -EBADF;
1663
1664	inode = file_inode(exe.file);
1665
1666	/*
 1667	 * Because the original mm->exe_file points to an executable file, make
 1668	 * sure that this one is executable as well, to avoid breaking the
 1669	 * overall picture.
1670	 */
1671	err = -EACCES;
1672	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1673		goto exit;
1674
1675	err = inode_permission(inode, MAY_EXEC);
1676	if (err)
1677		goto exit;
1678
1679	/*
1680	 * Forbid mm->exe_file change if old file still mapped.
1681	 */
1682	exe_file = get_mm_exe_file(mm);
1683	err = -EBUSY;
1684	if (exe_file) {
1685		struct vm_area_struct *vma;
1686
1687		down_read(&mm->mmap_sem);
1688		for (vma = mm->mmap; vma; vma = vma->vm_next) {
1689			if (!vma->vm_file)
1690				continue;
1691			if (path_equal(&vma->vm_file->f_path,
1692				       &exe_file->f_path))
1693				goto exit_err;
1694		}
1695
1696		up_read(&mm->mmap_sem);
1697		fput(exe_file);
1698	}
1699
1700	err = 0;
1701	/* set the new file, lockless */
1702	get_file(exe.file);
1703	old_exe = xchg(&mm->exe_file, exe.file);
1704	if (old_exe)
1705		fput(old_exe);
1706exit:
1707	fdput(exe);
1708	return err;
1709exit_err:
1710	up_read(&mm->mmap_sem);
1711	fput(exe_file);
1712	goto exit;
1713}
1714
1715/*
1716 * WARNING: we don't require any capability here so be very careful
1717 * in what is allowed for modification from userspace.
1718 */
1719static int validate_prctl_map(struct prctl_mm_map *prctl_map)
1720{
1721	unsigned long mmap_max_addr = TASK_SIZE;
1722	struct mm_struct *mm = current->mm;
1723	int error = -EINVAL, i;
1724
1725	static const unsigned char offsets[] = {
1726		offsetof(struct prctl_mm_map, start_code),
1727		offsetof(struct prctl_mm_map, end_code),
1728		offsetof(struct prctl_mm_map, start_data),
1729		offsetof(struct prctl_mm_map, end_data),
1730		offsetof(struct prctl_mm_map, start_brk),
1731		offsetof(struct prctl_mm_map, brk),
1732		offsetof(struct prctl_mm_map, start_stack),
1733		offsetof(struct prctl_mm_map, arg_start),
1734		offsetof(struct prctl_mm_map, arg_end),
1735		offsetof(struct prctl_mm_map, env_start),
1736		offsetof(struct prctl_mm_map, env_end),
1737	};
1738
1739	/*
1740	 * Make sure the members are not somewhere outside
1741	 * of allowed address space.
1742	 */
1743	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1744		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1745
1746		if ((unsigned long)val >= mmap_max_addr ||
1747		    (unsigned long)val < mmap_min_addr)
1748			goto out;
1749	}
1750
1751	/*
1752	 * Make sure the pairs are ordered.
1753	 */
1754#define __prctl_check_order(__m1, __op, __m2)				\
1755	((unsigned long)prctl_map->__m1 __op				\
1756	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1757	error  = __prctl_check_order(start_code, <, end_code);
1758	error |= __prctl_check_order(start_data, <, end_data);
1759	error |= __prctl_check_order(start_brk, <=, brk);
1760	error |= __prctl_check_order(arg_start, <=, arg_end);
1761	error |= __prctl_check_order(env_start, <=, env_end);
1762	if (error)
1763		goto out;
1764#undef __prctl_check_order
1765
1766	error = -EINVAL;
1767
1768	/*
1769	 * @brk should be after @end_data in traditional maps.
1770	 */
1771	if (prctl_map->start_brk <= prctl_map->end_data ||
1772	    prctl_map->brk <= prctl_map->end_data)
1773		goto out;
1774
1775	/*
 1776	 * Nor should we allow the limits to be overridden if they are already set.
1777	 */
1778	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1779			      prctl_map->start_brk, prctl_map->end_data,
1780			      prctl_map->start_data))
1781			goto out;
1782
1783	/*
1784	 * Someone is trying to cheat the auxv vector.
1785	 */
1786	if (prctl_map->auxv_size) {
1787		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
1788			goto out;
1789	}
1790
1791	/*
1792	 * Finally, make sure the caller has the rights to
1793	 * change /proc/pid/exe link: only local root should
1794	 * be allowed to.
1795	 */
1796	if (prctl_map->exe_fd != (u32)-1) {
1797		struct user_namespace *ns = current_user_ns();
1798		const struct cred *cred = current_cred();
1799
1800		if (!uid_eq(cred->uid, make_kuid(ns, 0)) ||
1801		    !gid_eq(cred->gid, make_kgid(ns, 0)))
1802			goto out;
1803	}
1804
1805	error = 0;
1806out:
1807	return error;
1808}
1809
1810#ifdef CONFIG_CHECKPOINT_RESTORE
1811static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1812{
1813	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1814	unsigned long user_auxv[AT_VECTOR_SIZE];
1815	struct mm_struct *mm = current->mm;
1816	int error;
1817
1818	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1819	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1820
1821	if (opt == PR_SET_MM_MAP_SIZE)
1822		return put_user((unsigned int)sizeof(prctl_map),
1823				(unsigned int __user *)addr);
1824
1825	if (data_size != sizeof(prctl_map))
1826		return -EINVAL;
1827
1828	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1829		return -EFAULT;
1830
1831	error = validate_prctl_map(&prctl_map);
1832	if (error)
1833		return error;
1834
1835	if (prctl_map.auxv_size) {
1836		memset(user_auxv, 0, sizeof(user_auxv));
1837		if (copy_from_user(user_auxv,
1838				   (const void __user *)prctl_map.auxv,
1839				   prctl_map.auxv_size))
1840			return -EFAULT;
1841
1842		/* Last entry must be AT_NULL as specification requires */
1843		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1844		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
1845	}
1846
1847	if (prctl_map.exe_fd != (u32)-1) {
1848		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
1849		if (error)
1850			return error;
1851	}
1852
1853	down_write(&mm->mmap_sem);
1854
1855	/*
 1856	 * We don't validate that these members point to real, present
 1857	 * VMAs, because the application may already have unmapped the
 1858	 * corresponding VMAs and the kernel uses these members mostly for
 1859	 * statistics output in procfs, except for
 1860	 *
 1861	 *  - @start_brk/@brk, which are used in do_brk; the kernel does look
 1862	 *    up VMAs when updating these members, so a bogus value written
 1863	 *    here makes the kernel complain about the userspace program but
 1864	 *    won't lead to any problem in the kernel itself
1865	 */
1866
1867	mm->start_code	= prctl_map.start_code;
1868	mm->end_code	= prctl_map.end_code;
1869	mm->start_data	= prctl_map.start_data;
1870	mm->end_data	= prctl_map.end_data;
1871	mm->start_brk	= prctl_map.start_brk;
1872	mm->brk		= prctl_map.brk;
1873	mm->start_stack	= prctl_map.start_stack;
1874	mm->arg_start	= prctl_map.arg_start;
1875	mm->arg_end	= prctl_map.arg_end;
1876	mm->env_start	= prctl_map.env_start;
1877	mm->env_end	= prctl_map.env_end;
1878
1879	/*
 1880	 * Note this update of @saved_auxv is lockless, thus
 1881	 * if someone reads this member in procfs while we're
 1882	 * updating, they may get partly updated results. It's a
 1883	 * known and acceptable trade-off: we leave it as is so as
 1884	 * not to introduce additional locks here and make the kernel
 1885	 * more complex.
1886	 */
1887	if (prctl_map.auxv_size)
1888		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
1889
1890	up_write(&mm->mmap_sem);
1891	return 0;
1892}
1893#endif /* CONFIG_CHECKPOINT_RESTORE */
1894
1895static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
1896			  unsigned long len)
1897{
1898	/*
1899	 * This doesn't move the auxiliary vector itself since it's pinned to
1900	 * mm_struct, but it permits filling the vector with new values.  It's
1901	 * up to the caller to provide sane values here, otherwise userspace
1902	 * tools which use this vector might be unhappy.
1903	 */
1904	unsigned long user_auxv[AT_VECTOR_SIZE];
1905
1906	if (len > sizeof(user_auxv))
1907		return -EINVAL;
1908
1909	if (copy_from_user(user_auxv, (const void __user *)addr, len))
1910		return -EFAULT;
1911
1912	/* Make sure the last entry is always AT_NULL */
1913	user_auxv[AT_VECTOR_SIZE - 2] = 0;
1914	user_auxv[AT_VECTOR_SIZE - 1] = 0;
1915
1916	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1917
1918	task_lock(current);
1919	memcpy(mm->saved_auxv, user_auxv, len);
1920	task_unlock(current);
1921
1922	return 0;
1923}
1924
1925static int prctl_set_mm(int opt, unsigned long addr,
1926			unsigned long arg4, unsigned long arg5)
1927{
1928	struct mm_struct *mm = current->mm;
1929	struct prctl_mm_map prctl_map;
1930	struct vm_area_struct *vma;
1931	int error;
1932
1933	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
1934			      opt != PR_SET_MM_MAP &&
1935			      opt != PR_SET_MM_MAP_SIZE)))
1936		return -EINVAL;
1937
1938#ifdef CONFIG_CHECKPOINT_RESTORE
1939	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
1940		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
1941#endif
1942
1943	if (!capable(CAP_SYS_RESOURCE))
1944		return -EPERM;
1945
1946	if (opt == PR_SET_MM_EXE_FILE)
1947		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
1948
1949	if (opt == PR_SET_MM_AUXV)
1950		return prctl_set_auxv(mm, addr, arg4);
1951
1952	if (addr >= TASK_SIZE || addr < mmap_min_addr)
1953		return -EINVAL;
1954
1955	error = -EINVAL;
1956
1957	down_write(&mm->mmap_sem);
1958	vma = find_vma(mm, addr);
1959
1960	prctl_map.start_code	= mm->start_code;
1961	prctl_map.end_code	= mm->end_code;
1962	prctl_map.start_data	= mm->start_data;
1963	prctl_map.end_data	= mm->end_data;
1964	prctl_map.start_brk	= mm->start_brk;
1965	prctl_map.brk		= mm->brk;
1966	prctl_map.start_stack	= mm->start_stack;
1967	prctl_map.arg_start	= mm->arg_start;
1968	prctl_map.arg_end	= mm->arg_end;
1969	prctl_map.env_start	= mm->env_start;
1970	prctl_map.env_end	= mm->env_end;
1971	prctl_map.auxv		= NULL;
1972	prctl_map.auxv_size	= 0;
1973	prctl_map.exe_fd	= -1;
1974
1975	switch (opt) {
1976	case PR_SET_MM_START_CODE:
1977		prctl_map.start_code = addr;
1978		break;
1979	case PR_SET_MM_END_CODE:
1980		prctl_map.end_code = addr;
1981		break;
1982	case PR_SET_MM_START_DATA:
1983		prctl_map.start_data = addr;
1984		break;
1985	case PR_SET_MM_END_DATA:
1986		prctl_map.end_data = addr;
1987		break;
1988	case PR_SET_MM_START_STACK:
1989		prctl_map.start_stack = addr;
1990		break;
1991	case PR_SET_MM_START_BRK:
1992		prctl_map.start_brk = addr;
1993		break;
1994	case PR_SET_MM_BRK:
1995		prctl_map.brk = addr;
1996		break;
1997	case PR_SET_MM_ARG_START:
1998		prctl_map.arg_start = addr;
1999		break;
2000	case PR_SET_MM_ARG_END:
2001		prctl_map.arg_end = addr;
2002		break;
2003	case PR_SET_MM_ENV_START:
2004		prctl_map.env_start = addr;
2005		break;
2006	case PR_SET_MM_ENV_END:
2007		prctl_map.env_end = addr;
2008		break;
2009	default:
2010		goto out;
2011	}
2012
2013	error = validate_prctl_map(&prctl_map);
2014	if (error)
2015		goto out;
2016
2017	switch (opt) {
2018	/*
2019	 * If the command line arguments and environment
2020	 * are placed somewhere else on the stack, we can
2021	 * set them up here: ARG_START/END to set up the
2022	 * command line arguments and ENV_START/END for
2023	 * the environment.
2024	 */
2025	case PR_SET_MM_START_STACK:
2026	case PR_SET_MM_ARG_START:
2027	case PR_SET_MM_ARG_END:
2028	case PR_SET_MM_ENV_START:
2029	case PR_SET_MM_ENV_END:
2030		if (!vma) {
2031			error = -EFAULT;
2032			goto out;
2033		}
2034	}
2035
2036	mm->start_code	= prctl_map.start_code;
2037	mm->end_code	= prctl_map.end_code;
2038	mm->start_data	= prctl_map.start_data;
2039	mm->end_data	= prctl_map.end_data;
2040	mm->start_brk	= prctl_map.start_brk;
2041	mm->brk		= prctl_map.brk;
2042	mm->start_stack	= prctl_map.start_stack;
2043	mm->arg_start	= prctl_map.arg_start;
2044	mm->arg_end	= prctl_map.arg_end;
2045	mm->env_start	= prctl_map.env_start;
2046	mm->env_end	= prctl_map.env_end;
2047
2048	error = 0;
2049out:
2050	up_write(&mm->mmap_sem);
2051	return error;
2052}
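/*
 * Usage sketch (userspace, illustrative only): a checkpoint/restore-style
 * tool updating the recorded brk value through PR_SET_MM.  This assumes the
 * caller has CAP_SYS_RESOURCE and that the address passes the range and
 * validate_prctl_map() checks above; PR_SET_MM and PR_SET_MM_BRK come from
 * <linux/prctl.h>.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int main(void)
 *	{
 *		unsigned long new_brk = (unsigned long)sbrk(0);
 *
 *		if (prctl(PR_SET_MM, PR_SET_MM_BRK, new_brk, 0, 0))
 *			perror("prctl(PR_SET_MM, PR_SET_MM_BRK)");
 *		return 0;
 *	}
 */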
2053
2054#ifdef CONFIG_CHECKPOINT_RESTORE
2055static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2056{
2057	return put_user(me->clear_child_tid, tid_addr);
2058}
2059#else
2060static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2061{
2062	return -EINVAL;
2063}
2064#endif
2065
2066SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2067		unsigned long, arg4, unsigned long, arg5)
2068{
2069	struct task_struct *me = current;
2070	unsigned char comm[sizeof(me->comm)];
2071	long error;
2072
2073	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2074	if (error != -ENOSYS)
2075		return error;
2076
2077	error = 0;
2078	switch (option) {
2079	case PR_SET_PDEATHSIG:
2080		if (!valid_signal(arg2)) {
2081			error = -EINVAL;
2082			break;
2083		}
2084		me->pdeath_signal = arg2;
2085		break;
2086	case PR_GET_PDEATHSIG:
2087		error = put_user(me->pdeath_signal, (int __user *)arg2);
2088		break;
2089	case PR_GET_DUMPABLE:
2090		error = get_dumpable(me->mm);
2091		break;
2092	case PR_SET_DUMPABLE:
2093		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2094			error = -EINVAL;
2095			break;
2096		}
2097		set_dumpable(me->mm, arg2);
2098		break;
2099
2100	case PR_SET_UNALIGN:
2101		error = SET_UNALIGN_CTL(me, arg2);
2102		break;
2103	case PR_GET_UNALIGN:
2104		error = GET_UNALIGN_CTL(me, arg2);
2105		break;
2106	case PR_SET_FPEMU:
2107		error = SET_FPEMU_CTL(me, arg2);
2108		break;
2109	case PR_GET_FPEMU:
2110		error = GET_FPEMU_CTL(me, arg2);
2111		break;
2112	case PR_SET_FPEXC:
2113		error = SET_FPEXC_CTL(me, arg2);
2114		break;
2115	case PR_GET_FPEXC:
2116		error = GET_FPEXC_CTL(me, arg2);
2117		break;
2118	case PR_GET_TIMING:
2119		error = PR_TIMING_STATISTICAL;
2120		break;
2121	case PR_SET_TIMING:
2122		if (arg2 != PR_TIMING_STATISTICAL)
2123			error = -EINVAL;
2124		break;
2125	case PR_SET_NAME:
2126		comm[sizeof(me->comm) - 1] = 0;
2127		if (strncpy_from_user(comm, (char __user *)arg2,
2128				      sizeof(me->comm) - 1) < 0)
2129			return -EFAULT;
2130		set_task_comm(me, comm);
2131		proc_comm_connector(me);
2132		break;
2133	case PR_GET_NAME:
2134		get_task_comm(comm, me);
2135		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2136			return -EFAULT;
2137		break;
2138	case PR_GET_ENDIAN:
2139		error = GET_ENDIAN(me, arg2);
2140		break;
2141	case PR_SET_ENDIAN:
2142		error = SET_ENDIAN(me, arg2);
2143		break;
2144	case PR_GET_SECCOMP:
2145		error = prctl_get_seccomp();
2146		break;
2147	case PR_SET_SECCOMP:
2148		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2149		break;
2150	case PR_GET_TSC:
2151		error = GET_TSC_CTL(arg2);
2152		break;
2153	case PR_SET_TSC:
2154		error = SET_TSC_CTL(arg2);
2155		break;
2156	case PR_TASK_PERF_EVENTS_DISABLE:
2157		error = perf_event_task_disable();
2158		break;
2159	case PR_TASK_PERF_EVENTS_ENABLE:
2160		error = perf_event_task_enable();
2161		break;
2162	case PR_GET_TIMERSLACK:
2163		if (current->timer_slack_ns > ULONG_MAX)
2164			error = ULONG_MAX;
2165		else
2166			error = current->timer_slack_ns;
2167		break;
2168	case PR_SET_TIMERSLACK:
2169		if (arg2 <= 0)
2170			current->timer_slack_ns =
2171					current->default_timer_slack_ns;
2172		else
2173			current->timer_slack_ns = arg2;
2174		break;
2175	case PR_MCE_KILL:
2176		if (arg4 | arg5)
2177			return -EINVAL;
2178		switch (arg2) {
2179		case PR_MCE_KILL_CLEAR:
2180			if (arg3 != 0)
2181				return -EINVAL;
2182			current->flags &= ~PF_MCE_PROCESS;
2183			break;
2184		case PR_MCE_KILL_SET:
2185			current->flags |= PF_MCE_PROCESS;
2186			if (arg3 == PR_MCE_KILL_EARLY)
2187				current->flags |= PF_MCE_EARLY;
2188			else if (arg3 == PR_MCE_KILL_LATE)
2189				current->flags &= ~PF_MCE_EARLY;
2190			else if (arg3 == PR_MCE_KILL_DEFAULT)
2191				current->flags &=
2192						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2193			else
2194				return -EINVAL;
2195			break;
2196		default:
2197			return -EINVAL;
2198		}
2199		break;
2200	case PR_MCE_KILL_GET:
2201		if (arg2 | arg3 | arg4 | arg5)
2202			return -EINVAL;
2203		if (current->flags & PF_MCE_PROCESS)
2204			error = (current->flags & PF_MCE_EARLY) ?
2205				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2206		else
2207			error = PR_MCE_KILL_DEFAULT;
2208		break;
2209	case PR_SET_MM:
2210		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2211		break;
2212	case PR_GET_TID_ADDRESS:
2213		error = prctl_get_tid_address(me, (int __user **)arg2);
2214		break;
2215	case PR_SET_CHILD_SUBREAPER:
2216		me->signal->is_child_subreaper = !!arg2;
2217		break;
2218	case PR_GET_CHILD_SUBREAPER:
2219		error = put_user(me->signal->is_child_subreaper,
2220				 (int __user *)arg2);
2221		break;
2222	case PR_SET_NO_NEW_PRIVS:
2223		if (arg2 != 1 || arg3 || arg4 || arg5)
2224			return -EINVAL;
2225
2226		task_set_no_new_privs(current);
2227		break;
2228	case PR_GET_NO_NEW_PRIVS:
2229		if (arg2 || arg3 || arg4 || arg5)
2230			return -EINVAL;
2231		return task_no_new_privs(current) ? 1 : 0;
2232	case PR_GET_THP_DISABLE:
2233		if (arg2 || arg3 || arg4 || arg5)
2234			return -EINVAL;
2235		error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
2236		break;
2237	case PR_SET_THP_DISABLE:
2238		if (arg3 || arg4 || arg5)
2239			return -EINVAL;
2240		if (down_write_killable(&me->mm->mmap_sem))
2241			return -EINTR;
2242		if (arg2)
2243			me->mm->def_flags |= VM_NOHUGEPAGE;
2244		else
2245			me->mm->def_flags &= ~VM_NOHUGEPAGE;
2246		up_write(&me->mm->mmap_sem);
2247		break;
2248	case PR_MPX_ENABLE_MANAGEMENT:
2249		if (arg2 || arg3 || arg4 || arg5)
2250			return -EINVAL;
2251		error = MPX_ENABLE_MANAGEMENT();
2252		break;
2253	case PR_MPX_DISABLE_MANAGEMENT:
2254		if (arg2 || arg3 || arg4 || arg5)
2255			return -EINVAL;
2256		error = MPX_DISABLE_MANAGEMENT();
2257		break;
2258	case PR_SET_FP_MODE:
2259		error = SET_FP_MODE(me, arg2);
2260		break;
2261	case PR_GET_FP_MODE:
2262		error = GET_FP_MODE(me);
2263		break;
2264	default:
2265		error = -EINVAL;
2266		break;
2267	}
2268	return error;
2269}
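/*
 * Usage sketch (userspace, illustrative only): the PR_SET_NAME/PR_GET_NAME
 * pair handled in the switch above.  The name is silently truncated to fit
 * task_struct::comm (sizeof(comm) - 1 bytes plus the terminating NUL), and
 * PR_GET_NAME copies the full sizeof(comm) back, so the buffer below must
 * hold at least 16 bytes.
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		char name[16] = "";
 *
 *		prctl(PR_SET_NAME, "my-worker", 0, 0, 0);
 *		prctl(PR_GET_NAME, name, 0, 0, 0);
 *		printf("comm is now \"%s\"\n", name);
 *		return 0;
 *	}
 */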
2270
2271SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2272		struct getcpu_cache __user *, unused)
2273{
2274	int err = 0;
2275	int cpu = raw_smp_processor_id();
2276
2277	if (cpup)
2278		err |= put_user(cpu, cpup);
2279	if (nodep)
2280		err |= put_user(cpu_to_node(cpu), nodep);
2281	return err ? -EFAULT : 0;
2282}
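/*
 * Usage sketch (userspace, illustrative only): the third "cache" argument of
 * getcpu() is unused above, so callers simply pass NULL.  glibc also exposes
 * sched_getcpu(), which returns the same CPU number (via the vDSO where
 * available).
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		printf("running on CPU %d\n", sched_getcpu());
 *		return 0;
 *	}
 */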
2283
2284/**
2285 * do_sysinfo - fill in sysinfo struct
2286 * @info: pointer to buffer to fill
2287 */
2288static int do_sysinfo(struct sysinfo *info)
2289{
2290	unsigned long mem_total, sav_total;
2291	unsigned int mem_unit, bitcount;
2292	struct timespec tp;
2293
2294	memset(info, 0, sizeof(struct sysinfo));
2295
2296	get_monotonic_boottime(&tp);
2297	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2298
2299	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2300
2301	info->procs = nr_threads;
2302
2303	si_meminfo(info);
2304	si_swapinfo(info);
2305
2306	/*
2307	 * If the sum of all the available memory (i.e. ram + swap)
2308	 * is less than can be stored in a 32 bit unsigned long then
2309	 * we can be binary compatible with 2.2.x kernels.  If not,
2310	 * well, in that case 2.2.x was broken anyways...
2311	 *
2312	 *  -Erik Andersen <andersee@debian.org>
2313	 */
2314
2315	mem_total = info->totalram + info->totalswap;
2316	if (mem_total < info->totalram || mem_total < info->totalswap)
2317		goto out;
2318	bitcount = 0;
2319	mem_unit = info->mem_unit;
2320	while (mem_unit > 1) {
2321		bitcount++;
2322		mem_unit >>= 1;
2323		sav_total = mem_total;
2324		mem_total <<= 1;
2325		if (mem_total < sav_total)
2326			goto out;
2327	}
2328
2329	/*
2330	 * If mem_total did not overflow, multiply all memory values by
2331	 * info->mem_unit and set it to 1.  This leaves things compatible
2332	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2333	 * kernels...
2334	 */
2335
2336	info->mem_unit = 1;
2337	info->totalram <<= bitcount;
2338	info->freeram <<= bitcount;
2339	info->sharedram <<= bitcount;
2340	info->bufferram <<= bitcount;
2341	info->totalswap <<= bitcount;
2342	info->freeswap <<= bitcount;
2343	info->totalhigh <<= bitcount;
2344	info->freehigh <<= bitcount;
2345
2346out:
2347	return 0;
2348}
2349
2350SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2351{
2352	struct sysinfo val;
2353
2354	do_sysinfo(&val);
2355
2356	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2357		return -EFAULT;
2358
2359	return 0;
2360}
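/*
 * Usage sketch (userspace, illustrative only): the memory fields returned by
 * sysinfo(2) are expressed in units of mem_unit bytes, so multiply before
 * printing (after do_sysinfo() above, mem_unit has often been folded down to
 * 1 when the totals fit in an unsigned long).
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	int main(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si))
 *			return 1;
 *		printf("up %ld s, total ram %llu bytes\n", si.uptime,
 *		       (unsigned long long)si.totalram * si.mem_unit);
 *		return 0;
 *	}
 */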
2361
2362#ifdef CONFIG_COMPAT
2363struct compat_sysinfo {
2364	s32 uptime;
2365	u32 loads[3];
2366	u32 totalram;
2367	u32 freeram;
2368	u32 sharedram;
2369	u32 bufferram;
2370	u32 totalswap;
2371	u32 freeswap;
2372	u16 procs;
2373	u16 pad;
2374	u32 totalhigh;
2375	u32 freehigh;
2376	u32 mem_unit;
2377	char _f[20-2*sizeof(u32)-sizeof(int)];
2378};
2379
2380COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2381{
2382	struct sysinfo s;
2383
2384	do_sysinfo(&s);
2385
2386	/* Check to see if any memory value is too large for 32-bit and scale
2387	 *  down if needed
2388	 */
2389	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2390		int bitcount = 0;
2391
2392		while (s.mem_unit < PAGE_SIZE) {
2393			s.mem_unit <<= 1;
2394			bitcount++;
2395		}
2396
2397		s.totalram >>= bitcount;
2398		s.freeram >>= bitcount;
2399		s.sharedram >>= bitcount;
2400		s.bufferram >>= bitcount;
2401		s.totalswap >>= bitcount;
2402		s.freeswap >>= bitcount;
2403		s.totalhigh >>= bitcount;
2404		s.freehigh >>= bitcount;
2405	}
2406
2407	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
2408	    __put_user(s.uptime, &info->uptime) ||
2409	    __put_user(s.loads[0], &info->loads[0]) ||
2410	    __put_user(s.loads[1], &info->loads[1]) ||
2411	    __put_user(s.loads[2], &info->loads[2]) ||
2412	    __put_user(s.totalram, &info->totalram) ||
2413	    __put_user(s.freeram, &info->freeram) ||
2414	    __put_user(s.sharedram, &info->sharedram) ||
2415	    __put_user(s.bufferram, &info->bufferram) ||
2416	    __put_user(s.totalswap, &info->totalswap) ||
2417	    __put_user(s.freeswap, &info->freeswap) ||
2418	    __put_user(s.procs, &info->procs) ||
2419	    __put_user(s.totalhigh, &info->totalhigh) ||
2420	    __put_user(s.freehigh, &info->freehigh) ||
2421	    __put_user(s.mem_unit, &info->mem_unit))
2422		return -EFAULT;
2423
2424	return 0;
2425}
2426#endif /* CONFIG_COMPAT */
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/kernel/sys.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 */
   7
   8#include <linux/export.h>
   9#include <linux/mm.h>
  10#include <linux/utsname.h>
  11#include <linux/mman.h>
  12#include <linux/reboot.h>
  13#include <linux/prctl.h>
  14#include <linux/highuid.h>
  15#include <linux/fs.h>
  16#include <linux/kmod.h>
  17#include <linux/perf_event.h>
  18#include <linux/resource.h>
  19#include <linux/kernel.h>
  20#include <linux/workqueue.h>
  21#include <linux/capability.h>
  22#include <linux/device.h>
  23#include <linux/key.h>
  24#include <linux/times.h>
  25#include <linux/posix-timers.h>
  26#include <linux/security.h>
  27#include <linux/dcookies.h>
  28#include <linux/suspend.h>
  29#include <linux/tty.h>
  30#include <linux/signal.h>
  31#include <linux/cn_proc.h>
  32#include <linux/getcpu.h>
  33#include <linux/task_io_accounting_ops.h>
  34#include <linux/seccomp.h>
  35#include <linux/cpu.h>
  36#include <linux/personality.h>
  37#include <linux/ptrace.h>
  38#include <linux/fs_struct.h>
  39#include <linux/file.h>
  40#include <linux/mount.h>
  41#include <linux/gfp.h>
  42#include <linux/syscore_ops.h>
  43#include <linux/version.h>
  44#include <linux/ctype.h>
  45
  46#include <linux/compat.h>
  47#include <linux/syscalls.h>
  48#include <linux/kprobes.h>
  49#include <linux/user_namespace.h>
  50#include <linux/time_namespace.h>
  51#include <linux/binfmts.h>
  52
  53#include <linux/sched.h>
  54#include <linux/sched/autogroup.h>
  55#include <linux/sched/loadavg.h>
  56#include <linux/sched/stat.h>
  57#include <linux/sched/mm.h>
  58#include <linux/sched/coredump.h>
  59#include <linux/sched/task.h>
  60#include <linux/sched/cputime.h>
  61#include <linux/rcupdate.h>
  62#include <linux/uidgid.h>
  63#include <linux/cred.h>
  64
  65#include <linux/nospec.h>
  66
  67#include <linux/kmsg_dump.h>
  68/* Move somewhere else to avoid recompiling? */
  69#include <generated/utsrelease.h>
  70
  71#include <linux/uaccess.h>
  72#include <asm/io.h>
  73#include <asm/unistd.h>
  74
  75#include "uid16.h"
  76
  77#ifndef SET_UNALIGN_CTL
  78# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
  79#endif
  80#ifndef GET_UNALIGN_CTL
  81# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
  82#endif
  83#ifndef SET_FPEMU_CTL
  84# define SET_FPEMU_CTL(a, b)	(-EINVAL)
  85#endif
  86#ifndef GET_FPEMU_CTL
  87# define GET_FPEMU_CTL(a, b)	(-EINVAL)
  88#endif
  89#ifndef SET_FPEXC_CTL
  90# define SET_FPEXC_CTL(a, b)	(-EINVAL)
  91#endif
  92#ifndef GET_FPEXC_CTL
  93# define GET_FPEXC_CTL(a, b)	(-EINVAL)
  94#endif
  95#ifndef GET_ENDIAN
  96# define GET_ENDIAN(a, b)	(-EINVAL)
  97#endif
  98#ifndef SET_ENDIAN
  99# define SET_ENDIAN(a, b)	(-EINVAL)
 100#endif
 101#ifndef GET_TSC_CTL
 102# define GET_TSC_CTL(a)		(-EINVAL)
 103#endif
 104#ifndef SET_TSC_CTL
 105# define SET_TSC_CTL(a)		(-EINVAL)
 106#endif
 107#ifndef GET_FP_MODE
 108# define GET_FP_MODE(a)		(-EINVAL)
 109#endif
 110#ifndef SET_FP_MODE
 111# define SET_FP_MODE(a,b)	(-EINVAL)
 112#endif
 113#ifndef SVE_SET_VL
 114# define SVE_SET_VL(a)		(-EINVAL)
 115#endif
 116#ifndef SVE_GET_VL
 117# define SVE_GET_VL()		(-EINVAL)
 118#endif
 119#ifndef PAC_RESET_KEYS
 120# define PAC_RESET_KEYS(a, b)	(-EINVAL)
 121#endif
 122#ifndef SET_TAGGED_ADDR_CTRL
 123# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
 124#endif
 125#ifndef GET_TAGGED_ADDR_CTRL
 126# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
 127#endif
 128
 129/*
 130 * this is where the system-wide overflow UID and GID are defined, for
 131 * architectures that now have 32-bit UID/GID but didn't in the past
 132 */
 133
 134int overflowuid = DEFAULT_OVERFLOWUID;
 135int overflowgid = DEFAULT_OVERFLOWGID;
 136
 137EXPORT_SYMBOL(overflowuid);
 138EXPORT_SYMBOL(overflowgid);
 139
 140/*
 141 * the same as above, but for filesystems which can only store a 16-bit
 142 * UID and GID. as such, this is needed on all architectures
 143 */
 144
 145int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
 146int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
 147
 148EXPORT_SYMBOL(fs_overflowuid);
 149EXPORT_SYMBOL(fs_overflowgid);
 150
 151/*
 152 * Returns true if current's euid is the same as p's uid or euid,
 153 * or if current has CAP_SYS_NICE in p's user_ns.
 154 *
 155 * Called with rcu_read_lock, creds are safe
 156 */
 157static bool set_one_prio_perm(struct task_struct *p)
 158{
 159	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
 160
 161	if (uid_eq(pcred->uid,  cred->euid) ||
 162	    uid_eq(pcred->euid, cred->euid))
 163		return true;
 164	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
 165		return true;
 166	return false;
 167}
 168
 169/*
 170 * set the priority of a task
 171 * - the caller must hold the RCU read lock
 172 */
 173static int set_one_prio(struct task_struct *p, int niceval, int error)
 174{
 175	int no_nice;
 176
 177	if (!set_one_prio_perm(p)) {
 178		error = -EPERM;
 179		goto out;
 180	}
 181	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
 182		error = -EACCES;
 183		goto out;
 184	}
 185	no_nice = security_task_setnice(p, niceval);
 186	if (no_nice) {
 187		error = no_nice;
 188		goto out;
 189	}
 190	if (error == -ESRCH)
 191		error = 0;
 192	set_user_nice(p, niceval);
 193out:
 194	return error;
 195}
 196
 197SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 198{
 199	struct task_struct *g, *p;
 200	struct user_struct *user;
 201	const struct cred *cred = current_cred();
 202	int error = -EINVAL;
 203	struct pid *pgrp;
 204	kuid_t uid;
 205
 206	if (which > PRIO_USER || which < PRIO_PROCESS)
 207		goto out;
 208
 209	/* normalize: avoid signed division (rounding problems) */
 210	error = -ESRCH;
 211	if (niceval < MIN_NICE)
 212		niceval = MIN_NICE;
 213	if (niceval > MAX_NICE)
 214		niceval = MAX_NICE;
 215
 216	rcu_read_lock();
 217	read_lock(&tasklist_lock);
 218	switch (which) {
 219	case PRIO_PROCESS:
 220		if (who)
 221			p = find_task_by_vpid(who);
 222		else
 223			p = current;
 224		if (p)
 225			error = set_one_prio(p, niceval, error);
 226		break;
 227	case PRIO_PGRP:
 228		if (who)
 229			pgrp = find_vpid(who);
 230		else
 231			pgrp = task_pgrp(current);
 232		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 233			error = set_one_prio(p, niceval, error);
 234		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 235		break;
 236	case PRIO_USER:
 237		uid = make_kuid(cred->user_ns, who);
 238		user = cred->user;
 239		if (!who)
 240			uid = cred->uid;
 241		else if (!uid_eq(uid, cred->uid)) {
 242			user = find_user(uid);
 243			if (!user)
 244				goto out_unlock;	/* No processes for this user */
 245		}
 246		do_each_thread(g, p) {
 247			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
 248				error = set_one_prio(p, niceval, error);
 249		} while_each_thread(g, p);
 250		if (!uid_eq(uid, cred->uid))
 251			free_uid(user);		/* For find_user() */
 252		break;
 253	}
 254out_unlock:
 255	read_unlock(&tasklist_lock);
 256	rcu_read_unlock();
 257out:
 258	return error;
 259}
 260
 261/*
 262 * Ugh. To avoid negative return values, "getpriority()" will
 263 * not return the normal nice-value, but a negated value that
 264 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 265 * to stay compatible.
 266 */
 267SYSCALL_DEFINE2(getpriority, int, which, int, who)
 268{
 269	struct task_struct *g, *p;
 270	struct user_struct *user;
 271	const struct cred *cred = current_cred();
 272	long niceval, retval = -ESRCH;
 273	struct pid *pgrp;
 274	kuid_t uid;
 275
 276	if (which > PRIO_USER || which < PRIO_PROCESS)
 277		return -EINVAL;
 278
 279	rcu_read_lock();
 280	read_lock(&tasklist_lock);
 281	switch (which) {
 282	case PRIO_PROCESS:
 283		if (who)
 284			p = find_task_by_vpid(who);
 285		else
 286			p = current;
 287		if (p) {
 288			niceval = nice_to_rlimit(task_nice(p));
 289			if (niceval > retval)
 290				retval = niceval;
 291		}
 292		break;
 293	case PRIO_PGRP:
 294		if (who)
 295			pgrp = find_vpid(who);
 296		else
 297			pgrp = task_pgrp(current);
 298		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 299			niceval = nice_to_rlimit(task_nice(p));
 300			if (niceval > retval)
 301				retval = niceval;
 302		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 303		break;
 304	case PRIO_USER:
 305		uid = make_kuid(cred->user_ns, who);
 306		user = cred->user;
 307		if (!who)
 308			uid = cred->uid;
 309		else if (!uid_eq(uid, cred->uid)) {
 310			user = find_user(uid);
 311			if (!user)
 312				goto out_unlock;	/* No processes for this user */
 313		}
 314		do_each_thread(g, p) {
 315			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
 316				niceval = nice_to_rlimit(task_nice(p));
 317				if (niceval > retval)
 318					retval = niceval;
 319			}
 320		} while_each_thread(g, p);
 321		if (!uid_eq(uid, cred->uid))
 322			free_uid(user);		/* for find_user() */
 323		break;
 324	}
 325out_unlock:
 326	read_unlock(&tasklist_lock);
 327	rcu_read_unlock();
 328
 329	return retval;
 330}
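/*
 * Worked example (illustrative only) of the offset described above: the raw
 * syscall returns nice_to_rlimit(nice), i.e. 20 - nice, so a task at nice 0
 * yields 20 and a task at nice -5 yields 25.  The C library wrapper undoes
 * the offset and hands back the ordinary -20..19 value.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long raw  = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *		int  nice = getpriority(PRIO_PROCESS, 0);
 *
 *		printf("raw=%ld wrapper=%d\n", raw, nice);
 *		return 0;
 *	}
 */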
 331
 332/*
 333 * Unprivileged users may change the real gid to the effective gid
 334 * or vice versa.  (BSD-style)
 335 *
 336 * If you set the real gid at all, or set the effective gid to a value not
 337 * equal to the real gid, then the saved gid is set to the new effective gid.
 338 *
 339 * This makes it possible for a setgid program to completely drop its
 340 * privileges, which is often a useful assertion to make when you are doing
 341 * a security audit over a program.
 342 *
 343 * The general idea is that a program which uses just setregid() will be
 344 * 100% compatible with BSD.  A program which uses just setgid() will be
 345 * 100% compatible with POSIX with saved IDs.
 346 *
 347 * SMP: There are no races; the GIDs are checked only by filesystem
 348 *      operations (as far as semantic preservation is concerned).
 349 */
 350#ifdef CONFIG_MULTIUSER
 351long __sys_setregid(gid_t rgid, gid_t egid)
 352{
 353	struct user_namespace *ns = current_user_ns();
 354	const struct cred *old;
 355	struct cred *new;
 356	int retval;
 357	kgid_t krgid, kegid;
 358
 359	krgid = make_kgid(ns, rgid);
 360	kegid = make_kgid(ns, egid);
 361
 362	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 363		return -EINVAL;
 364	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 365		return -EINVAL;
 366
 367	new = prepare_creds();
 368	if (!new)
 369		return -ENOMEM;
 370	old = current_cred();
 371
 372	retval = -EPERM;
 373	if (rgid != (gid_t) -1) {
 374		if (gid_eq(old->gid, krgid) ||
 375		    gid_eq(old->egid, krgid) ||
 376		    ns_capable(old->user_ns, CAP_SETGID))
 377			new->gid = krgid;
 378		else
 379			goto error;
 380	}
 381	if (egid != (gid_t) -1) {
 382		if (gid_eq(old->gid, kegid) ||
 383		    gid_eq(old->egid, kegid) ||
 384		    gid_eq(old->sgid, kegid) ||
 385		    ns_capable(old->user_ns, CAP_SETGID))
 386			new->egid = kegid;
 387		else
 388			goto error;
 389	}
 390
 391	if (rgid != (gid_t) -1 ||
 392	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
 393		new->sgid = new->egid;
 394	new->fsgid = new->egid;
 395
 396	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
 397	if (retval < 0)
 398		goto error;
 399
 400	return commit_creds(new);
 401
 402error:
 403	abort_creds(new);
 404	return retval;
 405}
 406
 407SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 408{
 409	return __sys_setregid(rgid, egid);
 410}
 411
 412/*
 413 * setgid() is implemented like SysV w/ SAVED_IDS
 414 *
 415 * SMP: Same implicit races as above.
 416 */
 417long __sys_setgid(gid_t gid)
 418{
 419	struct user_namespace *ns = current_user_ns();
 420	const struct cred *old;
 421	struct cred *new;
 422	int retval;
 423	kgid_t kgid;
 424
 425	kgid = make_kgid(ns, gid);
 426	if (!gid_valid(kgid))
 427		return -EINVAL;
 428
 429	new = prepare_creds();
 430	if (!new)
 431		return -ENOMEM;
 432	old = current_cred();
 433
 434	retval = -EPERM;
 435	if (ns_capable(old->user_ns, CAP_SETGID))
 436		new->gid = new->egid = new->sgid = new->fsgid = kgid;
 437	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
 438		new->egid = new->fsgid = kgid;
 439	else
 440		goto error;
 441
 442	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
 443	if (retval < 0)
 444		goto error;
 445
 446	return commit_creds(new);
 447
 448error:
 449	abort_creds(new);
 450	return retval;
 451}
 452
 453SYSCALL_DEFINE1(setgid, gid_t, gid)
 454{
 455	return __sys_setgid(gid);
 456}
 457
 458/*
 459 * change the user struct in a credentials set to match the new UID
 460 */
 461static int set_user(struct cred *new)
 462{
 463	struct user_struct *new_user;
 464
 465	new_user = alloc_uid(new->uid);
 466	if (!new_user)
 467		return -EAGAIN;
 468
 469	/*
 470	 * We don't fail in case of NPROC limit excess here because too many
 471	 * poorly written programs don't check set*uid() return code, assuming
 472	 * it never fails if called by root.  We may still enforce NPROC limit
 473	 * for programs doing set*uid()+execve() by harmlessly deferring the
 474	 * failure to the execve() stage.
 475	 */
 476	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
 477			new_user != INIT_USER)
 478		current->flags |= PF_NPROC_EXCEEDED;
 479	else
 480		current->flags &= ~PF_NPROC_EXCEEDED;
 481
 482	free_uid(new->user);
 483	new->user = new_user;
 484	return 0;
 485}
 486
 487/*
 488 * Unprivileged users may change the real uid to the effective uid
 489 * or vice versa.  (BSD-style)
 490 *
 491 * If you set the real uid at all, or set the effective uid to a value not
 492 * equal to the real uid, then the saved uid is set to the new effective uid.
 493 *
 494 * This makes it possible for a setuid program to completely drop its
 495 * privileges, which is often a useful assertion to make when you are doing
 496 * a security audit over a program.
 497 *
 498 * The general idea is that a program which uses just setreuid() will be
 499 * 100% compatible with BSD.  A program which uses just setuid() will be
 500 * 100% compatible with POSIX with saved IDs.
 501 */
 502long __sys_setreuid(uid_t ruid, uid_t euid)
 503{
 504	struct user_namespace *ns = current_user_ns();
 505	const struct cred *old;
 506	struct cred *new;
 507	int retval;
 508	kuid_t kruid, keuid;
 509
 510	kruid = make_kuid(ns, ruid);
 511	keuid = make_kuid(ns, euid);
 512
 513	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 514		return -EINVAL;
 515	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 516		return -EINVAL;
 517
 518	new = prepare_creds();
 519	if (!new)
 520		return -ENOMEM;
 521	old = current_cred();
 522
 523	retval = -EPERM;
 524	if (ruid != (uid_t) -1) {
 525		new->uid = kruid;
 526		if (!uid_eq(old->uid, kruid) &&
 527		    !uid_eq(old->euid, kruid) &&
 528		    !ns_capable_setid(old->user_ns, CAP_SETUID))
 529			goto error;
 530	}
 531
 532	if (euid != (uid_t) -1) {
 533		new->euid = keuid;
 534		if (!uid_eq(old->uid, keuid) &&
 535		    !uid_eq(old->euid, keuid) &&
 536		    !uid_eq(old->suid, keuid) &&
 537		    !ns_capable_setid(old->user_ns, CAP_SETUID))
 538			goto error;
 539	}
 540
 541	if (!uid_eq(new->uid, old->uid)) {
 542		retval = set_user(new);
 543		if (retval < 0)
 544			goto error;
 545	}
 546	if (ruid != (uid_t) -1 ||
 547	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
 548		new->suid = new->euid;
 549	new->fsuid = new->euid;
 550
 551	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
 552	if (retval < 0)
 553		goto error;
 554
 555	return commit_creds(new);
 556
 557error:
 558	abort_creds(new);
 559	return retval;
 560}
 561
 562SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 563{
 564	return __sys_setreuid(ruid, euid);
 565}
 566
 567/*
 568 * setuid() is implemented like SysV with SAVED_IDS
 569 *
 570 * Note that SAVED_ID's is deficient in that a setuid root program
 571 * like sendmail, for example, cannot set its uid to be a normal
 572 * user and then switch back, because if you're root, setuid() sets
 573 * the saved uid too.  If you don't like this, blame the bright people
 574 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 575 * will allow a root program to temporarily drop privileges and be able to
 576 * regain them by swapping the real and effective uid.
 577 */
 578long __sys_setuid(uid_t uid)
 579{
 580	struct user_namespace *ns = current_user_ns();
 581	const struct cred *old;
 582	struct cred *new;
 583	int retval;
 584	kuid_t kuid;
 585
 586	kuid = make_kuid(ns, uid);
 587	if (!uid_valid(kuid))
 588		return -EINVAL;
 589
 590	new = prepare_creds();
 591	if (!new)
 592		return -ENOMEM;
 593	old = current_cred();
 594
 595	retval = -EPERM;
 596	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
 597		new->suid = new->uid = kuid;
 598		if (!uid_eq(kuid, old->uid)) {
 599			retval = set_user(new);
 600			if (retval < 0)
 601				goto error;
 602		}
 603	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
 604		goto error;
 605	}
 606
 607	new->fsuid = new->euid = kuid;
 608
 609	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
 610	if (retval < 0)
 611		goto error;
 612
 613	return commit_creds(new);
 614
 615error:
 616	abort_creds(new);
 617	return retval;
 618}
 619
 620SYSCALL_DEFINE1(setuid, uid_t, uid)
 621{
 622	return __sys_setuid(uid);
 623}
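/*
 * Usage sketch (userspace, illustrative only): the temporary drop/regain
 * pattern mentioned in the comment above __sys_setuid().  A setuid-root
 * program swaps its real and effective uids with setreuid(); because the
 * real uid stays 0 between the two calls, the second swap is still
 * permitted, whereas a plain setuid() by root would have overwritten the
 * saved uid as well.
 *
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uid_t ruid = getuid(), euid = geteuid();
 *
 *		if (setreuid(euid, ruid))
 *			return 1;
 *		if (setreuid(ruid, euid))
 *			return 1;
 *		return 0;
 *	}
 */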
 624
 625
 626/*
 627 * This function implements a generic ability to update ruid, euid,
 628 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 629 */
 630long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
 631{
 632	struct user_namespace *ns = current_user_ns();
 633	const struct cred *old;
 634	struct cred *new;
 635	int retval;
 636	kuid_t kruid, keuid, ksuid;
 637
 638	kruid = make_kuid(ns, ruid);
 639	keuid = make_kuid(ns, euid);
 640	ksuid = make_kuid(ns, suid);
 641
 642	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 643		return -EINVAL;
 644
 645	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 646		return -EINVAL;
 647
 648	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
 649		return -EINVAL;
 650
 651	new = prepare_creds();
 652	if (!new)
 653		return -ENOMEM;
 654
 655	old = current_cred();
 656
 657	retval = -EPERM;
 658	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
 659		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
 660		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
 661			goto error;
 662		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
 663		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
 664			goto error;
 665		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
 666		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
 667			goto error;
 668	}
 669
 670	if (ruid != (uid_t) -1) {
 671		new->uid = kruid;
 672		if (!uid_eq(kruid, old->uid)) {
 673			retval = set_user(new);
 674			if (retval < 0)
 675				goto error;
 676		}
 677	}
 678	if (euid != (uid_t) -1)
 679		new->euid = keuid;
 680	if (suid != (uid_t) -1)
 681		new->suid = ksuid;
 682	new->fsuid = new->euid;
 683
 684	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
 685	if (retval < 0)
 686		goto error;
 687
 688	return commit_creds(new);
 689
 690error:
 691	abort_creds(new);
 692	return retval;
 693}
 694
 695SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 696{
 697	return __sys_setresuid(ruid, euid, suid);
 698}
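/*
 * Sketch (userspace, illustrative only) of the point made in the comment
 * above __sys_setresuid(): the 4.4BSD-compatible seteuid() falls out of the
 * general call by leaving the real and saved uids untouched.  The
 * my_seteuid() helper name below is hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	static int my_seteuid(uid_t euid)
 *	{
 *		return setresuid((uid_t)-1, euid, (uid_t)-1);
 *	}
 */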
 699
 700SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
 701{
 702	const struct cred *cred = current_cred();
 703	int retval;
 704	uid_t ruid, euid, suid;
 705
 706	ruid = from_kuid_munged(cred->user_ns, cred->uid);
 707	euid = from_kuid_munged(cred->user_ns, cred->euid);
 708	suid = from_kuid_munged(cred->user_ns, cred->suid);
 709
 710	retval = put_user(ruid, ruidp);
 711	if (!retval) {
 712		retval = put_user(euid, euidp);
 713		if (!retval)
 714			return put_user(suid, suidp);
 715	}
 716	return retval;
 717}
 718
 719/*
 720 * Same as above, but for rgid, egid, sgid.
 721 */
 722long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
 723{
 724	struct user_namespace *ns = current_user_ns();
 725	const struct cred *old;
 726	struct cred *new;
 727	int retval;
 728	kgid_t krgid, kegid, ksgid;
 729
 730	krgid = make_kgid(ns, rgid);
 731	kegid = make_kgid(ns, egid);
 732	ksgid = make_kgid(ns, sgid);
 733
 734	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 735		return -EINVAL;
 736	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 737		return -EINVAL;
 738	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
 739		return -EINVAL;
 740
 741	new = prepare_creds();
 742	if (!new)
 743		return -ENOMEM;
 744	old = current_cred();
 745
 746	retval = -EPERM;
 747	if (!ns_capable(old->user_ns, CAP_SETGID)) {
 748		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
 749		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
 750			goto error;
 751		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
 752		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
 753			goto error;
 754		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
 755		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
 756			goto error;
 757	}
 758
 759	if (rgid != (gid_t) -1)
 760		new->gid = krgid;
 761	if (egid != (gid_t) -1)
 762		new->egid = kegid;
 763	if (sgid != (gid_t) -1)
 764		new->sgid = ksgid;
 765	new->fsgid = new->egid;
 766
 767	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
 768	if (retval < 0)
 769		goto error;
 770
 771	return commit_creds(new);
 772
 773error:
 774	abort_creds(new);
 775	return retval;
 776}
 777
 778SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 779{
 780	return __sys_setresgid(rgid, egid, sgid);
 781}
 782
 783SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
 784{
 785	const struct cred *cred = current_cred();
 786	int retval;
 787	gid_t rgid, egid, sgid;
 788
 789	rgid = from_kgid_munged(cred->user_ns, cred->gid);
 790	egid = from_kgid_munged(cred->user_ns, cred->egid);
 791	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
 792
 793	retval = put_user(rgid, rgidp);
 794	if (!retval) {
 795		retval = put_user(egid, egidp);
 796		if (!retval)
 797			retval = put_user(sgid, sgidp);
 798	}
 799
 800	return retval;
 801}
 802
 803
 804/*
 805 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 806 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 807 * whatever uid it wants to). It normally shadows "euid", except when
 808 * explicitly set by setfsuid() or for access..
 809 */
 810long __sys_setfsuid(uid_t uid)
 811{
 812	const struct cred *old;
 813	struct cred *new;
 814	uid_t old_fsuid;
 815	kuid_t kuid;
 816
 817	old = current_cred();
 818	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
 819
 820	kuid = make_kuid(old->user_ns, uid);
 821	if (!uid_valid(kuid))
 822		return old_fsuid;
 823
 824	new = prepare_creds();
 825	if (!new)
 826		return old_fsuid;
 827
 828	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
 829	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
 830	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
 831		if (!uid_eq(kuid, old->fsuid)) {
 832			new->fsuid = kuid;
 833			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 834				goto change_okay;
 835		}
 836	}
 837
 838	abort_creds(new);
 839	return old_fsuid;
 840
 841change_okay:
 842	commit_creds(new);
 843	return old_fsuid;
 844}
 845
 846SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 847{
 848	return __sys_setfsuid(uid);
 849}
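/*
 * Usage sketch (userspace, illustrative only): __sys_setfsuid() above always
 * returns the previous fsuid, even when the change is refused, so a caller
 * that only wants to read the current value passes an invalid uid such as
 * -1.  The uid 1000 below is an arbitrary example value.
 *
 *	#include <stdio.h>
 *	#include <sys/fsuid.h>
 *
 *	int main(void)
 *	{
 *		int prev = setfsuid(1000);
 *		int now  = setfsuid(-1);
 *
 *		printf("fsuid was %d, now %d\n", prev, now);
 *		return 0;
 *	}
 */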
 850
 851/*
 852 * Samma på svenska.. ("the same, in Swedish") - as setfsuid(), but for the fsgid.
 853 */
 854long __sys_setfsgid(gid_t gid)
 855{
 856	const struct cred *old;
 857	struct cred *new;
 858	gid_t old_fsgid;
 859	kgid_t kgid;
 860
 861	old = current_cred();
 862	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
 863
 864	kgid = make_kgid(old->user_ns, gid);
 865	if (!gid_valid(kgid))
 866		return old_fsgid;
 867
 868	new = prepare_creds();
 869	if (!new)
 870		return old_fsgid;
 871
 872	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
 873	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
 874	    ns_capable(old->user_ns, CAP_SETGID)) {
 875		if (!gid_eq(kgid, old->fsgid)) {
 876			new->fsgid = kgid;
 877			if (security_task_fix_setgid(new,old,LSM_SETID_FS) == 0)
 878				goto change_okay;
 879		}
 880	}
 881
 882	abort_creds(new);
 883	return old_fsgid;
 884
 885change_okay:
 886	commit_creds(new);
 887	return old_fsgid;
 888}
 889
 890SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 891{
 892	return __sys_setfsgid(gid);
 893}
 894#endif /* CONFIG_MULTIUSER */
 895
 896/**
 897 * sys_getpid - return the thread group id of the current process
 898 *
 899 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 900 * the pid are identical unless CLONE_THREAD was specified on clone() in
 901 * which case the tgid is the same in all threads of the same group.
 902 *
 903 * This is SMP safe as current->tgid does not change.
 904 */
 905SYSCALL_DEFINE0(getpid)
 906{
 907	return task_tgid_vnr(current);
 908}
 909
 910/* Thread ID - the internal kernel "pid" */
 911SYSCALL_DEFINE0(gettid)
 912{
 913	return task_pid_vnr(current);
 914}
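/*
 * Usage sketch (userspace, illustrative only) of the distinction documented
 * above: every thread of a process sees the same getpid() (the tgid) but a
 * different gettid().  syscall(SYS_gettid) is used because older C libraries
 * ship no gettid() wrapper; build with -pthread.
 *
 *	#define _GNU_SOURCE
 *	#include <pthread.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void *worker(void *arg)
 *	{
 *		printf("pid=%ld tid=%ld\n",
 *		       (long)getpid(), (long)syscall(SYS_gettid));
 *		return NULL;
 *	}
 *
 *	int main(void)
 *	{
 *		pthread_t t;
 *
 *		pthread_create(&t, NULL, worker, NULL);
 *		worker(NULL);
 *		pthread_join(t, NULL);
 *		return 0;
 *	}
 */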
 915
 916/*
 917 * Accessing ->real_parent is not SMP-safe, it could
 918 * change from under us. However, we can use a stale
 919 * value of ->real_parent under rcu_read_lock(), see
 920 * release_task()->call_rcu(delayed_put_task_struct).
 921 */
 922SYSCALL_DEFINE0(getppid)
 923{
 924	int pid;
 925
 926	rcu_read_lock();
 927	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
 928	rcu_read_unlock();
 929
 930	return pid;
 931}
 932
 933SYSCALL_DEFINE0(getuid)
 934{
 935	/* Only we change this so SMP safe */
 936	return from_kuid_munged(current_user_ns(), current_uid());
 937}
 938
 939SYSCALL_DEFINE0(geteuid)
 940{
 941	/* Only we change this so SMP safe */
 942	return from_kuid_munged(current_user_ns(), current_euid());
 943}
 944
 945SYSCALL_DEFINE0(getgid)
 946{
 947	/* Only we change this so SMP safe */
 948	return from_kgid_munged(current_user_ns(), current_gid());
 949}
 950
 951SYSCALL_DEFINE0(getegid)
 952{
 953	/* Only we change this so SMP safe */
 954	return from_kgid_munged(current_user_ns(), current_egid());
 955}
 956
 957static void do_sys_times(struct tms *tms)
 958{
 959	u64 tgutime, tgstime, cutime, cstime;
 960
 961	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
 962	cutime = current->signal->cutime;
 963	cstime = current->signal->cstime;
 964	tms->tms_utime = nsec_to_clock_t(tgutime);
 965	tms->tms_stime = nsec_to_clock_t(tgstime);
 966	tms->tms_cutime = nsec_to_clock_t(cutime);
 967	tms->tms_cstime = nsec_to_clock_t(cstime);
 968}
 969
 970SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
 971{
 972	if (tbuf) {
 973		struct tms tmp;
 974
 975		do_sys_times(&tmp);
 976		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
 977			return -EFAULT;
 978	}
 979	force_successful_syscall_return();
 980	return (long) jiffies_64_to_clock_t(get_jiffies_64());
 981}
 982
 983#ifdef CONFIG_COMPAT
 984static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
 985{
 986	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
 987}
 988
 989COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
 990{
 991	if (tbuf) {
 992		struct tms tms;
 993		struct compat_tms tmp;
 994
 995		do_sys_times(&tms);
 996		/* Convert our struct tms to the compat version. */
 997		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
 998		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
 999		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
1000		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
1001		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
1002			return -EFAULT;
1003	}
1004	force_successful_syscall_return();
1005	return compat_jiffies_to_clock_t(jiffies);
1006}
1007#endif
1008
1009/*
1010 * This needs some heavy checking ...
1011 * I just haven't the stomach for it. I also don't fully
1012 * understand sessions/pgrp etc. Let somebody who does explain it.
1013 *
1014 * OK, I think I have the protection semantics right.... this is really
1015 * only important on a multi-user system anyway, to make sure one user
1016 * can't send a signal to a process owned by another.  -TYT, 12/12/91
1017 *
1018 * !PF_FORKNOEXEC check to conform completely to POSIX.
1019 */
1020SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1021{
1022	struct task_struct *p;
1023	struct task_struct *group_leader = current->group_leader;
1024	struct pid *pgrp;
1025	int err;
1026
1027	if (!pid)
1028		pid = task_pid_vnr(group_leader);
1029	if (!pgid)
1030		pgid = pid;
1031	if (pgid < 0)
1032		return -EINVAL;
1033	rcu_read_lock();
1034
1035	/* From this point forward we keep holding onto the tasklist lock
1036	 * so that our parent does not change from under us. -DaveM
1037	 */
1038	write_lock_irq(&tasklist_lock);
1039
1040	err = -ESRCH;
1041	p = find_task_by_vpid(pid);
1042	if (!p)
1043		goto out;
1044
1045	err = -EINVAL;
1046	if (!thread_group_leader(p))
1047		goto out;
1048
1049	if (same_thread_group(p->real_parent, group_leader)) {
1050		err = -EPERM;
1051		if (task_session(p) != task_session(group_leader))
1052			goto out;
1053		err = -EACCES;
1054		if (!(p->flags & PF_FORKNOEXEC))
1055			goto out;
1056	} else {
1057		err = -ESRCH;
1058		if (p != group_leader)
1059			goto out;
1060	}
1061
1062	err = -EPERM;
1063	if (p->signal->leader)
1064		goto out;
1065
1066	pgrp = task_pid(p);
1067	if (pgid != pid) {
1068		struct task_struct *g;
1069
1070		pgrp = find_vpid(pgid);
1071		g = pid_task(pgrp, PIDTYPE_PGID);
1072		if (!g || task_session(g) != task_session(group_leader))
1073			goto out;
1074	}
1075
1076	err = security_task_setpgid(p, pgid);
1077	if (err)
1078		goto out;
1079
1080	if (task_pgrp(p) != pgrp)
1081		change_pid(p, PIDTYPE_PGID, pgrp);
1082
1083	err = 0;
1084out:
1085	/* All paths lead to here, thus we are safe. -DaveM */
1086	write_unlock_irq(&tasklist_lock);
1087	rcu_read_unlock();
1088	return err;
1089}
1090
1091static int do_getpgid(pid_t pid)
1092{
1093	struct task_struct *p;
1094	struct pid *grp;
1095	int retval;
1096
1097	rcu_read_lock();
1098	if (!pid)
1099		grp = task_pgrp(current);
1100	else {
1101		retval = -ESRCH;
1102		p = find_task_by_vpid(pid);
1103		if (!p)
1104			goto out;
1105		grp = task_pgrp(p);
1106		if (!grp)
1107			goto out;
1108
1109		retval = security_task_getpgid(p);
1110		if (retval)
1111			goto out;
1112	}
1113	retval = pid_vnr(grp);
1114out:
1115	rcu_read_unlock();
1116	return retval;
1117}
1118
1119SYSCALL_DEFINE1(getpgid, pid_t, pid)
1120{
1121	return do_getpgid(pid);
1122}
1123
1124#ifdef __ARCH_WANT_SYS_GETPGRP
1125
1126SYSCALL_DEFINE0(getpgrp)
1127{
1128	return do_getpgid(0);
1129}
1130
1131#endif
1132
1133SYSCALL_DEFINE1(getsid, pid_t, pid)
1134{
1135	struct task_struct *p;
1136	struct pid *sid;
1137	int retval;
1138
1139	rcu_read_lock();
1140	if (!pid)
1141		sid = task_session(current);
1142	else {
1143		retval = -ESRCH;
1144		p = find_task_by_vpid(pid);
1145		if (!p)
1146			goto out;
1147		sid = task_session(p);
1148		if (!sid)
1149			goto out;
1150
1151		retval = security_task_getsid(p);
1152		if (retval)
1153			goto out;
1154	}
1155	retval = pid_vnr(sid);
1156out:
1157	rcu_read_unlock();
1158	return retval;
1159}
1160
1161static void set_special_pids(struct pid *pid)
1162{
1163	struct task_struct *curr = current->group_leader;
1164
1165	if (task_session(curr) != pid)
1166		change_pid(curr, PIDTYPE_SID, pid);
1167
1168	if (task_pgrp(curr) != pid)
1169		change_pid(curr, PIDTYPE_PGID, pid);
1170}
1171
1172int ksys_setsid(void)
1173{
1174	struct task_struct *group_leader = current->group_leader;
1175	struct pid *sid = task_pid(group_leader);
1176	pid_t session = pid_vnr(sid);
1177	int err = -EPERM;
1178
1179	write_lock_irq(&tasklist_lock);
1180	/* Fail if I am already a session leader */
1181	if (group_leader->signal->leader)
1182		goto out;
1183
1184	/* Fail if a process group id already exists that equals the
1185	 * proposed session id.
1186	 */
1187	if (pid_task(sid, PIDTYPE_PGID))
1188		goto out;
1189
1190	group_leader->signal->leader = 1;
1191	set_special_pids(sid);
1192
1193	proc_clear_tty(group_leader);
1194
1195	err = session;
1196out:
1197	write_unlock_irq(&tasklist_lock);
1198	if (err > 0) {
1199		proc_sid_connector(group_leader);
1200		sched_autogroup_create_attach(group_leader);
1201	}
1202	return err;
1203}
1204
1205SYSCALL_DEFINE0(setsid)
1206{
1207	return ksys_setsid();
1208}
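/*
 * Usage sketch (userspace, illustrative only): ksys_setsid() above fails
 * with -EPERM if the caller is already a session leader or if its pid
 * already names a process group, which is why the classic daemonizing
 * sequence forks first, so the surviving child is guaranteed not to be a
 * group leader before it calls setsid().
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid < 0)
 *			exit(1);
 *		if (pid > 0)
 *			exit(0);
 *		if (setsid() < 0)
 *			exit(1);
 *		pause();
 *		return 0;
 *	}
 */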
1209
1210DECLARE_RWSEM(uts_sem);
1211
1212#ifdef COMPAT_UTS_MACHINE
1213#define override_architecture(name) \
1214	(personality(current->personality) == PER_LINUX32 && \
1215	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1216		      sizeof(COMPAT_UTS_MACHINE)))
1217#else
1218#define override_architecture(name)	0
1219#endif
1220
1221/*
1222 * Work around broken programs that cannot handle "Linux 3.0".
1223 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
1224 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1225 * 2.6.60.
1226 */
1227static int override_release(char __user *release, size_t len)
1228{
1229	int ret = 0;
1230
1231	if (current->personality & UNAME26) {
1232		const char *rest = UTS_RELEASE;
1233		char buf[65] = { 0 };
1234		int ndots = 0;
1235		unsigned v;
1236		size_t copy;
1237
1238		while (*rest) {
1239			if (*rest == '.' && ++ndots >= 3)
1240				break;
1241			if (!isdigit(*rest) && *rest != '.')
1242				break;
1243			rest++;
1244		}
1245		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1246		copy = clamp_t(size_t, len, 1, sizeof(buf));
1247		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1248		ret = copy_to_user(release, buf, copy + 1);
1249	}
1250	return ret;
1251}
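/*
 * Worked example (illustrative only) of the mapping above, assuming a v5.9
 * kernel: with the UNAME26 personality set (e.g. under "setarch
 * --uname-2.6"), v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60 = 9 + 60, so
 * uname() reports a release starting with "2.6.69", followed by whatever
 * non-numeric suffix remains of UTS_RELEASE.  UNAME26 is assumed to come
 * from <sys/personality.h>; personality(0xffffffff) queries the current
 * personality without changing it.
 *
 *	#include <stdio.h>
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		personality(personality(0xffffffff) | UNAME26);
 *		uname(&u);
 *		printf("%s\n", u.release);
 *		return 0;
 *	}
 */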
1252
1253SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1254{
1255	struct new_utsname tmp;
1256
1257	down_read(&uts_sem);
1258	memcpy(&tmp, utsname(), sizeof(tmp));
1259	up_read(&uts_sem);
1260	if (copy_to_user(name, &tmp, sizeof(tmp)))
1261		return -EFAULT;
1262
1263	if (override_release(name->release, sizeof(name->release)))
1264		return -EFAULT;
1265	if (override_architecture(name))
1266		return -EFAULT;
1267	return 0;
1268}
1269
1270#ifdef __ARCH_WANT_SYS_OLD_UNAME
1271/*
1272 * Old cruft
1273 */
1274SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1275{
1276	struct old_utsname tmp;
1277
1278	if (!name)
1279		return -EFAULT;
1280
1281	down_read(&uts_sem);
1282	memcpy(&tmp, utsname(), sizeof(tmp));
1283	up_read(&uts_sem);
1284	if (copy_to_user(name, &tmp, sizeof(tmp)))
1285		return -EFAULT;
1286
1287	if (override_release(name->release, sizeof(name->release)))
1288		return -EFAULT;
1289	if (override_architecture(name))
1290		return -EFAULT;
1291	return 0;
1292}
1293
1294SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1295{
1296	struct oldold_utsname tmp;
1297
1298	if (!name)
1299		return -EFAULT;
1300
1301	memset(&tmp, 0, sizeof(tmp));
1302
1303	down_read(&uts_sem);
1304	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1305	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1306	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1307	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1308	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1309	up_read(&uts_sem);
1310	if (copy_to_user(name, &tmp, sizeof(tmp)))
1311		return -EFAULT;
1312
1313	if (override_architecture(name))
1314		return -EFAULT;
1315	if (override_release(name->release, sizeof(name->release)))
1316		return -EFAULT;
1317	return 0;
1318}
1319#endif
1320
1321SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1322{
1323	int errno;
1324	char tmp[__NEW_UTS_LEN];
1325
1326	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1327		return -EPERM;
1328
1329	if (len < 0 || len > __NEW_UTS_LEN)
1330		return -EINVAL;
1331	errno = -EFAULT;
1332	if (!copy_from_user(tmp, name, len)) {
1333		struct new_utsname *u;
1334
1335		down_write(&uts_sem);
1336		u = utsname();
1337		memcpy(u->nodename, tmp, len);
1338		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1339		errno = 0;
1340		uts_proc_notify(UTS_PROC_HOSTNAME);
1341		up_write(&uts_sem);
1342	}
1343	return errno;
1344}
1345
1346#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1347
1348SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1349{
1350	int i;
1351	struct new_utsname *u;
1352	char tmp[__NEW_UTS_LEN + 1];
1353
1354	if (len < 0)
1355		return -EINVAL;
1356	down_read(&uts_sem);
1357	u = utsname();
1358	i = 1 + strlen(u->nodename);
1359	if (i > len)
1360		i = len;
1361	memcpy(tmp, u->nodename, i);
1362	up_read(&uts_sem);
1363	if (copy_to_user(name, tmp, i))
1364		return -EFAULT;
1365	return 0;
1366}
1367
1368#endif
1369
1370/*
1371 * Only setdomainname; getdomainname can be implemented by calling
1372 * uname()
1373 */
1374SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1375{
1376	int errno;
1377	char tmp[__NEW_UTS_LEN];
1378
1379	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1380		return -EPERM;
1381	if (len < 0 || len > __NEW_UTS_LEN)
1382		return -EINVAL;
1383
1384	errno = -EFAULT;
1385	if (!copy_from_user(tmp, name, len)) {
1386		struct new_utsname *u;
1387
1388		down_write(&uts_sem);
1389		u = utsname();
1390		memcpy(u->domainname, tmp, len);
1391		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1392		errno = 0;
1393		uts_proc_notify(UTS_PROC_DOMAINNAME);
1394		up_write(&uts_sem);
1395	}
1396	return errno;
1397}
1398
1399SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1400{
1401	struct rlimit value;
1402	int ret;
1403
1404	ret = do_prlimit(current, resource, NULL, &value);
1405	if (!ret)
1406		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1407
1408	return ret;
1409}
1410
1411#ifdef CONFIG_COMPAT
1412
1413COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1414		       struct compat_rlimit __user *, rlim)
1415{
1416	struct rlimit r;
1417	struct compat_rlimit r32;
1418
1419	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1420		return -EFAULT;
1421
1422	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1423		r.rlim_cur = RLIM_INFINITY;
1424	else
1425		r.rlim_cur = r32.rlim_cur;
1426	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1427		r.rlim_max = RLIM_INFINITY;
1428	else
1429		r.rlim_max = r32.rlim_max;
1430	return do_prlimit(current, resource, &r, NULL);
1431}
1432
1433COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1434		       struct compat_rlimit __user *, rlim)
1435{
1436	struct rlimit r;
1437	int ret;
1438
1439	ret = do_prlimit(current, resource, NULL, &r);
1440	if (!ret) {
1441		struct compat_rlimit r32;
1442		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1443			r32.rlim_cur = COMPAT_RLIM_INFINITY;
1444		else
1445			r32.rlim_cur = r.rlim_cur;
1446		if (r.rlim_max > COMPAT_RLIM_INFINITY)
1447			r32.rlim_max = COMPAT_RLIM_INFINITY;
1448		else
1449			r32.rlim_max = r.rlim_max;
1450
1451		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1452			return -EFAULT;
1453	}
1454	return ret;
1455}
1456
1457#endif
1458
1459#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1460
1461/*
1462 *	Back compatibility for getrlimit. Needed for some apps.
1463 */
1464SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1465		struct rlimit __user *, rlim)
1466{
1467	struct rlimit x;
1468	if (resource >= RLIM_NLIMITS)
1469		return -EINVAL;
1470
1471	resource = array_index_nospec(resource, RLIM_NLIMITS);
1472	task_lock(current->group_leader);
1473	x = current->signal->rlim[resource];
1474	task_unlock(current->group_leader);
1475	if (x.rlim_cur > 0x7FFFFFFF)
1476		x.rlim_cur = 0x7FFFFFFF;
1477	if (x.rlim_max > 0x7FFFFFFF)
1478		x.rlim_max = 0x7FFFFFFF;
1479	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1480}
1481
1482#ifdef CONFIG_COMPAT
1483COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1484		       struct compat_rlimit __user *, rlim)
1485{
1486	struct rlimit r;
1487
1488	if (resource >= RLIM_NLIMITS)
1489		return -EINVAL;
1490
1491	resource = array_index_nospec(resource, RLIM_NLIMITS);
1492	task_lock(current->group_leader);
1493	r = current->signal->rlim[resource];
1494	task_unlock(current->group_leader);
1495	if (r.rlim_cur > 0x7FFFFFFF)
1496		r.rlim_cur = 0x7FFFFFFF;
1497	if (r.rlim_max > 0x7FFFFFFF)
1498		r.rlim_max = 0x7FFFFFFF;
1499
1500	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1501	    put_user(r.rlim_max, &rlim->rlim_max))
1502		return -EFAULT;
1503	return 0;
1504}
1505#endif
1506
1507#endif
1508
1509static inline bool rlim64_is_infinity(__u64 rlim64)
1510{
1511#if BITS_PER_LONG < 64
1512	return rlim64 >= ULONG_MAX;
1513#else
1514	return rlim64 == RLIM64_INFINITY;
1515#endif
1516}
1517
1518static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1519{
1520	if (rlim->rlim_cur == RLIM_INFINITY)
1521		rlim64->rlim_cur = RLIM64_INFINITY;
1522	else
1523		rlim64->rlim_cur = rlim->rlim_cur;
1524	if (rlim->rlim_max == RLIM_INFINITY)
1525		rlim64->rlim_max = RLIM64_INFINITY;
1526	else
1527		rlim64->rlim_max = rlim->rlim_max;
1528}
1529
1530static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1531{
1532	if (rlim64_is_infinity(rlim64->rlim_cur))
1533		rlim->rlim_cur = RLIM_INFINITY;
1534	else
1535		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1536	if (rlim64_is_infinity(rlim64->rlim_max))
1537		rlim->rlim_max = RLIM_INFINITY;
1538	else
1539		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1540}
1541
1542/* make sure you are allowed to change @tsk limits before calling this */
1543int do_prlimit(struct task_struct *tsk, unsigned int resource,
1544		struct rlimit *new_rlim, struct rlimit *old_rlim)
1545{
1546	struct rlimit *rlim;
1547	int retval = 0;
1548
1549	if (resource >= RLIM_NLIMITS)
1550		return -EINVAL;
1551	if (new_rlim) {
1552		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1553			return -EINVAL;
1554		if (resource == RLIMIT_NOFILE &&
1555				new_rlim->rlim_max > sysctl_nr_open)
1556			return -EPERM;
1557	}
1558
1559	/* protect tsk->signal and tsk->sighand from disappearing */
1560	read_lock(&tasklist_lock);
1561	if (!tsk->sighand) {
1562		retval = -ESRCH;
1563		goto out;
1564	}
1565
1566	rlim = tsk->signal->rlim + resource;
1567	task_lock(tsk->group_leader);
1568	if (new_rlim) {
1569		/* Keep the capable check against init_user_ns until
1570		   cgroups can contain all limits */
1571		if (new_rlim->rlim_max > rlim->rlim_max &&
1572				!capable(CAP_SYS_RESOURCE))
1573			retval = -EPERM;
1574		if (!retval)
1575			retval = security_task_setrlimit(tsk, resource, new_rlim);
1576	}
1577	if (!retval) {
1578		if (old_rlim)
1579			*old_rlim = *rlim;
1580		if (new_rlim)
1581			*rlim = *new_rlim;
1582	}
1583	task_unlock(tsk->group_leader);
1584
1585	/*
1586	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
1587	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
1588	 * ignores the rlimit.
1589	 */
1590	 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1591	     new_rlim->rlim_cur != RLIM_INFINITY &&
1592	     IS_ENABLED(CONFIG_POSIX_TIMERS))
1593		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1594out:
1595	read_unlock(&tasklist_lock);
1596	return retval;
1597}
1598
1599/* rcu lock must be held */
1600static int check_prlimit_permission(struct task_struct *task,
1601				    unsigned int flags)
1602{
1603	const struct cred *cred = current_cred(), *tcred;
1604	bool id_match;
1605
1606	if (current == task)
1607		return 0;
1608
1609	tcred = __task_cred(task);
1610	id_match = (uid_eq(cred->uid, tcred->euid) &&
1611		    uid_eq(cred->uid, tcred->suid) &&
1612		    uid_eq(cred->uid, tcred->uid)  &&
1613		    gid_eq(cred->gid, tcred->egid) &&
1614		    gid_eq(cred->gid, tcred->sgid) &&
1615		    gid_eq(cred->gid, tcred->gid));
1616	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1617		return -EPERM;
1618
1619	return security_task_prlimit(cred, tcred, flags);
1620}
1621
1622SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1623		const struct rlimit64 __user *, new_rlim,
1624		struct rlimit64 __user *, old_rlim)
1625{
1626	struct rlimit64 old64, new64;
1627	struct rlimit old, new;
1628	struct task_struct *tsk;
1629	unsigned int checkflags = 0;
1630	int ret;
1631
1632	if (old_rlim)
1633		checkflags |= LSM_PRLIMIT_READ;
1634
1635	if (new_rlim) {
1636		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1637			return -EFAULT;
1638		rlim64_to_rlim(&new64, &new);
1639		checkflags |= LSM_PRLIMIT_WRITE;
1640	}
1641
1642	rcu_read_lock();
1643	tsk = pid ? find_task_by_vpid(pid) : current;
1644	if (!tsk) {
1645		rcu_read_unlock();
1646		return -ESRCH;
1647	}
1648	ret = check_prlimit_permission(tsk, checkflags);
1649	if (ret) {
1650		rcu_read_unlock();
1651		return ret;
1652	}
1653	get_task_struct(tsk);
1654	rcu_read_unlock();
1655
1656	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1657			old_rlim ? &old : NULL);
1658
1659	if (!ret && old_rlim) {
1660		rlim_to_rlim64(&old, &old64);
1661		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1662			ret = -EFAULT;
1663	}
1664
1665	put_task_struct(tsk);
1666	return ret;
1667}
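
/*
 * Editorial illustration (not part of the kernel source): a minimal,
 * untested userspace sketch of the prlimit64 syscall above, via glibc's
 * prlimit() wrapper.  Passing a NULL new limit makes the call read-only,
 * so only the "read" side of check_prlimit_permission() applies.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
	struct rlimit old;
	pid_t pid;

	if (argc < 2)
		return 1;
	pid = (pid_t)atol(argv[1]);

	/* NULL new_limit: only reads the target's RLIMIT_NOFILE */
	if (prlimit(pid, RLIMIT_NOFILE, NULL, &old)) {
		perror("prlimit");
		return 1;
	}
	printf("RLIMIT_NOFILE: cur=%llu max=%llu\n",
	       (unsigned long long)old.rlim_cur,
	       (unsigned long long)old.rlim_max);
	return 0;
}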
1668
1669SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1670{
1671	struct rlimit new_rlim;
1672
1673	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1674		return -EFAULT;
1675	return do_prlimit(current, resource, &new_rlim, NULL);
1676}
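
/*
 * Editorial illustration (not part of the kernel source): a small sketch of
 * getrlimit(2)/setrlimit(2) on the calling process.  The soft limit must
 * not exceed the hard limit, matching the -EINVAL check in do_prlimit()
 * above.
 */
#include <sys/resource.h>

/* Raise the soft core-dump limit to the hard limit for this process. */
int enable_core_dumps(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_CORE, &rl))
		return -1;
	rl.rlim_cur = rl.rlim_max;	/* keep rlim_cur <= rlim_max */
	return setrlimit(RLIMIT_CORE, &rl);
}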
1677
1678/*
1679 * It would make sense to put struct rusage in the task_struct,
1680 * except that would make the task_struct be *really big*.  After
1681 * task_struct gets moved into malloc'ed memory, it would
1682 * make sense to do this.  It will make moving the rest of the information
1683 * a lot simpler!  (Which we're not doing right now because we're not
1684 * measuring them yet).
1685 *
1686 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1687 * races with threads incrementing their own counters.  But since word
1688 * reads are atomic, we either get new values or old values and we don't
1689 * care which for the sums.  We always take the siglock to protect reading
1690 * the c* fields from p->signal from races with exit.c updating those
1691 * fields when reaping, so a sample either gets all the additions of a
1692 * given child after it's reaped, or none so this sample is before reaping.
1693 *
1694 * Locking:
1695 * We need to take the siglock for CHILDREN, SELF and BOTH
1696 * for the cases of current multithreaded, non-current single threaded and
1697 * non-current multithreaded.  Thread traversal is now safe with
1698 * the siglock held.
1699 * Strictly speaking, we do not need to take the siglock if we are current and
1700 * single threaded, as no one else can take our signal_struct away, no one
1701 * else can reap the children to update signal->c* counters, and no one else
1702 * can race with the signal-> fields. If we do not take any lock, the
1703 * signal-> fields could be read out of order while another thread was just
1704 * exiting. So we should place a read memory barrier when we avoid the lock.
1705 * On the writer side, a write memory barrier is implied in __exit_signal,
1706 * as __exit_signal releases the siglock spinlock after updating the signal->
1707 * fields. But we don't do this yet to keep things simple.
1708 *
1709 */
1710
1711static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1712{
1713	r->ru_nvcsw += t->nvcsw;
1714	r->ru_nivcsw += t->nivcsw;
1715	r->ru_minflt += t->min_flt;
1716	r->ru_majflt += t->maj_flt;
1717	r->ru_inblock += task_io_get_inblock(t);
1718	r->ru_oublock += task_io_get_oublock(t);
1719}
1720
1721void getrusage(struct task_struct *p, int who, struct rusage *r)
1722{
1723	struct task_struct *t;
1724	unsigned long flags;
1725	u64 tgutime, tgstime, utime, stime;
1726	unsigned long maxrss = 0;
1727
1728	memset((char *)r, 0, sizeof (*r));
1729	utime = stime = 0;
1730
1731	if (who == RUSAGE_THREAD) {
1732		task_cputime_adjusted(current, &utime, &stime);
1733		accumulate_thread_rusage(p, r);
1734		maxrss = p->signal->maxrss;
1735		goto out;
1736	}
1737
1738	if (!lock_task_sighand(p, &flags))
1739		return;
1740
1741	switch (who) {
1742	case RUSAGE_BOTH:
1743	case RUSAGE_CHILDREN:
1744		utime = p->signal->cutime;
1745		stime = p->signal->cstime;
1746		r->ru_nvcsw = p->signal->cnvcsw;
1747		r->ru_nivcsw = p->signal->cnivcsw;
1748		r->ru_minflt = p->signal->cmin_flt;
1749		r->ru_majflt = p->signal->cmaj_flt;
1750		r->ru_inblock = p->signal->cinblock;
1751		r->ru_oublock = p->signal->coublock;
1752		maxrss = p->signal->cmaxrss;
1753
1754		if (who == RUSAGE_CHILDREN)
1755			break;
1756		fallthrough;
1757
1758	case RUSAGE_SELF:
1759		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1760		utime += tgutime;
1761		stime += tgstime;
1762		r->ru_nvcsw += p->signal->nvcsw;
1763		r->ru_nivcsw += p->signal->nivcsw;
1764		r->ru_minflt += p->signal->min_flt;
1765		r->ru_majflt += p->signal->maj_flt;
1766		r->ru_inblock += p->signal->inblock;
1767		r->ru_oublock += p->signal->oublock;
1768		if (maxrss < p->signal->maxrss)
1769			maxrss = p->signal->maxrss;
1770		t = p;
1771		do {
1772			accumulate_thread_rusage(t, r);
1773		} while_each_thread(p, t);
1774		break;
1775
1776	default:
1777		BUG();
1778	}
1779	unlock_task_sighand(p, &flags);
1780
1781out:
1782	r->ru_utime = ns_to_kernel_old_timeval(utime);
1783	r->ru_stime = ns_to_kernel_old_timeval(stime);
1784
1785	if (who != RUSAGE_CHILDREN) {
1786		struct mm_struct *mm = get_task_mm(p);
1787
1788		if (mm) {
1789			setmax_mm_hiwater_rss(&maxrss, mm);
1790			mmput(mm);
1791		}
1792	}
1793	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1794}
1795
1796SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1797{
1798	struct rusage r;
1799
1800	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1801	    who != RUSAGE_THREAD)
1802		return -EINVAL;
1803
1804	getrusage(current, who, &r);
1805	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1806}
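
/*
 * Editorial illustration (not part of the kernel source): an untested
 * userspace sketch of getrusage(2).  Note that ru_maxrss is reported in
 * kilobytes, per the pages-to-KB conversion in getrusage() above.
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru)) {
		perror("getrusage");
		return 1;
	}
	printf("maxrss=%ld kB, user time=%ld.%06ld s\n",
	       ru.ru_maxrss,
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
	return 0;
}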
1807
1808#ifdef CONFIG_COMPAT
1809COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1810{
1811	struct rusage r;
1812
1813	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1814	    who != RUSAGE_THREAD)
1815		return -EINVAL;
1816
1817	getrusage(current, who, &r);
1818	return put_compat_rusage(&r, ru);
1819}
1820#endif
1821
1822SYSCALL_DEFINE1(umask, int, mask)
1823{
1824	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1825	return mask;
1826}
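
/*
 * Editorial illustration (not part of the kernel source): umask(2) returns
 * the previous mask, and only the permission bits (S_IRWXUGO) are kept, as
 * the xchg() above shows.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
	mode_t old = umask(022);	/* new files get at most 0644, dirs 0755 */

	printf("previous umask was %03o\n", old);
	umask(old);			/* restore the original mask */
	return 0;
}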
1827
1828static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1829{
1830	struct fd exe;
1831	struct file *old_exe, *exe_file;
1832	struct inode *inode;
1833	int err;
1834
1835	exe = fdget(fd);
1836	if (!exe.file)
1837		return -EBADF;
1838
1839	inode = file_inode(exe.file);
1840
1841	/*
1842	 * Because the original mm->exe_file points to an executable file, make
1843	 * sure that this one is executable as well, so that the overall picture
1844	 * stays consistent.
1845	 */
1846	err = -EACCES;
1847	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1848		goto exit;
1849
1850	err = inode_permission(inode, MAY_EXEC);
1851	if (err)
1852		goto exit;
1853
1854	/*
1855	 * Forbid mm->exe_file change if old file still mapped.
1856	 */
1857	exe_file = get_mm_exe_file(mm);
1858	err = -EBUSY;
1859	if (exe_file) {
1860		struct vm_area_struct *vma;
1861
1862		mmap_read_lock(mm);
1863		for (vma = mm->mmap; vma; vma = vma->vm_next) {
1864			if (!vma->vm_file)
1865				continue;
1866			if (path_equal(&vma->vm_file->f_path,
1867				       &exe_file->f_path))
1868				goto exit_err;
1869		}
1870
1871		mmap_read_unlock(mm);
1872		fput(exe_file);
1873	}
1874
1875	err = 0;
1876	/* set the new file, lockless */
1877	get_file(exe.file);
1878	old_exe = xchg(&mm->exe_file, exe.file);
1879	if (old_exe)
1880		fput(old_exe);
1881exit:
1882	fdput(exe);
1883	return err;
1884exit_err:
1885	mmap_read_unlock(mm);
1886	fput(exe_file);
1887	goto exit;
1888}
1889
1890/*
1891 * Check the arithmetic relations between the passed addresses.
1892 *
1893 * WARNING: we don't require any capability here, so be very careful
1894 * about what userspace is allowed to modify.
1895 */
1896static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1897{
1898	unsigned long mmap_max_addr = TASK_SIZE;
1899	int error = -EINVAL, i;
1900
1901	static const unsigned char offsets[] = {
1902		offsetof(struct prctl_mm_map, start_code),
1903		offsetof(struct prctl_mm_map, end_code),
1904		offsetof(struct prctl_mm_map, start_data),
1905		offsetof(struct prctl_mm_map, end_data),
1906		offsetof(struct prctl_mm_map, start_brk),
1907		offsetof(struct prctl_mm_map, brk),
1908		offsetof(struct prctl_mm_map, start_stack),
1909		offsetof(struct prctl_mm_map, arg_start),
1910		offsetof(struct prctl_mm_map, arg_end),
1911		offsetof(struct prctl_mm_map, env_start),
1912		offsetof(struct prctl_mm_map, env_end),
1913	};
1914
1915	/*
1916	 * Make sure the members are not somewhere outside
1917	 * of the allowed address space.
1918	 */
1919	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1920		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1921
1922		if ((unsigned long)val >= mmap_max_addr ||
1923		    (unsigned long)val < mmap_min_addr)
1924			goto out;
1925	}
1926
1927	/*
1928	 * Make sure the pairs are ordered.
1929	 */
1930#define __prctl_check_order(__m1, __op, __m2)				\
1931	((unsigned long)prctl_map->__m1 __op				\
1932	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1933	error  = __prctl_check_order(start_code, <, end_code);
1934	error |= __prctl_check_order(start_data, <=, end_data);
1935	error |= __prctl_check_order(start_brk, <=, brk);
1936	error |= __prctl_check_order(arg_start, <=, arg_end);
1937	error |= __prctl_check_order(env_start, <=, env_end);
1938	if (error)
1939		goto out;
1940#undef __prctl_check_order
1941
1942	error = -EINVAL;
1943
1944	/*
1945	 * @brk should be after @end_data in traditional maps.
1946	 */
1947	if (prctl_map->start_brk <= prctl_map->end_data ||
1948	    prctl_map->brk <= prctl_map->end_data)
1949		goto out;
1950
1951	/*
1952	 * Nor should we allow overriding the limits if they are set.
1953	 */
1954	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1955			      prctl_map->start_brk, prctl_map->end_data,
1956			      prctl_map->start_data))
1957			goto out;
1958
1959	error = 0;
1960out:
1961	return error;
1962}
1963
1964#ifdef CONFIG_CHECKPOINT_RESTORE
1965static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1966{
1967	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1968	unsigned long user_auxv[AT_VECTOR_SIZE];
1969	struct mm_struct *mm = current->mm;
1970	int error;
1971
1972	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1973	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1974
1975	if (opt == PR_SET_MM_MAP_SIZE)
1976		return put_user((unsigned int)sizeof(prctl_map),
1977				(unsigned int __user *)addr);
1978
1979	if (data_size != sizeof(prctl_map))
1980		return -EINVAL;
1981
1982	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1983		return -EFAULT;
1984
1985	error = validate_prctl_map_addr(&prctl_map);
1986	if (error)
1987		return error;
1988
1989	if (prctl_map.auxv_size) {
1990		/*
1991		 * Someone is trying to cheat the auxv vector.
1992		 */
1993		if (!prctl_map.auxv ||
1994				prctl_map.auxv_size > sizeof(mm->saved_auxv))
1995			return -EINVAL;
1996
1997		memset(user_auxv, 0, sizeof(user_auxv));
1998		if (copy_from_user(user_auxv,
1999				   (const void __user *)prctl_map.auxv,
2000				   prctl_map.auxv_size))
2001			return -EFAULT;
2002
2003		/* Last entry must be AT_NULL as specification requires */
2004		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
2005		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
2006	}
2007
2008	if (prctl_map.exe_fd != (u32)-1) {
2009		/*
2010		 * Check if the current user is checkpoint/restore capable.
2011		 * At the time of this writing, it checks for CAP_SYS_ADMIN
2012		 * or CAP_CHECKPOINT_RESTORE.
2013		 * Note that a user with access to ptrace can masquerade an
2014		 * arbitrary program as any executable, even setuid ones.
2015		 * This may have implications in the tomoyo subsystem.
2016		 */
2017		if (!checkpoint_restore_ns_capable(current_user_ns()))
2018			return -EPERM;
2019
2020		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2021		if (error)
2022			return error;
2023	}
2024
2025	/*
2026	 * arg_lock protects concurrent updates, but we still need mmap_lock for
2027	 * read to exclude races with sys_brk.
2028	 */
2029	mmap_read_lock(mm);
2030
2031	/*
2032	 * We don't validate that these members point to real, present VMAs,
2033	 * because the application may already have unmapped the corresponding
2034	 * VMAs and the kernel uses these members mostly for statistics
2035	 * output in procfs, except
2036	 *
2037	 *  - @start_brk/@brk, which are used in do_brk, but the kernel looks up
2038	 *    VMAs when updating these members, so anything wrong written
2039	 *    here makes the kernel complain about the userspace program but
2040	 *    won't lead to any problem in the kernel itself
2041	 */
2042
2043	spin_lock(&mm->arg_lock);
2044	mm->start_code	= prctl_map.start_code;
2045	mm->end_code	= prctl_map.end_code;
2046	mm->start_data	= prctl_map.start_data;
2047	mm->end_data	= prctl_map.end_data;
2048	mm->start_brk	= prctl_map.start_brk;
2049	mm->brk		= prctl_map.brk;
2050	mm->start_stack	= prctl_map.start_stack;
2051	mm->arg_start	= prctl_map.arg_start;
2052	mm->arg_end	= prctl_map.arg_end;
2053	mm->env_start	= prctl_map.env_start;
2054	mm->env_end	= prctl_map.env_end;
2055	spin_unlock(&mm->arg_lock);
2056
2057	/*
2058	 * Note this update of @saved_auxv is lockless, thus
2059	 * if someone reads this member in procfs while we're
2060	 * updating, it may see partly updated results. It's a
2061	 * known and acceptable trade-off: we leave it as is so as
2062	 * not to introduce additional locks here and make the kernel
2063	 * more complex.
2064	 */
2065	if (prctl_map.auxv_size)
2066		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2067
2068	mmap_read_unlock(mm);
2069	return 0;
2070}
2071#endif /* CONFIG_CHECKPOINT_RESTORE */
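
/*
 * Editorial illustration (not part of the kernel source): an untested sketch
 * of PR_SET_MM_MAP_SIZE, which lets userspace (e.g. checkpoint/restore
 * tools) query the size of struct prctl_mm_map the kernel expects.  Only
 * works on kernels built with CONFIG_CHECKPOINT_RESTORE; the PR_SET_MM*
 * constants come from <linux/prctl.h> via <sys/prctl.h>.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int map_size = 0;

	/* The kernel writes sizeof(struct prctl_mm_map) into map_size. */
	if (prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE,
		  (unsigned long)&map_size, 0, 0)) {
		perror("PR_SET_MM_MAP_SIZE");
		return 1;
	}
	printf("struct prctl_mm_map is %u bytes\n", map_size);
	return 0;
}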
2072
2073static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2074			  unsigned long len)
2075{
2076	/*
2077	 * This doesn't move the auxiliary vector itself since it's pinned to
2078	 * mm_struct, but it permits filling the vector with new values.  It's
2079	 * up to the caller to provide sane values here, otherwise userspace
2080	 * tools which use this vector might be unhappy.
2081	 */
2082	unsigned long user_auxv[AT_VECTOR_SIZE];
2083
2084	if (len > sizeof(user_auxv))
2085		return -EINVAL;
2086
2087	if (copy_from_user(user_auxv, (const void __user *)addr, len))
2088		return -EFAULT;
2089
2090	/* Make sure the last entry is always AT_NULL */
2091	user_auxv[AT_VECTOR_SIZE - 2] = 0;
2092	user_auxv[AT_VECTOR_SIZE - 1] = 0;
2093
2094	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2095
2096	task_lock(current);
2097	memcpy(mm->saved_auxv, user_auxv, len);
2098	task_unlock(current);
2099
2100	return 0;
2101}
2102
2103static int prctl_set_mm(int opt, unsigned long addr,
2104			unsigned long arg4, unsigned long arg5)
2105{
2106	struct mm_struct *mm = current->mm;
2107	struct prctl_mm_map prctl_map = {
2108		.auxv = NULL,
2109		.auxv_size = 0,
2110		.exe_fd = -1,
2111	};
2112	struct vm_area_struct *vma;
2113	int error;
2114
2115	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2116			      opt != PR_SET_MM_MAP &&
2117			      opt != PR_SET_MM_MAP_SIZE)))
2118		return -EINVAL;
2119
2120#ifdef CONFIG_CHECKPOINT_RESTORE
2121	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2122		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2123#endif
2124
2125	if (!capable(CAP_SYS_RESOURCE))
2126		return -EPERM;
2127
2128	if (opt == PR_SET_MM_EXE_FILE)
2129		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2130
2131	if (opt == PR_SET_MM_AUXV)
2132		return prctl_set_auxv(mm, addr, arg4);
2133
2134	if (addr >= TASK_SIZE || addr < mmap_min_addr)
2135		return -EINVAL;
2136
2137	error = -EINVAL;
2138
2139	/*
2140	 * arg_lock protects concurrent updates of arg boundaries, and we need
2141	 * mmap_lock for a) concurrent sys_brk, b) finding the VMA for addr
2142	 * validation.
2143	 */
2144	mmap_read_lock(mm);
2145	vma = find_vma(mm, addr);
2146
2147	spin_lock(&mm->arg_lock);
2148	prctl_map.start_code	= mm->start_code;
2149	prctl_map.end_code	= mm->end_code;
2150	prctl_map.start_data	= mm->start_data;
2151	prctl_map.end_data	= mm->end_data;
2152	prctl_map.start_brk	= mm->start_brk;
2153	prctl_map.brk		= mm->brk;
2154	prctl_map.start_stack	= mm->start_stack;
2155	prctl_map.arg_start	= mm->arg_start;
2156	prctl_map.arg_end	= mm->arg_end;
2157	prctl_map.env_start	= mm->env_start;
2158	prctl_map.env_end	= mm->env_end;
2159
2160	switch (opt) {
2161	case PR_SET_MM_START_CODE:
2162		prctl_map.start_code = addr;
2163		break;
2164	case PR_SET_MM_END_CODE:
2165		prctl_map.end_code = addr;
2166		break;
2167	case PR_SET_MM_START_DATA:
2168		prctl_map.start_data = addr;
2169		break;
2170	case PR_SET_MM_END_DATA:
2171		prctl_map.end_data = addr;
2172		break;
2173	case PR_SET_MM_START_STACK:
2174		prctl_map.start_stack = addr;
2175		break;
2176	case PR_SET_MM_START_BRK:
2177		prctl_map.start_brk = addr;
2178		break;
2179	case PR_SET_MM_BRK:
2180		prctl_map.brk = addr;
2181		break;
2182	case PR_SET_MM_ARG_START:
2183		prctl_map.arg_start = addr;
2184		break;
2185	case PR_SET_MM_ARG_END:
2186		prctl_map.arg_end = addr;
2187		break;
2188	case PR_SET_MM_ENV_START:
2189		prctl_map.env_start = addr;
2190		break;
2191	case PR_SET_MM_ENV_END:
2192		prctl_map.env_end = addr;
2193		break;
2194	default:
2195		goto out;
2196	}
2197
2198	error = validate_prctl_map_addr(&prctl_map);
2199	if (error)
2200		goto out;
2201
2202	switch (opt) {
2203	/*
2204	 * If the command line arguments and environment
2205	 * are placed somewhere else on the stack, we can
2206	 * set them up here: ARG_START/END to set up the
2207	 * command line arguments and ENV_START/END
2208	 * for the environment.
2209	 */
2210	case PR_SET_MM_START_STACK:
2211	case PR_SET_MM_ARG_START:
2212	case PR_SET_MM_ARG_END:
2213	case PR_SET_MM_ENV_START:
2214	case PR_SET_MM_ENV_END:
2215		if (!vma) {
2216			error = -EFAULT;
2217			goto out;
2218		}
2219	}
2220
2221	mm->start_code	= prctl_map.start_code;
2222	mm->end_code	= prctl_map.end_code;
2223	mm->start_data	= prctl_map.start_data;
2224	mm->end_data	= prctl_map.end_data;
2225	mm->start_brk	= prctl_map.start_brk;
2226	mm->brk		= prctl_map.brk;
2227	mm->start_stack	= prctl_map.start_stack;
2228	mm->arg_start	= prctl_map.arg_start;
2229	mm->arg_end	= prctl_map.arg_end;
2230	mm->env_start	= prctl_map.env_start;
2231	mm->env_end	= prctl_map.env_end;
2232
2233	error = 0;
2234out:
2235	spin_unlock(&mm->arg_lock);
2236	mmap_read_unlock(mm);
2237	return error;
2238}
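
/*
 * Editorial illustration (not part of the kernel source): a heavily hedged
 * sketch of a single PR_SET_MM_* option.  These calls require
 * CAP_SYS_RESOURCE and the new value must pass validate_prctl_map_addr();
 * re-writing mm->brk with the current program break satisfies that
 * trivially, so this is effectively a no-op shown only for the calling
 * convention.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

int main(void)
{
	unsigned long cur_brk = (unsigned long)sbrk(0);

	if (prctl(PR_SET_MM, PR_SET_MM_BRK, cur_brk, 0, 0))
		perror("PR_SET_MM_BRK");	/* e.g. EPERM without CAP_SYS_RESOURCE */
	return 0;
}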
2239
2240#ifdef CONFIG_CHECKPOINT_RESTORE
2241static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2242{
2243	return put_user(me->clear_child_tid, tid_addr);
2244}
2245#else
2246static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2247{
2248	return -EINVAL;
2249}
2250#endif
2251
2252static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2253{
2254	/*
2255	 * If the task has has_child_subreaper set, all its descendants
2256	 * already have this flag too and new descendants will
2257	 * inherit it on fork, so skip them.
2258	 *
2259	 * If we've found the child_reaper, skip descendants in
2260	 * its subtree as they will never leave that pid namespace.
2261	 */
2262	if (p->signal->has_child_subreaper ||
2263	    is_child_reaper(task_pid(p)))
2264		return 0;
2265
2266	p->signal->has_child_subreaper = 1;
2267	return 1;
2268}
2269
2270int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2271{
2272	return -EINVAL;
2273}
2274
2275int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2276				    unsigned long ctrl)
2277{
2278	return -EINVAL;
2279}
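
/*
 * Editorial illustration (not part of the kernel source): the weak stubs
 * above return -EINVAL unless the architecture implements them.  Below is an
 * untested sketch querying the Speculative Store Bypass control; the
 * PR_SPEC_* constants need reasonably recent kernel headers.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/*
	 * On success this returns a bitmask such as PR_SPEC_PRCTL |
	 * PR_SPEC_ENABLE; on architectures without a real
	 * arch_prctl_spec_ctrl_get() it fails with EINVAL.
	 */
	int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (ret < 0)
		perror("PR_GET_SPECULATION_CTRL");
	else
		printf("ssb control: %#x\n", ret);
	return 0;
}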
2280
2281#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
2282
2283SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2284		unsigned long, arg4, unsigned long, arg5)
2285{
2286	struct task_struct *me = current;
2287	unsigned char comm[sizeof(me->comm)];
2288	long error;
2289
2290	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2291	if (error != -ENOSYS)
2292		return error;
2293
2294	error = 0;
2295	switch (option) {
2296	case PR_SET_PDEATHSIG:
2297		if (!valid_signal(arg2)) {
2298			error = -EINVAL;
2299			break;
2300		}
2301		me->pdeath_signal = arg2;
2302		break;
2303	case PR_GET_PDEATHSIG:
2304		error = put_user(me->pdeath_signal, (int __user *)arg2);
2305		break;
2306	case PR_GET_DUMPABLE:
2307		error = get_dumpable(me->mm);
2308		break;
2309	case PR_SET_DUMPABLE:
2310		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2311			error = -EINVAL;
2312			break;
2313		}
2314		set_dumpable(me->mm, arg2);
2315		break;
2316
2317	case PR_SET_UNALIGN:
2318		error = SET_UNALIGN_CTL(me, arg2);
2319		break;
2320	case PR_GET_UNALIGN:
2321		error = GET_UNALIGN_CTL(me, arg2);
2322		break;
2323	case PR_SET_FPEMU:
2324		error = SET_FPEMU_CTL(me, arg2);
2325		break;
2326	case PR_GET_FPEMU:
2327		error = GET_FPEMU_CTL(me, arg2);
2328		break;
2329	case PR_SET_FPEXC:
2330		error = SET_FPEXC_CTL(me, arg2);
2331		break;
2332	case PR_GET_FPEXC:
2333		error = GET_FPEXC_CTL(me, arg2);
2334		break;
2335	case PR_GET_TIMING:
2336		error = PR_TIMING_STATISTICAL;
2337		break;
2338	case PR_SET_TIMING:
2339		if (arg2 != PR_TIMING_STATISTICAL)
2340			error = -EINVAL;
2341		break;
2342	case PR_SET_NAME:
2343		comm[sizeof(me->comm) - 1] = 0;
2344		if (strncpy_from_user(comm, (char __user *)arg2,
2345				      sizeof(me->comm) - 1) < 0)
2346			return -EFAULT;
2347		set_task_comm(me, comm);
2348		proc_comm_connector(me);
2349		break;
2350	case PR_GET_NAME:
2351		get_task_comm(comm, me);
2352		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2353			return -EFAULT;
2354		break;
2355	case PR_GET_ENDIAN:
2356		error = GET_ENDIAN(me, arg2);
2357		break;
2358	case PR_SET_ENDIAN:
2359		error = SET_ENDIAN(me, arg2);
2360		break;
2361	case PR_GET_SECCOMP:
2362		error = prctl_get_seccomp();
2363		break;
2364	case PR_SET_SECCOMP:
2365		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2366		break;
2367	case PR_GET_TSC:
2368		error = GET_TSC_CTL(arg2);
2369		break;
2370	case PR_SET_TSC:
2371		error = SET_TSC_CTL(arg2);
2372		break;
2373	case PR_TASK_PERF_EVENTS_DISABLE:
2374		error = perf_event_task_disable();
2375		break;
2376	case PR_TASK_PERF_EVENTS_ENABLE:
2377		error = perf_event_task_enable();
2378		break;
2379	case PR_GET_TIMERSLACK:
2380		if (current->timer_slack_ns > ULONG_MAX)
2381			error = ULONG_MAX;
2382		else
2383			error = current->timer_slack_ns;
2384		break;
2385	case PR_SET_TIMERSLACK:
2386		if (arg2 <= 0)
2387			current->timer_slack_ns =
2388					current->default_timer_slack_ns;
2389		else
2390			current->timer_slack_ns = arg2;
2391		break;
2392	case PR_MCE_KILL:
2393		if (arg4 | arg5)
2394			return -EINVAL;
2395		switch (arg2) {
2396		case PR_MCE_KILL_CLEAR:
2397			if (arg3 != 0)
2398				return -EINVAL;
2399			current->flags &= ~PF_MCE_PROCESS;
2400			break;
2401		case PR_MCE_KILL_SET:
2402			current->flags |= PF_MCE_PROCESS;
2403			if (arg3 == PR_MCE_KILL_EARLY)
2404				current->flags |= PF_MCE_EARLY;
2405			else if (arg3 == PR_MCE_KILL_LATE)
2406				current->flags &= ~PF_MCE_EARLY;
2407			else if (arg3 == PR_MCE_KILL_DEFAULT)
2408				current->flags &=
2409						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2410			else
2411				return -EINVAL;
2412			break;
2413		default:
2414			return -EINVAL;
2415		}
2416		break;
2417	case PR_MCE_KILL_GET:
2418		if (arg2 | arg3 | arg4 | arg5)
2419			return -EINVAL;
2420		if (current->flags & PF_MCE_PROCESS)
2421			error = (current->flags & PF_MCE_EARLY) ?
2422				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2423		else
2424			error = PR_MCE_KILL_DEFAULT;
2425		break;
2426	case PR_SET_MM:
2427		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2428		break;
2429	case PR_GET_TID_ADDRESS:
2430		error = prctl_get_tid_address(me, (int __user **)arg2);
2431		break;
2432	case PR_SET_CHILD_SUBREAPER:
2433		me->signal->is_child_subreaper = !!arg2;
2434		if (!arg2)
2435			break;
2436
2437		walk_process_tree(me, propagate_has_child_subreaper, NULL);
2438		break;
2439	case PR_GET_CHILD_SUBREAPER:
2440		error = put_user(me->signal->is_child_subreaper,
2441				 (int __user *)arg2);
2442		break;
2443	case PR_SET_NO_NEW_PRIVS:
2444		if (arg2 != 1 || arg3 || arg4 || arg5)
2445			return -EINVAL;
2446
2447		task_set_no_new_privs(current);
2448		break;
2449	case PR_GET_NO_NEW_PRIVS:
2450		if (arg2 || arg3 || arg4 || arg5)
2451			return -EINVAL;
2452		return task_no_new_privs(current) ? 1 : 0;
2453	case PR_GET_THP_DISABLE:
2454		if (arg2 || arg3 || arg4 || arg5)
2455			return -EINVAL;
2456		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2457		break;
2458	case PR_SET_THP_DISABLE:
2459		if (arg3 || arg4 || arg5)
2460			return -EINVAL;
2461		if (mmap_write_lock_killable(me->mm))
2462			return -EINTR;
2463		if (arg2)
2464			set_bit(MMF_DISABLE_THP, &me->mm->flags);
2465		else
2466			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2467		mmap_write_unlock(me->mm);
2468		break;
2469	case PR_MPX_ENABLE_MANAGEMENT:
2470	case PR_MPX_DISABLE_MANAGEMENT:
2471		/* No longer implemented: */
2472		return -EINVAL;
2473	case PR_SET_FP_MODE:
2474		error = SET_FP_MODE(me, arg2);
2475		break;
2476	case PR_GET_FP_MODE:
2477		error = GET_FP_MODE(me);
2478		break;
2479	case PR_SVE_SET_VL:
2480		error = SVE_SET_VL(arg2);
2481		break;
2482	case PR_SVE_GET_VL:
2483		error = SVE_GET_VL();
2484		break;
2485	case PR_GET_SPECULATION_CTRL:
2486		if (arg3 || arg4 || arg5)
2487			return -EINVAL;
2488		error = arch_prctl_spec_ctrl_get(me, arg2);
2489		break;
2490	case PR_SET_SPECULATION_CTRL:
2491		if (arg4 || arg5)
2492			return -EINVAL;
2493		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2494		break;
2495	case PR_PAC_RESET_KEYS:
2496		if (arg3 || arg4 || arg5)
2497			return -EINVAL;
2498		error = PAC_RESET_KEYS(me, arg2);
2499		break;
2500	case PR_SET_TAGGED_ADDR_CTRL:
2501		if (arg3 || arg4 || arg5)
2502			return -EINVAL;
2503		error = SET_TAGGED_ADDR_CTRL(arg2);
2504		break;
2505	case PR_GET_TAGGED_ADDR_CTRL:
2506		if (arg2 || arg3 || arg4 || arg5)
2507			return -EINVAL;
2508		error = GET_TAGGED_ADDR_CTRL();
2509		break;
2510	case PR_SET_IO_FLUSHER:
2511		if (!capable(CAP_SYS_RESOURCE))
2512			return -EPERM;
2513
2514		if (arg3 || arg4 || arg5)
2515			return -EINVAL;
2516
2517		if (arg2 == 1)
2518			current->flags |= PR_IO_FLUSHER;
2519		else if (!arg2)
2520			current->flags &= ~PR_IO_FLUSHER;
2521		else
2522			return -EINVAL;
2523		break;
2524	case PR_GET_IO_FLUSHER:
2525		if (!capable(CAP_SYS_RESOURCE))
2526			return -EPERM;
2527
2528		if (arg2 || arg3 || arg4 || arg5)
2529			return -EINVAL;
2530
2531		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
2532		break;
2533	default:
2534		error = -EINVAL;
2535		break;
2536	}
2537	return error;
2538}
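
/*
 * Editorial illustration (not part of the kernel source): an untested sketch
 * of a few of the simpler prctl() options handled above.  The task comm is
 * at most 15 characters plus the terminating NUL (sizeof(me->comm) == 16).
 */
#include <signal.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char name[16] = "";	/* TASK_COMM_LEN bytes, always NUL terminated */

	/* The name is silently truncated to 15 characters. */
	prctl(PR_SET_NAME, (unsigned long)"worker-thread-0", 0, 0, 0);
	prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
	printf("comm=%s\n", name);

	/* Deliver SIGTERM to this thread when its parent dies. */
	if (prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0))
		perror("PR_SET_PDEATHSIG");
	return 0;
}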
2539
2540SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2541		struct getcpu_cache __user *, unused)
2542{
2543	int err = 0;
2544	int cpu = raw_smp_processor_id();
2545
2546	if (cpup)
2547		err |= put_user(cpu, cpup);
2548	if (nodep)
2549		err |= put_user(cpu_to_node(cpu), nodep);
2550	return err ? -EFAULT : 0;
2551}
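
/*
 * Editorial illustration (not part of the kernel source): an untested sketch
 * of getcpu(2) via syscall(2), since older glibc versions have no wrapper.
 * Either output pointer may be NULL, and the third (getcpu_cache) argument
 * is unused.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned int cpu = 0, node = 0;

	if (syscall(SYS_getcpu, &cpu, &node, NULL)) {
		perror("getcpu");
		return 1;
	}
	printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}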
2552
2553/**
2554 * do_sysinfo - fill in sysinfo struct
2555 * @info: pointer to buffer to fill
2556 */
2557static int do_sysinfo(struct sysinfo *info)
2558{
2559	unsigned long mem_total, sav_total;
2560	unsigned int mem_unit, bitcount;
2561	struct timespec64 tp;
2562
2563	memset(info, 0, sizeof(struct sysinfo));
2564
2565	ktime_get_boottime_ts64(&tp);
2566	timens_add_boottime(&tp);
2567	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2568
2569	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2570
2571	info->procs = nr_threads;
2572
2573	si_meminfo(info);
2574	si_swapinfo(info);
2575
2576	/*
2577	 * If the sum of all the available memory (i.e. ram + swap)
2578	 * is less than can be stored in a 32 bit unsigned long then
2579	 * we can be binary compatible with 2.2.x kernels.  If not,
2580	 * well, in that case 2.2.x was broken anyways...
2581	 *
2582	 *  -Erik Andersen <andersee@debian.org>
2583	 */
2584
2585	mem_total = info->totalram + info->totalswap;
2586	if (mem_total < info->totalram || mem_total < info->totalswap)
2587		goto out;
2588	bitcount = 0;
2589	mem_unit = info->mem_unit;
2590	while (mem_unit > 1) {
2591		bitcount++;
2592		mem_unit >>= 1;
2593		sav_total = mem_total;
2594		mem_total <<= 1;
2595		if (mem_total < sav_total)
2596			goto out;
2597	}
2598
2599	/*
2600	 * If mem_total did not overflow, multiply all memory values by
2601	 * info->mem_unit and set it to 1.  This leaves things compatible
2602	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2603	 * kernels...
2604	 */
2605
2606	info->mem_unit = 1;
2607	info->totalram <<= bitcount;
2608	info->freeram <<= bitcount;
2609	info->sharedram <<= bitcount;
2610	info->bufferram <<= bitcount;
2611	info->totalswap <<= bitcount;
2612	info->freeswap <<= bitcount;
2613	info->totalhigh <<= bitcount;
2614	info->freehigh <<= bitcount;
2615
2616out:
2617	return 0;
2618}
2619
2620SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2621{
2622	struct sysinfo val;
2623
2624	do_sysinfo(&val);
2625
2626	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2627		return -EFAULT;
2628
2629	return 0;
2630}
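
/*
 * Editorial illustration (not part of the kernel source): an untested sketch
 * of sysinfo(2).  The memory fields are expressed in multiples of mem_unit
 * bytes, per the scaling logic in do_sysinfo() above.
 */
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si)) {
		perror("sysinfo");
		return 1;
	}
	printf("uptime: %ld s, total ram: %llu MiB, free ram: %llu MiB\n",
	       si.uptime,
	       ((unsigned long long)si.totalram * si.mem_unit) >> 20,
	       ((unsigned long long)si.freeram * si.mem_unit) >> 20);
	return 0;
}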
2631
2632#ifdef CONFIG_COMPAT
2633struct compat_sysinfo {
2634	s32 uptime;
2635	u32 loads[3];
2636	u32 totalram;
2637	u32 freeram;
2638	u32 sharedram;
2639	u32 bufferram;
2640	u32 totalswap;
2641	u32 freeswap;
2642	u16 procs;
2643	u16 pad;
2644	u32 totalhigh;
2645	u32 freehigh;
2646	u32 mem_unit;
2647	char _f[20-2*sizeof(u32)-sizeof(int)];
2648};
2649
2650COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2651{
2652	struct sysinfo s;
2653	struct compat_sysinfo s_32;
2654
2655	do_sysinfo(&s);
2656
2657	/* Check to see if any memory value is too large for 32-bit and scale
2658	 * down if needed.
2659	 */
2660	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2661		int bitcount = 0;
2662
2663		while (s.mem_unit < PAGE_SIZE) {
2664			s.mem_unit <<= 1;
2665			bitcount++;
2666		}
2667
2668		s.totalram >>= bitcount;
2669		s.freeram >>= bitcount;
2670		s.sharedram >>= bitcount;
2671		s.bufferram >>= bitcount;
2672		s.totalswap >>= bitcount;
2673		s.freeswap >>= bitcount;
2674		s.totalhigh >>= bitcount;
2675		s.freehigh >>= bitcount;
2676	}
2677
2678	memset(&s_32, 0, sizeof(s_32));
2679	s_32.uptime = s.uptime;
2680	s_32.loads[0] = s.loads[0];
2681	s_32.loads[1] = s.loads[1];
2682	s_32.loads[2] = s.loads[2];
2683	s_32.totalram = s.totalram;
2684	s_32.freeram = s.freeram;
2685	s_32.sharedram = s.sharedram;
2686	s_32.bufferram = s.bufferram;
2687	s_32.totalswap = s.totalswap;
2688	s_32.freeswap = s.freeswap;
2689	s_32.procs = s.procs;
2690	s_32.totalhigh = s.totalhigh;
2691	s_32.freehigh = s.freehigh;
2692	s_32.mem_unit = s.mem_unit;
2693	if (copy_to_user(info, &s_32, sizeof(s_32)))
2694		return -EFAULT;
2695	return 0;
2696}
2697#endif /* CONFIG_COMPAT */