   1/*
   2 *  linux/kernel/sys.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 */
   6
   7#include <linux/export.h>
   8#include <linux/mm.h>
   9#include <linux/utsname.h>
  10#include <linux/mman.h>
  11#include <linux/reboot.h>
  12#include <linux/prctl.h>
  13#include <linux/highuid.h>
  14#include <linux/fs.h>
  15#include <linux/kmod.h>
  16#include <linux/perf_event.h>
  17#include <linux/resource.h>
  18#include <linux/kernel.h>
  19#include <linux/workqueue.h>
  20#include <linux/capability.h>
  21#include <linux/device.h>
  22#include <linux/key.h>
  23#include <linux/times.h>
  24#include <linux/posix-timers.h>
  25#include <linux/security.h>
  26#include <linux/dcookies.h>
  27#include <linux/suspend.h>
  28#include <linux/tty.h>
  29#include <linux/signal.h>
  30#include <linux/cn_proc.h>
  31#include <linux/getcpu.h>
  32#include <linux/task_io_accounting_ops.h>
  33#include <linux/seccomp.h>
  34#include <linux/cpu.h>
  35#include <linux/personality.h>
  36#include <linux/ptrace.h>
  37#include <linux/fs_struct.h>
  38#include <linux/file.h>
  39#include <linux/mount.h>
  40#include <linux/gfp.h>
  41#include <linux/syscore_ops.h>
  42#include <linux/version.h>
  43#include <linux/ctype.h>
  44
  45#include <linux/compat.h>
  46#include <linux/syscalls.h>
  47#include <linux/kprobes.h>
  48#include <linux/user_namespace.h>
  49#include <linux/binfmts.h>
  50
  51#include <linux/sched.h>
  52#include <linux/rcupdate.h>
  53#include <linux/uidgid.h>
  54#include <linux/cred.h>
  55
  56#include <linux/kmsg_dump.h>
  57/* Move somewhere else to avoid recompiling? */
  58#include <generated/utsrelease.h>
  59
  60#include <asm/uaccess.h>
  61#include <asm/io.h>
  62#include <asm/unistd.h>
  63
  64#ifndef SET_UNALIGN_CTL
  65# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
  66#endif
  67#ifndef GET_UNALIGN_CTL
  68# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
  69#endif
  70#ifndef SET_FPEMU_CTL
  71# define SET_FPEMU_CTL(a, b)	(-EINVAL)
  72#endif
  73#ifndef GET_FPEMU_CTL
  74# define GET_FPEMU_CTL(a, b)	(-EINVAL)
  75#endif
  76#ifndef SET_FPEXC_CTL
  77# define SET_FPEXC_CTL(a, b)	(-EINVAL)
  78#endif
  79#ifndef GET_FPEXC_CTL
  80# define GET_FPEXC_CTL(a, b)	(-EINVAL)
  81#endif
  82#ifndef GET_ENDIAN
  83# define GET_ENDIAN(a, b)	(-EINVAL)
  84#endif
  85#ifndef SET_ENDIAN
  86# define SET_ENDIAN(a, b)	(-EINVAL)
  87#endif
  88#ifndef GET_TSC_CTL
  89# define GET_TSC_CTL(a)		(-EINVAL)
  90#endif
  91#ifndef SET_TSC_CTL
  92# define SET_TSC_CTL(a)		(-EINVAL)
  93#endif
  94#ifndef MPX_ENABLE_MANAGEMENT
  95# define MPX_ENABLE_MANAGEMENT()	(-EINVAL)
  96#endif
  97#ifndef MPX_DISABLE_MANAGEMENT
  98# define MPX_DISABLE_MANAGEMENT()	(-EINVAL)
  99#endif
 100#ifndef GET_FP_MODE
 101# define GET_FP_MODE(a)		(-EINVAL)
 102#endif
 103#ifndef SET_FP_MODE
 104# define SET_FP_MODE(a,b)	(-EINVAL)
 105#endif
 106
 107/*
 108 * this is where the system-wide overflow UID and GID are defined, for
 109 * architectures that now have 32-bit UID/GID but didn't in the past
 110 */
 111
 112int overflowuid = DEFAULT_OVERFLOWUID;
 113int overflowgid = DEFAULT_OVERFLOWGID;
 114
 115EXPORT_SYMBOL(overflowuid);
 116EXPORT_SYMBOL(overflowgid);
 117
 118/*
 119 * the same as above, but for filesystems which can only store a 16-bit
 120 * UID and GID. as such, this is needed on all architectures
 121 */
 122
 123int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
  124int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
 125
 126EXPORT_SYMBOL(fs_overflowuid);
 127EXPORT_SYMBOL(fs_overflowgid);
 128
 129/*
 130 * Returns true if current's euid is same as p's uid or euid,
  131 * or has CAP_SYS_NICE in p's user_ns.
 132 *
 133 * Called with rcu_read_lock, creds are safe
 134 */
 135static bool set_one_prio_perm(struct task_struct *p)
 136{
 137	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
 138
 139	if (uid_eq(pcred->uid,  cred->euid) ||
 140	    uid_eq(pcred->euid, cred->euid))
 141		return true;
 142	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
 143		return true;
 144	return false;
 145}
 146
 147/*
 148 * set the priority of a task
 149 * - the caller must hold the RCU read lock
 150 */
 151static int set_one_prio(struct task_struct *p, int niceval, int error)
 152{
 153	int no_nice;
 154
 155	if (!set_one_prio_perm(p)) {
 156		error = -EPERM;
 157		goto out;
 158	}
 159	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
 160		error = -EACCES;
 161		goto out;
 162	}
 163	no_nice = security_task_setnice(p, niceval);
 164	if (no_nice) {
 165		error = no_nice;
 166		goto out;
 167	}
 168	if (error == -ESRCH)
 169		error = 0;
 170	set_user_nice(p, niceval);
 171out:
 172	return error;
 173}
 174
 175SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 176{
 177	struct task_struct *g, *p;
 178	struct user_struct *user;
 179	const struct cred *cred = current_cred();
 180	int error = -EINVAL;
 181	struct pid *pgrp;
 182	kuid_t uid;
 183
 184	if (which > PRIO_USER || which < PRIO_PROCESS)
 185		goto out;
 186
 187	/* normalize: avoid signed division (rounding problems) */
 188	error = -ESRCH;
 189	if (niceval < MIN_NICE)
 190		niceval = MIN_NICE;
 191	if (niceval > MAX_NICE)
 192		niceval = MAX_NICE;
 193
 194	rcu_read_lock();
 195	read_lock(&tasklist_lock);
 196	switch (which) {
 197	case PRIO_PROCESS:
 198		if (who)
 199			p = find_task_by_vpid(who);
 200		else
 201			p = current;
 202		if (p)
 203			error = set_one_prio(p, niceval, error);
 204		break;
 205	case PRIO_PGRP:
 206		if (who)
 207			pgrp = find_vpid(who);
 208		else
 209			pgrp = task_pgrp(current);
 210		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 211			error = set_one_prio(p, niceval, error);
 212		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 213		break;
 214	case PRIO_USER:
 215		uid = make_kuid(cred->user_ns, who);
 216		user = cred->user;
 217		if (!who)
 218			uid = cred->uid;
 219		else if (!uid_eq(uid, cred->uid)) {
 220			user = find_user(uid);
 221			if (!user)
 222				goto out_unlock;	/* No processes for this user */
 223		}
 224		do_each_thread(g, p) {
 225			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
 226				error = set_one_prio(p, niceval, error);
 227		} while_each_thread(g, p);
 228		if (!uid_eq(uid, cred->uid))
 229			free_uid(user);		/* For find_user() */
 230		break;
 231	}
 232out_unlock:
 233	read_unlock(&tasklist_lock);
 234	rcu_read_unlock();
 235out:
 236	return error;
 237}
 238
 239/*
 240 * Ugh. To avoid negative return values, "getpriority()" will
 241 * not return the normal nice-value, but a negated value that
 242 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 243 * to stay compatible.
 244 */
 245SYSCALL_DEFINE2(getpriority, int, which, int, who)
 246{
 247	struct task_struct *g, *p;
 248	struct user_struct *user;
 249	const struct cred *cred = current_cred();
 250	long niceval, retval = -ESRCH;
 251	struct pid *pgrp;
 252	kuid_t uid;
 253
 254	if (which > PRIO_USER || which < PRIO_PROCESS)
 255		return -EINVAL;
 256
 257	rcu_read_lock();
 258	read_lock(&tasklist_lock);
 259	switch (which) {
 260	case PRIO_PROCESS:
 261		if (who)
 262			p = find_task_by_vpid(who);
 263		else
 264			p = current;
 265		if (p) {
 266			niceval = nice_to_rlimit(task_nice(p));
 267			if (niceval > retval)
 268				retval = niceval;
 269		}
 270		break;
 271	case PRIO_PGRP:
 272		if (who)
 273			pgrp = find_vpid(who);
 274		else
 275			pgrp = task_pgrp(current);
 276		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 277			niceval = nice_to_rlimit(task_nice(p));
 278			if (niceval > retval)
 279				retval = niceval;
 280		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 281		break;
 282	case PRIO_USER:
 283		uid = make_kuid(cred->user_ns, who);
 284		user = cred->user;
 285		if (!who)
 286			uid = cred->uid;
 287		else if (!uid_eq(uid, cred->uid)) {
 288			user = find_user(uid);
 289			if (!user)
 290				goto out_unlock;	/* No processes for this user */
 291		}
 292		do_each_thread(g, p) {
 293			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
 294				niceval = nice_to_rlimit(task_nice(p));
 295				if (niceval > retval)
 296					retval = niceval;
 297			}
 298		} while_each_thread(g, p);
 299		if (!uid_eq(uid, cred->uid))
 300			free_uid(user);		/* for find_user() */
 301		break;
 302	}
 303out_unlock:
 304	read_unlock(&tasklist_lock);
 305	rcu_read_unlock();
 306
 307	return retval;
 308}
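/*
 * Editor's example (an illustrative sketch, not part of kernel/sys.c):
 * the biased return value described above is visible when calling the
 * raw syscall, while the glibc getpriority() wrapper converts it back
 * to the usual -20..19 range. Error handling is elided for brevity.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *		int nice = getpriority(PRIO_PROCESS, 0); // glibc wrapper
 *		printf("raw=%ld nice=%d\n", raw, nice);  // e.g. raw=20 nice=0
 *		return 0;
 *	}
 */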
 309
 310/*
 311 * Unprivileged users may change the real gid to the effective gid
 312 * or vice versa.  (BSD-style)
 313 *
 314 * If you set the real gid at all, or set the effective gid to a value not
 315 * equal to the real gid, then the saved gid is set to the new effective gid.
 316 *
 317 * This makes it possible for a setgid program to completely drop its
 318 * privileges, which is often a useful assertion to make when you are doing
 319 * a security audit over a program.
 320 *
 321 * The general idea is that a program which uses just setregid() will be
 322 * 100% compatible with BSD.  A program which uses just setgid() will be
 323 * 100% compatible with POSIX with saved IDs.
 324 *
  325 * SMP: There are no races; the GIDs are checked only by filesystem
 326 *      operations (as far as semantic preservation is concerned).
 327 */
 328#ifdef CONFIG_MULTIUSER
 329SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 330{
 331	struct user_namespace *ns = current_user_ns();
 332	const struct cred *old;
 333	struct cred *new;
 334	int retval;
 335	kgid_t krgid, kegid;
 336
 337	krgid = make_kgid(ns, rgid);
 338	kegid = make_kgid(ns, egid);
 339
 340	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 341		return -EINVAL;
 342	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 343		return -EINVAL;
 344
 345	new = prepare_creds();
 346	if (!new)
 347		return -ENOMEM;
 348	old = current_cred();
 349
 350	retval = -EPERM;
 351	if (rgid != (gid_t) -1) {
 352		if (gid_eq(old->gid, krgid) ||
 353		    gid_eq(old->egid, krgid) ||
 354		    ns_capable(old->user_ns, CAP_SETGID))
 355			new->gid = krgid;
 356		else
 357			goto error;
 358	}
 359	if (egid != (gid_t) -1) {
 360		if (gid_eq(old->gid, kegid) ||
 361		    gid_eq(old->egid, kegid) ||
 362		    gid_eq(old->sgid, kegid) ||
 363		    ns_capable(old->user_ns, CAP_SETGID))
 364			new->egid = kegid;
 365		else
 366			goto error;
 367	}
 368
 369	if (rgid != (gid_t) -1 ||
 370	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
 371		new->sgid = new->egid;
 372	new->fsgid = new->egid;
 373
 374	return commit_creds(new);
 375
 376error:
 377	abort_creds(new);
 378	return retval;
 379}
 380
 381/*
 382 * setgid() is implemented like SysV w/ SAVED_IDS
 383 *
 384 * SMP: Same implicit races as above.
 385 */
 386SYSCALL_DEFINE1(setgid, gid_t, gid)
 387{
 388	struct user_namespace *ns = current_user_ns();
 389	const struct cred *old;
 390	struct cred *new;
 391	int retval;
 392	kgid_t kgid;
 393
 394	kgid = make_kgid(ns, gid);
 395	if (!gid_valid(kgid))
 396		return -EINVAL;
 397
 398	new = prepare_creds();
 399	if (!new)
 400		return -ENOMEM;
 401	old = current_cred();
 402
 403	retval = -EPERM;
 404	if (ns_capable(old->user_ns, CAP_SETGID))
 405		new->gid = new->egid = new->sgid = new->fsgid = kgid;
 406	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
 407		new->egid = new->fsgid = kgid;
 408	else
 409		goto error;
 410
 411	return commit_creds(new);
 412
 413error:
 414	abort_creds(new);
 415	return retval;
 416}
 417
 418/*
 419 * change the user struct in a credentials set to match the new UID
 420 */
 421static int set_user(struct cred *new)
 422{
 423	struct user_struct *new_user;
 424
 425	new_user = alloc_uid(new->uid);
 426	if (!new_user)
 427		return -EAGAIN;
 428
 429	/*
 430	 * We don't fail in case of NPROC limit excess here because too many
 431	 * poorly written programs don't check set*uid() return code, assuming
 432	 * it never fails if called by root.  We may still enforce NPROC limit
 433	 * for programs doing set*uid()+execve() by harmlessly deferring the
 434	 * failure to the execve() stage.
 435	 */
 436	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
 437			new_user != INIT_USER)
 438		current->flags |= PF_NPROC_EXCEEDED;
 439	else
 440		current->flags &= ~PF_NPROC_EXCEEDED;
 441
 442	free_uid(new->user);
 443	new->user = new_user;
 444	return 0;
 445}
 446
 447/*
 448 * Unprivileged users may change the real uid to the effective uid
 449 * or vice versa.  (BSD-style)
 450 *
 451 * If you set the real uid at all, or set the effective uid to a value not
 452 * equal to the real uid, then the saved uid is set to the new effective uid.
 453 *
 454 * This makes it possible for a setuid program to completely drop its
 455 * privileges, which is often a useful assertion to make when you are doing
 456 * a security audit over a program.
 457 *
 458 * The general idea is that a program which uses just setreuid() will be
 459 * 100% compatible with BSD.  A program which uses just setuid() will be
 460 * 100% compatible with POSIX with saved IDs.
 461 */
 462SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 463{
 464	struct user_namespace *ns = current_user_ns();
 465	const struct cred *old;
 466	struct cred *new;
 467	int retval;
 468	kuid_t kruid, keuid;
 469
 470	kruid = make_kuid(ns, ruid);
 471	keuid = make_kuid(ns, euid);
 472
 473	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 474		return -EINVAL;
 475	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 476		return -EINVAL;
 477
 478	new = prepare_creds();
 479	if (!new)
 480		return -ENOMEM;
 481	old = current_cred();
 482
 483	retval = -EPERM;
 484	if (ruid != (uid_t) -1) {
 485		new->uid = kruid;
 486		if (!uid_eq(old->uid, kruid) &&
 487		    !uid_eq(old->euid, kruid) &&
 488		    !ns_capable(old->user_ns, CAP_SETUID))
 489			goto error;
 490	}
 491
 492	if (euid != (uid_t) -1) {
 493		new->euid = keuid;
 494		if (!uid_eq(old->uid, keuid) &&
 495		    !uid_eq(old->euid, keuid) &&
 496		    !uid_eq(old->suid, keuid) &&
 497		    !ns_capable(old->user_ns, CAP_SETUID))
 498			goto error;
 499	}
 500
 501	if (!uid_eq(new->uid, old->uid)) {
 502		retval = set_user(new);
 503		if (retval < 0)
 504			goto error;
 505	}
 506	if (ruid != (uid_t) -1 ||
 507	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
 508		new->suid = new->euid;
 509	new->fsuid = new->euid;
 510
 511	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
 512	if (retval < 0)
 513		goto error;
 514
 515	return commit_creds(new);
 516
 517error:
 518	abort_creds(new);
 519	return retval;
 520}
 521
 522/*
 523 * setuid() is implemented like SysV with SAVED_IDS
 524 *
 525 * Note that SAVED_ID's is deficient in that a setuid root program
 526 * like sendmail, for example, cannot set its uid to be a normal
 527 * user and then switch back, because if you're root, setuid() sets
 528 * the saved uid too.  If you don't like this, blame the bright people
 529 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 530 * will allow a root program to temporarily drop privileges and be able to
 531 * regain them by swapping the real and effective uid.
 532 */
 533SYSCALL_DEFINE1(setuid, uid_t, uid)
 534{
 535	struct user_namespace *ns = current_user_ns();
 536	const struct cred *old;
 537	struct cred *new;
 538	int retval;
 539	kuid_t kuid;
 540
 541	kuid = make_kuid(ns, uid);
 542	if (!uid_valid(kuid))
 543		return -EINVAL;
 544
 545	new = prepare_creds();
 546	if (!new)
 547		return -ENOMEM;
 548	old = current_cred();
 549
 550	retval = -EPERM;
 551	if (ns_capable(old->user_ns, CAP_SETUID)) {
 552		new->suid = new->uid = kuid;
 553		if (!uid_eq(kuid, old->uid)) {
 554			retval = set_user(new);
 555			if (retval < 0)
 556				goto error;
 557		}
 558	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
 559		goto error;
 560	}
 561
 562	new->fsuid = new->euid = kuid;
 563
 564	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
 565	if (retval < 0)
 566		goto error;
 567
 568	return commit_creds(new);
 569
 570error:
 571	abort_creds(new);
 572	return retval;
 573}
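/*
 * Editor's example (hedged sketch, not kernel source): the temporary
 * privilege drop described in the comment above, using BSD-style
 * setreuid() to swap effective identity while keeping the real uid.
 * The unprivileged uid 1000 is hypothetical; real code must check the
 * return values.
 *
 *	#include <unistd.h>
 *
 *	void drop_and_regain(void)
 *	{
 *		setreuid(0, 1000);	// keep real uid 0, run as euid 1000
 *		// ... do unprivileged work ...
 *		setreuid(-1, 0);	// allowed: the real uid is still 0
 *	}
 */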
 574
 575
 576/*
 577 * This function implements a generic ability to update ruid, euid,
 578 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 579 */
 580SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 581{
 582	struct user_namespace *ns = current_user_ns();
 583	const struct cred *old;
 584	struct cred *new;
 585	int retval;
 586	kuid_t kruid, keuid, ksuid;
 587
 588	kruid = make_kuid(ns, ruid);
 589	keuid = make_kuid(ns, euid);
 590	ksuid = make_kuid(ns, suid);
 591
 592	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 593		return -EINVAL;
 594
 595	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 596		return -EINVAL;
 597
 598	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
 599		return -EINVAL;
 600
 601	new = prepare_creds();
 602	if (!new)
 603		return -ENOMEM;
 604
 605	old = current_cred();
 606
 607	retval = -EPERM;
 608	if (!ns_capable(old->user_ns, CAP_SETUID)) {
 609		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
 610		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
 611			goto error;
 612		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
 613		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
 614			goto error;
 615		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
 616		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
 617			goto error;
 618	}
 619
 620	if (ruid != (uid_t) -1) {
 621		new->uid = kruid;
 622		if (!uid_eq(kruid, old->uid)) {
 623			retval = set_user(new);
 624			if (retval < 0)
 625				goto error;
 626		}
 627	}
 628	if (euid != (uid_t) -1)
 629		new->euid = keuid;
 630	if (suid != (uid_t) -1)
 631		new->suid = ksuid;
 632	new->fsuid = new->euid;
 633
 634	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
 635	if (retval < 0)
 636		goto error;
 637
 638	return commit_creds(new);
 639
 640error:
 641	abort_creds(new);
 642	return retval;
 643}
 644
 645SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
 646{
 647	const struct cred *cred = current_cred();
 648	int retval;
 649	uid_t ruid, euid, suid;
 650
 651	ruid = from_kuid_munged(cred->user_ns, cred->uid);
 652	euid = from_kuid_munged(cred->user_ns, cred->euid);
 653	suid = from_kuid_munged(cred->user_ns, cred->suid);
 654
 655	retval = put_user(ruid, ruidp);
 656	if (!retval) {
 657		retval = put_user(euid, euidp);
 658		if (!retval)
 659			return put_user(suid, suidp);
 660	}
 661	return retval;
 662}
 663
 664/*
 665 * Same as above, but for rgid, egid, sgid.
 666 */
 667SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 668{
 669	struct user_namespace *ns = current_user_ns();
 670	const struct cred *old;
 671	struct cred *new;
 672	int retval;
 673	kgid_t krgid, kegid, ksgid;
 674
 675	krgid = make_kgid(ns, rgid);
 676	kegid = make_kgid(ns, egid);
 677	ksgid = make_kgid(ns, sgid);
 678
 679	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 680		return -EINVAL;
 681	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 682		return -EINVAL;
 683	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
 684		return -EINVAL;
 685
 686	new = prepare_creds();
 687	if (!new)
 688		return -ENOMEM;
 689	old = current_cred();
 690
 691	retval = -EPERM;
 692	if (!ns_capable(old->user_ns, CAP_SETGID)) {
 693		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
 694		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
 695			goto error;
 696		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
 697		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
 698			goto error;
 699		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
 700		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
 701			goto error;
 702	}
 703
 704	if (rgid != (gid_t) -1)
 705		new->gid = krgid;
 706	if (egid != (gid_t) -1)
 707		new->egid = kegid;
 708	if (sgid != (gid_t) -1)
 709		new->sgid = ksgid;
 710	new->fsgid = new->egid;
 711
 712	return commit_creds(new);
 713
 714error:
 715	abort_creds(new);
 716	return retval;
 717}
 718
 719SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
 720{
 721	const struct cred *cred = current_cred();
 722	int retval;
 723	gid_t rgid, egid, sgid;
 724
 725	rgid = from_kgid_munged(cred->user_ns, cred->gid);
 726	egid = from_kgid_munged(cred->user_ns, cred->egid);
 727	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
 728
 729	retval = put_user(rgid, rgidp);
 730	if (!retval) {
 731		retval = put_user(egid, egidp);
 732		if (!retval)
 733			retval = put_user(sgid, sgidp);
 734	}
 735
 736	return retval;
 737}
 738
 739
 740/*
 741 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 742 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 743 * whatever uid it wants to). It normally shadows "euid", except when
 744 * explicitly set by setfsuid() or for access..
 745 */
 746SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 747{
 748	const struct cred *old;
 749	struct cred *new;
 750	uid_t old_fsuid;
 751	kuid_t kuid;
 752
 753	old = current_cred();
 754	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
 755
 756	kuid = make_kuid(old->user_ns, uid);
 757	if (!uid_valid(kuid))
 758		return old_fsuid;
 759
 760	new = prepare_creds();
 761	if (!new)
 762		return old_fsuid;
 763
 764	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
 765	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
 766	    ns_capable(old->user_ns, CAP_SETUID)) {
 767		if (!uid_eq(kuid, old->fsuid)) {
 768			new->fsuid = kuid;
 769			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 770				goto change_okay;
 771		}
 772	}
 773
 774	abort_creds(new);
 775	return old_fsuid;
 776
 777change_okay:
 778	commit_creds(new);
 779	return old_fsuid;
 780}
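/*
 * Editor's note (illustrative, not from the tree): as the code above
 * shows, setfsuid() returns the previous fsuid whether or not the
 * change took effect, so the only way for userspace to detect failure
 * is to call it a second time and compare. A sketch:
 *
 *	#include <sys/types.h>
 *	#include <sys/fsuid.h>
 *
 *	int change_fsuid(uid_t uid)
 *	{
 *		setfsuid(uid);
 *		return (uid_t)setfsuid(uid) == uid ? 0 : -1;	// re-reads fsuid
 *	}
 */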
 781
 782/*
  783 * Samma på svenska.. ("Same in Swedish": the fsgid counterpart of the above)
 784 */
 785SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 786{
 787	const struct cred *old;
 788	struct cred *new;
 789	gid_t old_fsgid;
 790	kgid_t kgid;
 791
 792	old = current_cred();
 793	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
 794
 795	kgid = make_kgid(old->user_ns, gid);
 796	if (!gid_valid(kgid))
 797		return old_fsgid;
 798
 799	new = prepare_creds();
 800	if (!new)
 801		return old_fsgid;
 802
 803	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
 804	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
 805	    ns_capable(old->user_ns, CAP_SETGID)) {
 806		if (!gid_eq(kgid, old->fsgid)) {
 807			new->fsgid = kgid;
 808			goto change_okay;
 809		}
 810	}
 811
 812	abort_creds(new);
 813	return old_fsgid;
 814
 815change_okay:
 816	commit_creds(new);
 817	return old_fsgid;
 818}
 819#endif /* CONFIG_MULTIUSER */
 820
 821/**
 822 * sys_getpid - return the thread group id of the current process
 823 *
 824 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 825 * the pid are identical unless CLONE_THREAD was specified on clone() in
 826 * which case the tgid is the same in all threads of the same group.
 827 *
 828 * This is SMP safe as current->tgid does not change.
 829 */
 830SYSCALL_DEFINE0(getpid)
 831{
 832	return task_tgid_vnr(current);
 833}
 834
 835/* Thread ID - the internal kernel "pid" */
 836SYSCALL_DEFINE0(gettid)
 837{
 838	return task_pid_vnr(current);
 839}
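/*
 * Editor's example (hedged sketch, not kernel source): the tgid/pid
 * distinction documented above. Every thread of a process reports the
 * same getpid() value, while gettid() differs per thread; glibc of
 * this vintage lacks a gettid() wrapper, hence the raw syscall.
 *
 *	#include <pthread.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void *worker(void *arg)
 *	{
 *		printf("pid=%d tid=%ld\n", getpid(), syscall(SYS_gettid));
 *		return NULL;
 *	}
 *
 *	int main(void)
 *	{
 *		pthread_t t;
 *		printf("pid=%d tid=%ld\n", getpid(), syscall(SYS_gettid));
 *		pthread_create(&t, NULL, worker, NULL);	// same pid, new tid
 *		pthread_join(t, NULL);
 *		return 0;
 *	}
 */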
 840
 841/*
 842 * Accessing ->real_parent is not SMP-safe, it could
 843 * change from under us. However, we can use a stale
 844 * value of ->real_parent under rcu_read_lock(), see
 845 * release_task()->call_rcu(delayed_put_task_struct).
 846 */
 847SYSCALL_DEFINE0(getppid)
 848{
 849	int pid;
 850
 851	rcu_read_lock();
 852	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
 853	rcu_read_unlock();
 854
 855	return pid;
 856}
 857
 858SYSCALL_DEFINE0(getuid)
 859{
 860	/* Only we change this so SMP safe */
 861	return from_kuid_munged(current_user_ns(), current_uid());
 862}
 863
 864SYSCALL_DEFINE0(geteuid)
 865{
 866	/* Only we change this so SMP safe */
 867	return from_kuid_munged(current_user_ns(), current_euid());
 868}
 869
 870SYSCALL_DEFINE0(getgid)
 871{
 872	/* Only we change this so SMP safe */
 873	return from_kgid_munged(current_user_ns(), current_gid());
 874}
 875
 876SYSCALL_DEFINE0(getegid)
 877{
 878	/* Only we change this so SMP safe */
 879	return from_kgid_munged(current_user_ns(), current_egid());
 880}
 881
 882void do_sys_times(struct tms *tms)
 883{
 884	cputime_t tgutime, tgstime, cutime, cstime;
 885
 886	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
 887	cutime = current->signal->cutime;
 888	cstime = current->signal->cstime;
 889	tms->tms_utime = cputime_to_clock_t(tgutime);
 890	tms->tms_stime = cputime_to_clock_t(tgstime);
 891	tms->tms_cutime = cputime_to_clock_t(cutime);
 892	tms->tms_cstime = cputime_to_clock_t(cstime);
 893}
 894
 895SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
 896{
 897	if (tbuf) {
 898		struct tms tmp;
 899
 900		do_sys_times(&tmp);
 901		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
 902			return -EFAULT;
 903	}
 904	force_successful_syscall_return();
 905	return (long) jiffies_64_to_clock_t(get_jiffies_64());
 906}
 907
 908/*
 909 * This needs some heavy checking ...
 910 * I just haven't the stomach for it. I also don't fully
 911 * understand sessions/pgrp etc. Let somebody who does explain it.
 912 *
 913 * OK, I think I have the protection semantics right.... this is really
 914 * only important on a multi-user system anyway, to make sure one user
 915 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 916 *
 917 * !PF_FORKNOEXEC check to conform completely to POSIX.
 918 */
 919SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 920{
 921	struct task_struct *p;
 922	struct task_struct *group_leader = current->group_leader;
 923	struct pid *pgrp;
 924	int err;
 925
 926	if (!pid)
 927		pid = task_pid_vnr(group_leader);
 928	if (!pgid)
 929		pgid = pid;
 930	if (pgid < 0)
 931		return -EINVAL;
 932	rcu_read_lock();
 933
 934	/* From this point forward we keep holding onto the tasklist lock
 935	 * so that our parent does not change from under us. -DaveM
 936	 */
 937	write_lock_irq(&tasklist_lock);
 938
 939	err = -ESRCH;
 940	p = find_task_by_vpid(pid);
 941	if (!p)
 942		goto out;
 943
 944	err = -EINVAL;
 945	if (!thread_group_leader(p))
 946		goto out;
 947
 948	if (same_thread_group(p->real_parent, group_leader)) {
 949		err = -EPERM;
 950		if (task_session(p) != task_session(group_leader))
 951			goto out;
 952		err = -EACCES;
 953		if (!(p->flags & PF_FORKNOEXEC))
 954			goto out;
 955	} else {
 956		err = -ESRCH;
 957		if (p != group_leader)
 958			goto out;
 959	}
 960
 961	err = -EPERM;
 962	if (p->signal->leader)
 963		goto out;
 964
 965	pgrp = task_pid(p);
 966	if (pgid != pid) {
 967		struct task_struct *g;
 968
 969		pgrp = find_vpid(pgid);
 970		g = pid_task(pgrp, PIDTYPE_PGID);
 971		if (!g || task_session(g) != task_session(group_leader))
 972			goto out;
 973	}
 974
 975	err = security_task_setpgid(p, pgid);
 976	if (err)
 977		goto out;
 978
 979	if (task_pgrp(p) != pgrp)
 980		change_pid(p, PIDTYPE_PGID, pgrp);
 981
 982	err = 0;
 983out:
 984	/* All paths lead to here, thus we are safe. -DaveM */
 985	write_unlock_irq(&tasklist_lock);
 986	rcu_read_unlock();
 987	return err;
 988}
 989
 990SYSCALL_DEFINE1(getpgid, pid_t, pid)
 991{
 992	struct task_struct *p;
 993	struct pid *grp;
 994	int retval;
 995
 996	rcu_read_lock();
 997	if (!pid)
 998		grp = task_pgrp(current);
 999	else {
1000		retval = -ESRCH;
1001		p = find_task_by_vpid(pid);
1002		if (!p)
1003			goto out;
1004		grp = task_pgrp(p);
1005		if (!grp)
1006			goto out;
1007
1008		retval = security_task_getpgid(p);
1009		if (retval)
1010			goto out;
1011	}
1012	retval = pid_vnr(grp);
1013out:
1014	rcu_read_unlock();
1015	return retval;
1016}
1017
1018#ifdef __ARCH_WANT_SYS_GETPGRP
1019
1020SYSCALL_DEFINE0(getpgrp)
1021{
1022	return sys_getpgid(0);
1023}
1024
1025#endif
1026
1027SYSCALL_DEFINE1(getsid, pid_t, pid)
1028{
1029	struct task_struct *p;
1030	struct pid *sid;
1031	int retval;
1032
1033	rcu_read_lock();
1034	if (!pid)
1035		sid = task_session(current);
1036	else {
1037		retval = -ESRCH;
1038		p = find_task_by_vpid(pid);
1039		if (!p)
1040			goto out;
1041		sid = task_session(p);
1042		if (!sid)
1043			goto out;
1044
1045		retval = security_task_getsid(p);
1046		if (retval)
1047			goto out;
1048	}
1049	retval = pid_vnr(sid);
1050out:
1051	rcu_read_unlock();
1052	return retval;
1053}
1054
1055static void set_special_pids(struct pid *pid)
1056{
1057	struct task_struct *curr = current->group_leader;
1058
1059	if (task_session(curr) != pid)
1060		change_pid(curr, PIDTYPE_SID, pid);
1061
1062	if (task_pgrp(curr) != pid)
1063		change_pid(curr, PIDTYPE_PGID, pid);
1064}
1065
1066SYSCALL_DEFINE0(setsid)
1067{
1068	struct task_struct *group_leader = current->group_leader;
1069	struct pid *sid = task_pid(group_leader);
1070	pid_t session = pid_vnr(sid);
1071	int err = -EPERM;
1072
1073	write_lock_irq(&tasklist_lock);
1074	/* Fail if I am already a session leader */
1075	if (group_leader->signal->leader)
1076		goto out;
1077
1078	/* Fail if a process group id already exists that equals the
1079	 * proposed session id.
1080	 */
1081	if (pid_task(sid, PIDTYPE_PGID))
1082		goto out;
1083
1084	group_leader->signal->leader = 1;
1085	set_special_pids(sid);
1086
1087	proc_clear_tty(group_leader);
1088
1089	err = session;
1090out:
1091	write_unlock_irq(&tasklist_lock);
1092	if (err > 0) {
1093		proc_sid_connector(group_leader);
1094		sched_autogroup_create_attach(group_leader);
1095	}
1096	return err;
1097}
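/*
 * Editor's example (illustrative sketch): the classic consumer of
 * setsid() is daemonization. The fork() matters because setsid()
 * above rejects a caller whose pid already names a process group; the
 * freshly forked child is not a group leader, so its setsid() creates
 * a new session with no controlling tty. Error checks elided.
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void daemonize(void)
 *	{
 *		if (fork() > 0)
 *			exit(0);	// parent exits
 *		setsid();		// child becomes session + group leader
 *	}
 */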
1098
1099DECLARE_RWSEM(uts_sem);
1100
1101#ifdef COMPAT_UTS_MACHINE
1102#define override_architecture(name) \
1103	(personality(current->personality) == PER_LINUX32 && \
1104	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1105		      sizeof(COMPAT_UTS_MACHINE)))
1106#else
1107#define override_architecture(name)	0
1108#endif
1109
1110/*
1111 * Work around broken programs that cannot handle "Linux 3.0".
1112 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1113 * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
1114 */
1115static int override_release(char __user *release, size_t len)
1116{
1117	int ret = 0;
1118
1119	if (current->personality & UNAME26) {
1120		const char *rest = UTS_RELEASE;
1121		char buf[65] = { 0 };
1122		int ndots = 0;
1123		unsigned v;
1124		size_t copy;
1125
1126		while (*rest) {
1127			if (*rest == '.' && ++ndots >= 3)
1128				break;
1129			if (!isdigit(*rest) && *rest != '.')
1130				break;
1131			rest++;
1132		}
1133		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1134		copy = clamp_t(size_t, len, 1, sizeof(buf));
1135		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1136		ret = copy_to_user(release, buf, copy + 1);
1137	}
1138	return ret;
1139}
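/*
 * Editor's note (hedged example, not kernel source): the mapping above
 * can be observed by running under the UNAME26 personality. With the
 * formula above, ((LINUX_VERSION_CODE >> 8) & 0xff) + 60 gives 66 on a
 * 4.6 kernel, so the reported release becomes something like "2.6.66".
 *
 *	#include <stdio.h>
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *		personality(PER_LINUX | UNAME26);
 *		uname(&u);
 *		printf("%s\n", u.release);	// e.g. "2.6.66" on v4.6
 *		return 0;
 *	}
 */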
1140
1141SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1142{
1143	int errno = 0;
1144
1145	down_read(&uts_sem);
1146	if (copy_to_user(name, utsname(), sizeof *name))
1147		errno = -EFAULT;
1148	up_read(&uts_sem);
1149
1150	if (!errno && override_release(name->release, sizeof(name->release)))
1151		errno = -EFAULT;
1152	if (!errno && override_architecture(name))
1153		errno = -EFAULT;
1154	return errno;
1155}
1156
1157#ifdef __ARCH_WANT_SYS_OLD_UNAME
1158/*
1159 * Old cruft
1160 */
1161SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1162{
1163	int error = 0;
1164
1165	if (!name)
1166		return -EFAULT;
1167
1168	down_read(&uts_sem);
1169	if (copy_to_user(name, utsname(), sizeof(*name)))
1170		error = -EFAULT;
1171	up_read(&uts_sem);
1172
1173	if (!error && override_release(name->release, sizeof(name->release)))
1174		error = -EFAULT;
1175	if (!error && override_architecture(name))
1176		error = -EFAULT;
1177	return error;
1178}
1179
1180SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1181{
1182	int error;
1183
1184	if (!name)
1185		return -EFAULT;
1186	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1187		return -EFAULT;
1188
1189	down_read(&uts_sem);
1190	error = __copy_to_user(&name->sysname, &utsname()->sysname,
1191			       __OLD_UTS_LEN);
1192	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1193	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1194				__OLD_UTS_LEN);
1195	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1196	error |= __copy_to_user(&name->release, &utsname()->release,
1197				__OLD_UTS_LEN);
1198	error |= __put_user(0, name->release + __OLD_UTS_LEN);
1199	error |= __copy_to_user(&name->version, &utsname()->version,
1200				__OLD_UTS_LEN);
1201	error |= __put_user(0, name->version + __OLD_UTS_LEN);
1202	error |= __copy_to_user(&name->machine, &utsname()->machine,
1203				__OLD_UTS_LEN);
1204	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1205	up_read(&uts_sem);
1206
1207	if (!error && override_architecture(name))
1208		error = -EFAULT;
1209	if (!error && override_release(name->release, sizeof(name->release)))
1210		error = -EFAULT;
1211	return error ? -EFAULT : 0;
1212}
1213#endif
1214
1215SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1216{
1217	int errno;
1218	char tmp[__NEW_UTS_LEN];
1219
1220	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1221		return -EPERM;
1222
1223	if (len < 0 || len > __NEW_UTS_LEN)
1224		return -EINVAL;
1225	down_write(&uts_sem);
1226	errno = -EFAULT;
1227	if (!copy_from_user(tmp, name, len)) {
1228		struct new_utsname *u = utsname();
1229
1230		memcpy(u->nodename, tmp, len);
1231		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1232		errno = 0;
1233		uts_proc_notify(UTS_PROC_HOSTNAME);
1234	}
1235	up_write(&uts_sem);
1236	return errno;
1237}
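/*
 * Editor's example (illustrative): the ns_capable() check above means
 * sethostname() needs CAP_SYS_ADMIN in the UTS namespace's owning user
 * namespace - root on the host, or an ordinary user inside an unshared
 * UTS+user namespace pair. A minimal userspace sketch:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int set_host(const char *name)
 *	{
 *		return sethostname(name, strlen(name));	// EPERM without the cap
 *	}
 */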
1238
1239#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1240
1241SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1242{
1243	int i, errno;
1244	struct new_utsname *u;
1245
1246	if (len < 0)
1247		return -EINVAL;
1248	down_read(&uts_sem);
1249	u = utsname();
1250	i = 1 + strlen(u->nodename);
1251	if (i > len)
1252		i = len;
1253	errno = 0;
1254	if (copy_to_user(name, u->nodename, i))
1255		errno = -EFAULT;
1256	up_read(&uts_sem);
1257	return errno;
1258}
1259
1260#endif
1261
1262/*
1263 * Only setdomainname; getdomainname can be implemented by calling
1264 * uname()
1265 */
1266SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1267{
1268	int errno;
1269	char tmp[__NEW_UTS_LEN];
1270
1271	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1272		return -EPERM;
1273	if (len < 0 || len > __NEW_UTS_LEN)
1274		return -EINVAL;
1275
1276	down_write(&uts_sem);
1277	errno = -EFAULT;
1278	if (!copy_from_user(tmp, name, len)) {
1279		struct new_utsname *u = utsname();
1280
1281		memcpy(u->domainname, tmp, len);
1282		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1283		errno = 0;
1284		uts_proc_notify(UTS_PROC_DOMAINNAME);
1285	}
1286	up_write(&uts_sem);
1287	return errno;
1288}
1289
1290SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1291{
1292	struct rlimit value;
1293	int ret;
1294
1295	ret = do_prlimit(current, resource, NULL, &value);
1296	if (!ret)
1297		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1298
1299	return ret;
1300}
1301
1302#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1303
1304/*
1305 *	Back compatibility for getrlimit. Needed for some apps.
1306 */
1307SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1308		struct rlimit __user *, rlim)
1309{
1310	struct rlimit x;
1311	if (resource >= RLIM_NLIMITS)
1312		return -EINVAL;
1313
1314	task_lock(current->group_leader);
1315	x = current->signal->rlim[resource];
1316	task_unlock(current->group_leader);
1317	if (x.rlim_cur > 0x7FFFFFFF)
1318		x.rlim_cur = 0x7FFFFFFF;
1319	if (x.rlim_max > 0x7FFFFFFF)
1320		x.rlim_max = 0x7FFFFFFF;
1321	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1322}
1323
1324#endif
1325
1326static inline bool rlim64_is_infinity(__u64 rlim64)
1327{
1328#if BITS_PER_LONG < 64
1329	return rlim64 >= ULONG_MAX;
1330#else
1331	return rlim64 == RLIM64_INFINITY;
1332#endif
1333}
1334
1335static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1336{
1337	if (rlim->rlim_cur == RLIM_INFINITY)
1338		rlim64->rlim_cur = RLIM64_INFINITY;
1339	else
1340		rlim64->rlim_cur = rlim->rlim_cur;
1341	if (rlim->rlim_max == RLIM_INFINITY)
1342		rlim64->rlim_max = RLIM64_INFINITY;
1343	else
1344		rlim64->rlim_max = rlim->rlim_max;
1345}
1346
1347static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1348{
1349	if (rlim64_is_infinity(rlim64->rlim_cur))
1350		rlim->rlim_cur = RLIM_INFINITY;
1351	else
1352		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1353	if (rlim64_is_infinity(rlim64->rlim_max))
1354		rlim->rlim_max = RLIM_INFINITY;
1355	else
1356		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1357}
1358
1359/* make sure you are allowed to change @tsk limits before calling this */
1360int do_prlimit(struct task_struct *tsk, unsigned int resource,
1361		struct rlimit *new_rlim, struct rlimit *old_rlim)
1362{
1363	struct rlimit *rlim;
1364	int retval = 0;
1365
1366	if (resource >= RLIM_NLIMITS)
1367		return -EINVAL;
1368	if (new_rlim) {
1369		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1370			return -EINVAL;
1371		if (resource == RLIMIT_NOFILE &&
1372				new_rlim->rlim_max > sysctl_nr_open)
1373			return -EPERM;
1374	}
1375
1376	/* protect tsk->signal and tsk->sighand from disappearing */
1377	read_lock(&tasklist_lock);
1378	if (!tsk->sighand) {
1379		retval = -ESRCH;
1380		goto out;
1381	}
1382
1383	rlim = tsk->signal->rlim + resource;
1384	task_lock(tsk->group_leader);
1385	if (new_rlim) {
1386		/* Keep the capable check against init_user_ns until
1387		   cgroups can contain all limits */
1388		if (new_rlim->rlim_max > rlim->rlim_max &&
1389				!capable(CAP_SYS_RESOURCE))
1390			retval = -EPERM;
1391		if (!retval)
1392			retval = security_task_setrlimit(tsk->group_leader,
1393					resource, new_rlim);
1394		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
1395			/*
1396			 * The caller is asking for an immediate RLIMIT_CPU
1397			 * expiry.  But we use the zero value to mean "it was
1398			 * never set".  So let's cheat and make it one second
1399			 * instead
1400			 */
1401			new_rlim->rlim_cur = 1;
1402		}
1403	}
1404	if (!retval) {
1405		if (old_rlim)
1406			*old_rlim = *rlim;
1407		if (new_rlim)
1408			*rlim = *new_rlim;
1409	}
1410	task_unlock(tsk->group_leader);
1411
1412	/*
1413	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
1414	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
1415	 * very long-standing error, and fixing it now risks breakage of
1416	 * applications, so we live with it
1417	 */
1418	 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1419			 new_rlim->rlim_cur != RLIM_INFINITY)
1420		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1421out:
1422	read_unlock(&tasklist_lock);
1423	return retval;
1424}
1425
1426/* rcu lock must be held */
1427static int check_prlimit_permission(struct task_struct *task)
1428{
1429	const struct cred *cred = current_cred(), *tcred;
1430
1431	if (current == task)
1432		return 0;
1433
1434	tcred = __task_cred(task);
1435	if (uid_eq(cred->uid, tcred->euid) &&
1436	    uid_eq(cred->uid, tcred->suid) &&
1437	    uid_eq(cred->uid, tcred->uid)  &&
1438	    gid_eq(cred->gid, tcred->egid) &&
1439	    gid_eq(cred->gid, tcred->sgid) &&
1440	    gid_eq(cred->gid, tcred->gid))
1441		return 0;
1442	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1443		return 0;
1444
1445	return -EPERM;
1446}
1447
1448SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1449		const struct rlimit64 __user *, new_rlim,
1450		struct rlimit64 __user *, old_rlim)
1451{
1452	struct rlimit64 old64, new64;
1453	struct rlimit old, new;
1454	struct task_struct *tsk;
1455	int ret;
1456
1457	if (new_rlim) {
1458		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1459			return -EFAULT;
1460		rlim64_to_rlim(&new64, &new);
1461	}
1462
1463	rcu_read_lock();
1464	tsk = pid ? find_task_by_vpid(pid) : current;
1465	if (!tsk) {
1466		rcu_read_unlock();
1467		return -ESRCH;
1468	}
1469	ret = check_prlimit_permission(tsk);
1470	if (ret) {
1471		rcu_read_unlock();
1472		return ret;
1473	}
1474	get_task_struct(tsk);
1475	rcu_read_unlock();
1476
1477	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1478			old_rlim ? &old : NULL);
1479
1480	if (!ret && old_rlim) {
1481		rlim_to_rlim64(&old, &old64);
1482		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1483			ret = -EFAULT;
1484	}
1485
1486	put_task_struct(tsk);
1487	return ret;
1488}
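/*
 * Editor's example (a sketch under assumptions, not kernel source):
 * glibc exposes this syscall as prlimit() (declared under _GNU_SOURCE),
 * which can read and modify another process's limits in a single call.
 * Raising a target pid's RLIMIT_NOFILE while fetching the old value:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/types.h>
 *	#include <sys/resource.h>
 *
 *	int raise_nofile(pid_t pid, rlim_t n)
 *	{
 *		struct rlimit new = { .rlim_cur = n, .rlim_max = n };
 *		struct rlimit old;
 *		return prlimit(pid, RLIMIT_NOFILE, &new, &old);	// 0 or -1/errno
 *	}
 */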
1489
1490SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1491{
1492	struct rlimit new_rlim;
1493
1494	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1495		return -EFAULT;
1496	return do_prlimit(current, resource, &new_rlim, NULL);
1497}
1498
1499/*
1500 * It would make sense to put struct rusage in the task_struct,
1501 * except that would make the task_struct be *really big*.  After
1502 * task_struct gets moved into malloc'ed memory, it would
1503 * make sense to do this.  It will make moving the rest of the information
1504 * a lot simpler!  (Which we're not doing right now because we're not
1505 * measuring them yet).
1506 *
1507 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1508 * races with threads incrementing their own counters.  But since word
1509 * reads are atomic, we either get new values or old values and we don't
1510 * care which for the sums.  We always take the siglock to protect reading
1511 * the c* fields from p->signal from races with exit.c updating those
1512 * fields when reaping, so a sample either gets all the additions of a
1513 * given child after it's reaped, or none so this sample is before reaping.
1514 *
1515 * Locking:
 1516 * We need to take the siglock for CHILDREN, SELF and BOTH
 1517 * for the cases current multithreaded, non-current single threaded
 1518 * non-current multithreaded.  Thread traversal is now safe with
 1519 * the siglock held.
 1520 * Strictly speaking, we do not need to take the siglock if we are current and
 1521 * single threaded, as no one else can take our signal_struct away, no one
 1522 * else can reap the children to update signal->c* counters, and no one else
 1523 * can race with the signal-> fields. If we do not take any lock, the
 1524 * signal-> fields could be read out of order while another thread was just
 1525 * exiting. So we should place a read memory barrier when we avoid the lock.
 1526 * On the writer side, a write memory barrier is implied in __exit_signal,
 1527 * as __exit_signal releases the siglock spinlock after updating the signal->
 1528 * fields. But we don't do this yet to keep things simple.
1529 *
1530 */
1531
1532static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1533{
1534	r->ru_nvcsw += t->nvcsw;
1535	r->ru_nivcsw += t->nivcsw;
1536	r->ru_minflt += t->min_flt;
1537	r->ru_majflt += t->maj_flt;
1538	r->ru_inblock += task_io_get_inblock(t);
1539	r->ru_oublock += task_io_get_oublock(t);
1540}
1541
1542static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1543{
1544	struct task_struct *t;
1545	unsigned long flags;
1546	cputime_t tgutime, tgstime, utime, stime;
1547	unsigned long maxrss = 0;
1548
1549	memset((char *)r, 0, sizeof (*r));
1550	utime = stime = 0;
1551
1552	if (who == RUSAGE_THREAD) {
1553		task_cputime_adjusted(current, &utime, &stime);
1554		accumulate_thread_rusage(p, r);
1555		maxrss = p->signal->maxrss;
1556		goto out;
1557	}
1558
1559	if (!lock_task_sighand(p, &flags))
1560		return;
1561
1562	switch (who) {
1563	case RUSAGE_BOTH:
1564	case RUSAGE_CHILDREN:
1565		utime = p->signal->cutime;
1566		stime = p->signal->cstime;
1567		r->ru_nvcsw = p->signal->cnvcsw;
1568		r->ru_nivcsw = p->signal->cnivcsw;
1569		r->ru_minflt = p->signal->cmin_flt;
1570		r->ru_majflt = p->signal->cmaj_flt;
1571		r->ru_inblock = p->signal->cinblock;
1572		r->ru_oublock = p->signal->coublock;
1573		maxrss = p->signal->cmaxrss;
1574
1575		if (who == RUSAGE_CHILDREN)
1576			break;
1577
1578	case RUSAGE_SELF:
1579		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1580		utime += tgutime;
1581		stime += tgstime;
1582		r->ru_nvcsw += p->signal->nvcsw;
1583		r->ru_nivcsw += p->signal->nivcsw;
1584		r->ru_minflt += p->signal->min_flt;
1585		r->ru_majflt += p->signal->maj_flt;
1586		r->ru_inblock += p->signal->inblock;
1587		r->ru_oublock += p->signal->oublock;
1588		if (maxrss < p->signal->maxrss)
1589			maxrss = p->signal->maxrss;
1590		t = p;
1591		do {
1592			accumulate_thread_rusage(t, r);
1593		} while_each_thread(p, t);
1594		break;
1595
1596	default:
1597		BUG();
1598	}
1599	unlock_task_sighand(p, &flags);
1600
1601out:
1602	cputime_to_timeval(utime, &r->ru_utime);
1603	cputime_to_timeval(stime, &r->ru_stime);
1604
1605	if (who != RUSAGE_CHILDREN) {
1606		struct mm_struct *mm = get_task_mm(p);
1607
1608		if (mm) {
1609			setmax_mm_hiwater_rss(&maxrss, mm);
1610			mmput(mm);
1611		}
1612	}
1613	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1614}
1615
1616int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1617{
1618	struct rusage r;
1619
1620	k_getrusage(p, who, &r);
1621	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1622}
1623
1624SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1625{
1626	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1627	    who != RUSAGE_THREAD)
1628		return -EINVAL;
1629	return getrusage(current, who, ru);
1630}
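/*
 * Editor's example (illustrative): reading back the peak RSS computed
 * in k_getrusage() above; note that ru_maxrss arrives in kilobytes
 * because of the pages-to-KB conversion there.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rusage ru;
 *		getrusage(RUSAGE_SELF, &ru);
 *		printf("maxrss=%ld KB\n", ru.ru_maxrss);
 *		return 0;
 *	}
 */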
1631
1632#ifdef CONFIG_COMPAT
1633COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1634{
1635	struct rusage r;
1636
1637	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1638	    who != RUSAGE_THREAD)
1639		return -EINVAL;
1640
1641	k_getrusage(current, who, &r);
1642	return put_compat_rusage(&r, ru);
1643}
1644#endif
1645
1646SYSCALL_DEFINE1(umask, int, mask)
1647{
1648	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1649	return mask;
1650}
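/*
 * Editor's note (hedged sketch): like setfsuid(), umask() cannot fail;
 * it unconditionally swaps in the new mask and hands back the previous
 * one, mirroring the xchg() above.
 *
 *	#include <sys/stat.h>
 *
 *	mode_t tighten_umask(void)
 *	{
 *		return umask(077);	// returns the prior mask
 *	}
 */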
1651
1652static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1653{
1654	struct fd exe;
1655	struct file *old_exe, *exe_file;
1656	struct inode *inode;
1657	int err;
1658
1659	exe = fdget(fd);
1660	if (!exe.file)
1661		return -EBADF;
1662
1663	inode = file_inode(exe.file);
1664
1665	/*
 1666	 * Because the original mm->exe_file points to an executable file,
 1667	 * make sure that this one is executable as well, to avoid breaking
 1668	 * the overall picture.
1669	 */
1670	err = -EACCES;
1671	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1672		goto exit;
1673
1674	err = inode_permission(inode, MAY_EXEC);
1675	if (err)
1676		goto exit;
1677
1678	/*
1679	 * Forbid mm->exe_file change if old file still mapped.
1680	 */
1681	exe_file = get_mm_exe_file(mm);
1682	err = -EBUSY;
1683	if (exe_file) {
1684		struct vm_area_struct *vma;
1685
1686		down_read(&mm->mmap_sem);
1687		for (vma = mm->mmap; vma; vma = vma->vm_next) {
1688			if (!vma->vm_file)
1689				continue;
1690			if (path_equal(&vma->vm_file->f_path,
1691				       &exe_file->f_path))
1692				goto exit_err;
1693		}
1694
1695		up_read(&mm->mmap_sem);
1696		fput(exe_file);
1697	}
1698
1699	/*
1700	 * The symlink can be changed only once, just to disallow arbitrary
 1701	 * transitions that malicious software might bring in. This means one
 1702	 * could take a snapshot of all running processes and monitor
1703	 * /proc/pid/exe changes to notice unusual activity if needed.
1704	 */
1705	err = -EPERM;
1706	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
1707		goto exit;
1708
1709	err = 0;
1710	/* set the new file, lockless */
1711	get_file(exe.file);
1712	old_exe = xchg(&mm->exe_file, exe.file);
1713	if (old_exe)
1714		fput(old_exe);
1715exit:
1716	fdput(exe);
1717	return err;
1718exit_err:
1719	up_read(&mm->mmap_sem);
1720	fput(exe_file);
1721	goto exit;
1722}
1723
1724/*
1725 * WARNING: we don't require any capability here so be very careful
1726 * in what is allowed for modification from userspace.
1727 */
1728static int validate_prctl_map(struct prctl_mm_map *prctl_map)
1729{
1730	unsigned long mmap_max_addr = TASK_SIZE;
1731	struct mm_struct *mm = current->mm;
1732	int error = -EINVAL, i;
1733
1734	static const unsigned char offsets[] = {
1735		offsetof(struct prctl_mm_map, start_code),
1736		offsetof(struct prctl_mm_map, end_code),
1737		offsetof(struct prctl_mm_map, start_data),
1738		offsetof(struct prctl_mm_map, end_data),
1739		offsetof(struct prctl_mm_map, start_brk),
1740		offsetof(struct prctl_mm_map, brk),
1741		offsetof(struct prctl_mm_map, start_stack),
1742		offsetof(struct prctl_mm_map, arg_start),
1743		offsetof(struct prctl_mm_map, arg_end),
1744		offsetof(struct prctl_mm_map, env_start),
1745		offsetof(struct prctl_mm_map, env_end),
1746	};
1747
1748	/*
 1749	 * Make sure the members do not point somewhere outside
 1750	 * of the allowed address space.
1751	 */
1752	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1753		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1754
1755		if ((unsigned long)val >= mmap_max_addr ||
1756		    (unsigned long)val < mmap_min_addr)
1757			goto out;
1758	}
1759
1760	/*
1761	 * Make sure the pairs are ordered.
1762	 */
1763#define __prctl_check_order(__m1, __op, __m2)				\
1764	((unsigned long)prctl_map->__m1 __op				\
1765	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1766	error  = __prctl_check_order(start_code, <, end_code);
1767	error |= __prctl_check_order(start_data, <, end_data);
1768	error |= __prctl_check_order(start_brk, <=, brk);
1769	error |= __prctl_check_order(arg_start, <=, arg_end);
1770	error |= __prctl_check_order(env_start, <=, env_end);
1771	if (error)
1772		goto out;
1773#undef __prctl_check_order
1774
1775	error = -EINVAL;
1776
1777	/*
1778	 * @brk should be after @end_data in traditional maps.
1779	 */
1780	if (prctl_map->start_brk <= prctl_map->end_data ||
1781	    prctl_map->brk <= prctl_map->end_data)
1782		goto out;
1783
1784	/*
 1785	 * Nor should we allow limits to be overridden if they are already set.
1786	 */
1787	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1788			      prctl_map->start_brk, prctl_map->end_data,
1789			      prctl_map->start_data))
1790			goto out;
1791
1792	/*
1793	 * Someone is trying to cheat the auxv vector.
1794	 */
1795	if (prctl_map->auxv_size) {
1796		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
1797			goto out;
1798	}
1799
1800	/*
1801	 * Finally, make sure the caller has the rights to
1802	 * change /proc/pid/exe link: only local root should
1803	 * be allowed to.
1804	 */
1805	if (prctl_map->exe_fd != (u32)-1) {
1806		struct user_namespace *ns = current_user_ns();
1807		const struct cred *cred = current_cred();
1808
1809		if (!uid_eq(cred->uid, make_kuid(ns, 0)) ||
1810		    !gid_eq(cred->gid, make_kgid(ns, 0)))
1811			goto out;
1812	}
1813
1814	error = 0;
1815out:
1816	return error;
1817}
1818
1819#ifdef CONFIG_CHECKPOINT_RESTORE
1820static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1821{
1822	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1823	unsigned long user_auxv[AT_VECTOR_SIZE];
1824	struct mm_struct *mm = current->mm;
1825	int error;
1826
1827	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1828	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1829
1830	if (opt == PR_SET_MM_MAP_SIZE)
1831		return put_user((unsigned int)sizeof(prctl_map),
1832				(unsigned int __user *)addr);
1833
1834	if (data_size != sizeof(prctl_map))
1835		return -EINVAL;
1836
1837	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1838		return -EFAULT;
1839
1840	error = validate_prctl_map(&prctl_map);
1841	if (error)
1842		return error;
1843
1844	if (prctl_map.auxv_size) {
1845		memset(user_auxv, 0, sizeof(user_auxv));
1846		if (copy_from_user(user_auxv,
1847				   (const void __user *)prctl_map.auxv,
1848				   prctl_map.auxv_size))
1849			return -EFAULT;
1850
1851		/* Last entry must be AT_NULL as specification requires */
1852		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1853		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
1854	}
1855
1856	if (prctl_map.exe_fd != (u32)-1) {
1857		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
1858		if (error)
1859			return error;
1860	}
1861
1862	down_write(&mm->mmap_sem);
1863
1864	/*
 1865	 * We don't validate whether these members point to
 1866	 * real, present VMAs because the application may have the
 1867	 * corresponding VMAs already unmapped, and the kernel mostly uses
 1868	 * these members for statistics output in procfs, except
 1869	 *
 1870	 *  - @start_brk/@brk, which are used in do_brk; the kernel does look
 1871	 *    up VMAs when updating these members, so anything wrong written
 1872	 *    here causes the kernel to swear at the userspace program but
 1873	 *    won't lead to any problem in the kernel itself
1874	 */
1875
1876	mm->start_code	= prctl_map.start_code;
1877	mm->end_code	= prctl_map.end_code;
1878	mm->start_data	= prctl_map.start_data;
1879	mm->end_data	= prctl_map.end_data;
1880	mm->start_brk	= prctl_map.start_brk;
1881	mm->brk		= prctl_map.brk;
1882	mm->start_stack	= prctl_map.start_stack;
1883	mm->arg_start	= prctl_map.arg_start;
1884	mm->arg_end	= prctl_map.arg_end;
1885	mm->env_start	= prctl_map.env_start;
1886	mm->env_end	= prctl_map.env_end;
1887
1888	/*
1889	 * Note this update of @saved_auxv is lockless thus
1890	 * if someone reads this member in procfs while we're
1891	 * updating -- it may get partly updated results. It's
1892	 * known and acceptable trade off: we leave it as is to
1893	 * not introduce additional locks here making the kernel
1894	 * more complex.
1895	 */
1896	if (prctl_map.auxv_size)
1897		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
1898
1899	up_write(&mm->mmap_sem);
1900	return 0;
1901}
1902#endif /* CONFIG_CHECKPOINT_RESTORE */
1903
1904static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
1905			  unsigned long len)
1906{
1907	/*
1908	 * This doesn't move the auxiliary vector itself since it's pinned to
1909	 * mm_struct, but it permits filling the vector with new values.  It's
1910	 * up to the caller to provide sane values here, otherwise userspace
1911	 * tools which use this vector might be unhappy.
1912	 */
1913	unsigned long user_auxv[AT_VECTOR_SIZE];
1914
1915	if (len > sizeof(user_auxv))
1916		return -EINVAL;
1917
1918	if (copy_from_user(user_auxv, (const void __user *)addr, len))
1919		return -EFAULT;
1920
1921	/* Make sure the last entry is always AT_NULL */
1922	user_auxv[AT_VECTOR_SIZE - 2] = 0;
1923	user_auxv[AT_VECTOR_SIZE - 1] = 0;
1924
1925	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1926
1927	task_lock(current);
1928	memcpy(mm->saved_auxv, user_auxv, len);
1929	task_unlock(current);
1930
1931	return 0;
1932}
1933
1934static int prctl_set_mm(int opt, unsigned long addr,
1935			unsigned long arg4, unsigned long arg5)
1936{
1937	struct mm_struct *mm = current->mm;
1938	struct prctl_mm_map prctl_map;
1939	struct vm_area_struct *vma;
1940	int error;
1941
1942	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
1943			      opt != PR_SET_MM_MAP &&
1944			      opt != PR_SET_MM_MAP_SIZE)))
1945		return -EINVAL;
1946
1947#ifdef CONFIG_CHECKPOINT_RESTORE
1948	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
1949		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
1950#endif
1951
1952	if (!capable(CAP_SYS_RESOURCE))
1953		return -EPERM;
1954
1955	if (opt == PR_SET_MM_EXE_FILE)
1956		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
1957
1958	if (opt == PR_SET_MM_AUXV)
1959		return prctl_set_auxv(mm, addr, arg4);
1960
1961	if (addr >= TASK_SIZE || addr < mmap_min_addr)
1962		return -EINVAL;
1963
1964	error = -EINVAL;
1965
1966	down_write(&mm->mmap_sem);
1967	vma = find_vma(mm, addr);
1968
1969	prctl_map.start_code	= mm->start_code;
1970	prctl_map.end_code	= mm->end_code;
1971	prctl_map.start_data	= mm->start_data;
1972	prctl_map.end_data	= mm->end_data;
1973	prctl_map.start_brk	= mm->start_brk;
1974	prctl_map.brk		= mm->brk;
1975	prctl_map.start_stack	= mm->start_stack;
1976	prctl_map.arg_start	= mm->arg_start;
1977	prctl_map.arg_end	= mm->arg_end;
1978	prctl_map.env_start	= mm->env_start;
1979	prctl_map.env_end	= mm->env_end;
1980	prctl_map.auxv		= NULL;
1981	prctl_map.auxv_size	= 0;
1982	prctl_map.exe_fd	= -1;
1983
1984	switch (opt) {
1985	case PR_SET_MM_START_CODE:
1986		prctl_map.start_code = addr;
1987		break;
1988	case PR_SET_MM_END_CODE:
1989		prctl_map.end_code = addr;
1990		break;
1991	case PR_SET_MM_START_DATA:
1992		prctl_map.start_data = addr;
1993		break;
1994	case PR_SET_MM_END_DATA:
1995		prctl_map.end_data = addr;
1996		break;
1997	case PR_SET_MM_START_STACK:
1998		prctl_map.start_stack = addr;
1999		break;
2000	case PR_SET_MM_START_BRK:
2001		prctl_map.start_brk = addr;
2002		break;
2003	case PR_SET_MM_BRK:
2004		prctl_map.brk = addr;
2005		break;
2006	case PR_SET_MM_ARG_START:
2007		prctl_map.arg_start = addr;
2008		break;
2009	case PR_SET_MM_ARG_END:
2010		prctl_map.arg_end = addr;
2011		break;
2012	case PR_SET_MM_ENV_START:
2013		prctl_map.env_start = addr;
2014		break;
2015	case PR_SET_MM_ENV_END:
2016		prctl_map.env_end = addr;
2017		break;
2018	default:
2019		goto out;
2020	}
2021
2022	error = validate_prctl_map(&prctl_map);
2023	if (error)
2024		goto out;
2025
2026	switch (opt) {
2027	/*
2028	 * If the command line arguments and environment
2029	 * are placed somewhere else on the stack, we can
2030	 * set them up here: ARG_START/END to set up the
2031	 * command line arguments and ENV_START/END for
2032	 * the environment.
2033	 */
2034	case PR_SET_MM_START_STACK:
2035	case PR_SET_MM_ARG_START:
2036	case PR_SET_MM_ARG_END:
2037	case PR_SET_MM_ENV_START:
2038	case PR_SET_MM_ENV_END:
2039		if (!vma) {
2040			error = -EFAULT;
2041			goto out;
2042		}
2043	}
2044
2045	mm->start_code	= prctl_map.start_code;
2046	mm->end_code	= prctl_map.end_code;
2047	mm->start_data	= prctl_map.start_data;
2048	mm->end_data	= prctl_map.end_data;
2049	mm->start_brk	= prctl_map.start_brk;
2050	mm->brk		= prctl_map.brk;
2051	mm->start_stack	= prctl_map.start_stack;
2052	mm->arg_start	= prctl_map.arg_start;
2053	mm->arg_end	= prctl_map.arg_end;
2054	mm->env_start	= prctl_map.env_start;
2055	mm->env_end	= prctl_map.env_end;
2056
2057	error = 0;
2058out:
2059	up_write(&mm->mmap_sem);
2060	return error;
2061}
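/*
 * For example, a checkpoint/restore style tool holding
 * CAP_SYS_RESOURCE could rewrite the brk bookkeeping through the
 * function above (a hedged sketch using the glibc prctl(2) wrapper;
 * new_brk stands for an already mapped, validated address):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_MM, PR_SET_MM_START_BRK, new_brk, 0, 0);
 *	prctl(PR_SET_MM, PR_SET_MM_BRK, new_brk, 0, 0);
 */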
2062
2063#ifdef CONFIG_CHECKPOINT_RESTORE
2064static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2065{
2066	return put_user(me->clear_child_tid, tid_addr);
2067}
2068#else
2069static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2070{
2071	return -EINVAL;
2072}
2073#endif
2074
2075SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2076		unsigned long, arg4, unsigned long, arg5)
2077{
2078	struct task_struct *me = current;
2079	unsigned char comm[sizeof(me->comm)];
2080	long error;
2081
2082	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2083	if (error != -ENOSYS)
2084		return error;
2085
2086	error = 0;
2087	switch (option) {
2088	case PR_SET_PDEATHSIG:
2089		if (!valid_signal(arg2)) {
2090			error = -EINVAL;
2091			break;
2092		}
2093		me->pdeath_signal = arg2;
2094		break;
2095	case PR_GET_PDEATHSIG:
2096		error = put_user(me->pdeath_signal, (int __user *)arg2);
2097		break;
2098	case PR_GET_DUMPABLE:
2099		error = get_dumpable(me->mm);
2100		break;
2101	case PR_SET_DUMPABLE:
2102		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2103			error = -EINVAL;
2104			break;
2105		}
2106		set_dumpable(me->mm, arg2);
2107		break;
2108
2109	case PR_SET_UNALIGN:
2110		error = SET_UNALIGN_CTL(me, arg2);
2111		break;
2112	case PR_GET_UNALIGN:
2113		error = GET_UNALIGN_CTL(me, arg2);
2114		break;
2115	case PR_SET_FPEMU:
2116		error = SET_FPEMU_CTL(me, arg2);
2117		break;
2118	case PR_GET_FPEMU:
2119		error = GET_FPEMU_CTL(me, arg2);
2120		break;
2121	case PR_SET_FPEXC:
2122		error = SET_FPEXC_CTL(me, arg2);
2123		break;
2124	case PR_GET_FPEXC:
2125		error = GET_FPEXC_CTL(me, arg2);
2126		break;
2127	case PR_GET_TIMING:
2128		error = PR_TIMING_STATISTICAL;
2129		break;
2130	case PR_SET_TIMING:
2131		if (arg2 != PR_TIMING_STATISTICAL)
2132			error = -EINVAL;
2133		break;
2134	case PR_SET_NAME:
2135		comm[sizeof(me->comm) - 1] = 0;
2136		if (strncpy_from_user(comm, (char __user *)arg2,
2137				      sizeof(me->comm) - 1) < 0)
2138			return -EFAULT;
2139		set_task_comm(me, comm);
2140		proc_comm_connector(me);
2141		break;
2142	case PR_GET_NAME:
2143		get_task_comm(comm, me);
2144		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2145			return -EFAULT;
2146		break;
2147	case PR_GET_ENDIAN:
2148		error = GET_ENDIAN(me, arg2);
2149		break;
2150	case PR_SET_ENDIAN:
2151		error = SET_ENDIAN(me, arg2);
2152		break;
2153	case PR_GET_SECCOMP:
2154		error = prctl_get_seccomp();
2155		break;
2156	case PR_SET_SECCOMP:
2157		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2158		break;
2159	case PR_GET_TSC:
2160		error = GET_TSC_CTL(arg2);
2161		break;
2162	case PR_SET_TSC:
2163		error = SET_TSC_CTL(arg2);
2164		break;
2165	case PR_TASK_PERF_EVENTS_DISABLE:
2166		error = perf_event_task_disable();
2167		break;
2168	case PR_TASK_PERF_EVENTS_ENABLE:
2169		error = perf_event_task_enable();
2170		break;
2171	case PR_GET_TIMERSLACK:
2172		if (current->timer_slack_ns > ULONG_MAX)
2173			error = ULONG_MAX;
2174		else
2175			error = current->timer_slack_ns;
2176		break;
2177	case PR_SET_TIMERSLACK:
2178		if (arg2 <= 0)
2179			current->timer_slack_ns =
2180					current->default_timer_slack_ns;
2181		else
2182			current->timer_slack_ns = arg2;
2183		break;
2184	case PR_MCE_KILL:
2185		if (arg4 | arg5)
2186			return -EINVAL;
2187		switch (arg2) {
2188		case PR_MCE_KILL_CLEAR:
2189			if (arg3 != 0)
2190				return -EINVAL;
2191			current->flags &= ~PF_MCE_PROCESS;
2192			break;
2193		case PR_MCE_KILL_SET:
2194			current->flags |= PF_MCE_PROCESS;
2195			if (arg3 == PR_MCE_KILL_EARLY)
2196				current->flags |= PF_MCE_EARLY;
2197			else if (arg3 == PR_MCE_KILL_LATE)
2198				current->flags &= ~PF_MCE_EARLY;
2199			else if (arg3 == PR_MCE_KILL_DEFAULT)
2200				current->flags &=
2201						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2202			else
2203				return -EINVAL;
2204			break;
2205		default:
2206			return -EINVAL;
2207		}
2208		break;
2209	case PR_MCE_KILL_GET:
2210		if (arg2 | arg3 | arg4 | arg5)
2211			return -EINVAL;
2212		if (current->flags & PF_MCE_PROCESS)
2213			error = (current->flags & PF_MCE_EARLY) ?
2214				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2215		else
2216			error = PR_MCE_KILL_DEFAULT;
2217		break;
2218	case PR_SET_MM:
2219		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2220		break;
2221	case PR_GET_TID_ADDRESS:
2222		error = prctl_get_tid_address(me, (int __user **)arg2);
2223		break;
2224	case PR_SET_CHILD_SUBREAPER:
2225		me->signal->is_child_subreaper = !!arg2;
2226		break;
2227	case PR_GET_CHILD_SUBREAPER:
2228		error = put_user(me->signal->is_child_subreaper,
2229				 (int __user *)arg2);
2230		break;
2231	case PR_SET_NO_NEW_PRIVS:
2232		if (arg2 != 1 || arg3 || arg4 || arg5)
2233			return -EINVAL;
2234
2235		task_set_no_new_privs(current);
2236		break;
2237	case PR_GET_NO_NEW_PRIVS:
2238		if (arg2 || arg3 || arg4 || arg5)
2239			return -EINVAL;
2240		return task_no_new_privs(current) ? 1 : 0;
2241	case PR_GET_THP_DISABLE:
2242		if (arg2 || arg3 || arg4 || arg5)
2243			return -EINVAL;
2244		error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
2245		break;
2246	case PR_SET_THP_DISABLE:
2247		if (arg3 || arg4 || arg5)
2248			return -EINVAL;
2249		down_write(&me->mm->mmap_sem);
2250		if (arg2)
2251			me->mm->def_flags |= VM_NOHUGEPAGE;
2252		else
2253			me->mm->def_flags &= ~VM_NOHUGEPAGE;
2254		up_write(&me->mm->mmap_sem);
2255		break;
2256	case PR_MPX_ENABLE_MANAGEMENT:
2257		if (arg2 || arg3 || arg4 || arg5)
2258			return -EINVAL;
2259		error = MPX_ENABLE_MANAGEMENT();
2260		break;
2261	case PR_MPX_DISABLE_MANAGEMENT:
2262		if (arg2 || arg3 || arg4 || arg5)
2263			return -EINVAL;
2264		error = MPX_DISABLE_MANAGEMENT();
2265		break;
2266	case PR_SET_FP_MODE:
2267		error = SET_FP_MODE(me, arg2);
2268		break;
2269	case PR_GET_FP_MODE:
2270		error = GET_FP_MODE(me);
2271		break;
2272	default:
2273		error = -EINVAL;
2274		break;
2275	}
2276	return error;
2277}
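/*
 * Two of the simpler options above as userspace would invoke them
 * (a sketch assuming the glibc prctl(2) wrapper):
 *
 *	#include <sys/prctl.h>
 *	#include <signal.h>
 *
 *	prctl(PR_SET_NAME, "worker-0", 0, 0, 0);
 *	prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0);
 *
 * The first sets ->comm (visible in /proc/self/comm), the second
 * asks for SIGTERM when the parent of this thread dies.
 */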
2278
2279SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2280		struct getcpu_cache __user *, unused)
2281{
2282	int err = 0;
2283	int cpu = raw_smp_processor_id();
2284
2285	if (cpup)
2286		err |= put_user(cpu, cpup);
2287	if (nodep)
2288		err |= put_user(cpu_to_node(cpu), nodep);
2289	return err ? -EFAULT : 0;
2290}
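/*
 * Userspace normally gets this through the vDSO or sched_getcpu(),
 * but a direct invocation looks like this (sketch; glibc 2.29 also
 * added a getcpu() wrapper):
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	unsigned int cpu, node;
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("cpu %u node %u\n", cpu, node);
 */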
2291
2292/**
2293 * do_sysinfo - fill in sysinfo struct
2294 * @info: pointer to buffer to fill
2295 */
2296static int do_sysinfo(struct sysinfo *info)
2297{
2298	unsigned long mem_total, sav_total;
2299	unsigned int mem_unit, bitcount;
2300	struct timespec tp;
2301
2302	memset(info, 0, sizeof(struct sysinfo));
2303
2304	get_monotonic_boottime(&tp);
2305	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2306
2307	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2308
2309	info->procs = nr_threads;
2310
2311	si_meminfo(info);
2312	si_swapinfo(info);
2313
2314	/*
2315	 * If the sum of all the available memory (i.e. ram + swap)
2316	 * is less than can be stored in a 32 bit unsigned long then
2317	 * we can be binary compatible with 2.2.x kernels.  If not,
2318	 * well, in that case 2.2.x was broken anyways...
2319	 *
2320	 *  -Erik Andersen <andersee@debian.org>
2321	 */
2322
2323	mem_total = info->totalram + info->totalswap;
2324	if (mem_total < info->totalram || mem_total < info->totalswap)
2325		goto out;
2326	bitcount = 0;
2327	mem_unit = info->mem_unit;
2328	while (mem_unit > 1) {
2329		bitcount++;
2330		mem_unit >>= 1;
2331		sav_total = mem_total;
2332		mem_total <<= 1;
2333		if (mem_total < sav_total)
2334			goto out;
2335	}
2336
2337	/*
2338	 * If mem_total did not overflow, multiply all memory values by
2339	 * info->mem_unit and set it to 1.  This leaves things compatible
2340	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2341	 * kernels...
2342	 */
2343
2344	info->mem_unit = 1;
2345	info->totalram <<= bitcount;
2346	info->freeram <<= bitcount;
2347	info->sharedram <<= bitcount;
2348	info->bufferram <<= bitcount;
2349	info->totalswap <<= bitcount;
2350	info->freeswap <<= bitcount;
2351	info->totalhigh <<= bitcount;
2352	info->freehigh <<= bitcount;
2353
2354out:
2355	return 0;
2356}
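/*
 * Worked example of the scaling above, assuming 4 KiB pages on a
 * 64-bit kernel: si_meminfo() reports totalram = 524288 pages with
 * mem_unit = 4096.  The loop halves mem_unit twelve times without
 * mem_total overflowing, so bitcount = 12 and totalram becomes
 * 524288 << 12 = 2147483648, i.e. 2 GiB in bytes with mem_unit = 1.
 */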
2357
2358SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2359{
2360	struct sysinfo val;
2361
2362	do_sysinfo(&val);
2363
2364	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2365		return -EFAULT;
2366
2367	return 0;
2368}
2369
2370#ifdef CONFIG_COMPAT
2371struct compat_sysinfo {
2372	s32 uptime;
2373	u32 loads[3];
2374	u32 totalram;
2375	u32 freeram;
2376	u32 sharedram;
2377	u32 bufferram;
2378	u32 totalswap;
2379	u32 freeswap;
2380	u16 procs;
2381	u16 pad;
2382	u32 totalhigh;
2383	u32 freehigh;
2384	u32 mem_unit;
2385	char _f[20-2*sizeof(u32)-sizeof(int)];
2386};
2387
2388COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2389{
2390	struct sysinfo s;
2391
2392	do_sysinfo(&s);
2393
2394	/* Check to see if any memory value is too large for 32-bit
2395	 * and scale down if needed.
2396	 */
2397	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2398		int bitcount = 0;
2399
2400		while (s.mem_unit < PAGE_SIZE) {
2401			s.mem_unit <<= 1;
2402			bitcount++;
2403		}
2404
2405		s.totalram >>= bitcount;
2406		s.freeram >>= bitcount;
2407		s.sharedram >>= bitcount;
2408		s.bufferram >>= bitcount;
2409		s.totalswap >>= bitcount;
2410		s.freeswap >>= bitcount;
2411		s.totalhigh >>= bitcount;
2412		s.freehigh >>= bitcount;
2413	}
2414
2415	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
2416	    __put_user(s.uptime, &info->uptime) ||
2417	    __put_user(s.loads[0], &info->loads[0]) ||
2418	    __put_user(s.loads[1], &info->loads[1]) ||
2419	    __put_user(s.loads[2], &info->loads[2]) ||
2420	    __put_user(s.totalram, &info->totalram) ||
2421	    __put_user(s.freeram, &info->freeram) ||
2422	    __put_user(s.sharedram, &info->sharedram) ||
2423	    __put_user(s.bufferram, &info->bufferram) ||
2424	    __put_user(s.totalswap, &info->totalswap) ||
2425	    __put_user(s.freeswap, &info->freeswap) ||
2426	    __put_user(s.procs, &info->procs) ||
2427	    __put_user(s.totalhigh, &info->totalhigh) ||
2428	    __put_user(s.freehigh, &info->freehigh) ||
2429	    __put_user(s.mem_unit, &info->mem_unit))
2430		return -EFAULT;
2431
2432	return 0;
2433}
2434#endif /* CONFIG_COMPAT */
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/kernel/sys.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 */
   7
   8#include <linux/export.h>
   9#include <linux/mm.h>
  10#include <linux/mm_inline.h>
  11#include <linux/utsname.h>
  12#include <linux/mman.h>
  13#include <linux/reboot.h>
  14#include <linux/prctl.h>
  15#include <linux/highuid.h>
  16#include <linux/fs.h>
  17#include <linux/kmod.h>
  18#include <linux/ksm.h>
  19#include <linux/perf_event.h>
  20#include <linux/resource.h>
  21#include <linux/kernel.h>
  22#include <linux/workqueue.h>
  23#include <linux/capability.h>
  24#include <linux/device.h>
  25#include <linux/key.h>
  26#include <linux/times.h>
  27#include <linux/posix-timers.h>
  28#include <linux/security.h>
  29#include <linux/random.h>
  30#include <linux/suspend.h>
  31#include <linux/tty.h>
  32#include <linux/signal.h>
  33#include <linux/cn_proc.h>
  34#include <linux/getcpu.h>
  35#include <linux/task_io_accounting_ops.h>
  36#include <linux/seccomp.h>
  37#include <linux/cpu.h>
  38#include <linux/personality.h>
  39#include <linux/ptrace.h>
  40#include <linux/fs_struct.h>
  41#include <linux/file.h>
  42#include <linux/mount.h>
  43#include <linux/gfp.h>
  44#include <linux/syscore_ops.h>
  45#include <linux/version.h>
  46#include <linux/ctype.h>
  47#include <linux/syscall_user_dispatch.h>
  48
  49#include <linux/compat.h>
  50#include <linux/syscalls.h>
  51#include <linux/kprobes.h>
  52#include <linux/user_namespace.h>
  53#include <linux/time_namespace.h>
  54#include <linux/binfmts.h>
  55
  56#include <linux/sched.h>
  57#include <linux/sched/autogroup.h>
  58#include <linux/sched/loadavg.h>
  59#include <linux/sched/stat.h>
  60#include <linux/sched/mm.h>
  61#include <linux/sched/coredump.h>
  62#include <linux/sched/task.h>
  63#include <linux/sched/cputime.h>
  64#include <linux/rcupdate.h>
  65#include <linux/uidgid.h>
  66#include <linux/cred.h>
  67
  68#include <linux/nospec.h>
  69
  70#include <linux/kmsg_dump.h>
  71/* Move somewhere else to avoid recompiling? */
  72#include <generated/utsrelease.h>
  73
  74#include <linux/uaccess.h>
  75#include <asm/io.h>
  76#include <asm/unistd.h>
  77
  78#include "uid16.h"
  79
  80#ifndef SET_UNALIGN_CTL
  81# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
  82#endif
  83#ifndef GET_UNALIGN_CTL
  84# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
  85#endif
  86#ifndef SET_FPEMU_CTL
  87# define SET_FPEMU_CTL(a, b)	(-EINVAL)
  88#endif
  89#ifndef GET_FPEMU_CTL
  90# define GET_FPEMU_CTL(a, b)	(-EINVAL)
  91#endif
  92#ifndef SET_FPEXC_CTL
  93# define SET_FPEXC_CTL(a, b)	(-EINVAL)
  94#endif
  95#ifndef GET_FPEXC_CTL
  96# define GET_FPEXC_CTL(a, b)	(-EINVAL)
  97#endif
  98#ifndef GET_ENDIAN
  99# define GET_ENDIAN(a, b)	(-EINVAL)
 100#endif
 101#ifndef SET_ENDIAN
 102# define SET_ENDIAN(a, b)	(-EINVAL)
 103#endif
 104#ifndef GET_TSC_CTL
 105# define GET_TSC_CTL(a)		(-EINVAL)
 106#endif
 107#ifndef SET_TSC_CTL
 108# define SET_TSC_CTL(a)		(-EINVAL)
  109#endif
 110#ifndef GET_FP_MODE
 111# define GET_FP_MODE(a)		(-EINVAL)
 112#endif
 113#ifndef SET_FP_MODE
  114# define SET_FP_MODE(a, b)	(-EINVAL)
 115#endif
 116#ifndef SVE_SET_VL
 117# define SVE_SET_VL(a)		(-EINVAL)
 118#endif
 119#ifndef SVE_GET_VL
 120# define SVE_GET_VL()		(-EINVAL)
 121#endif
 122#ifndef SME_SET_VL
 123# define SME_SET_VL(a)		(-EINVAL)
 124#endif
 125#ifndef SME_GET_VL
 126# define SME_GET_VL()		(-EINVAL)
 127#endif
 128#ifndef PAC_RESET_KEYS
 129# define PAC_RESET_KEYS(a, b)	(-EINVAL)
 130#endif
 131#ifndef PAC_SET_ENABLED_KEYS
 132# define PAC_SET_ENABLED_KEYS(a, b, c)	(-EINVAL)
 133#endif
 134#ifndef PAC_GET_ENABLED_KEYS
 135# define PAC_GET_ENABLED_KEYS(a)	(-EINVAL)
 136#endif
 137#ifndef SET_TAGGED_ADDR_CTRL
 138# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
 139#endif
 140#ifndef GET_TAGGED_ADDR_CTRL
 141# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
 142#endif
 143#ifndef RISCV_V_SET_CONTROL
 144# define RISCV_V_SET_CONTROL(a)		(-EINVAL)
 145#endif
 146#ifndef RISCV_V_GET_CONTROL
 147# define RISCV_V_GET_CONTROL()		(-EINVAL)
 148#endif
 149
 150/*
 151 * this is where the system-wide overflow UID and GID are defined, for
 152 * architectures that now have 32-bit UID/GID but didn't in the past
 153 */
 154
 155int overflowuid = DEFAULT_OVERFLOWUID;
 156int overflowgid = DEFAULT_OVERFLOWGID;
 157
 158EXPORT_SYMBOL(overflowuid);
 159EXPORT_SYMBOL(overflowgid);
 160
 161/*
 162 * the same as above, but for filesystems which can only store a 16-bit
 163 * UID and GID. as such, this is needed on all architectures
 164 */
 165
 166int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
 167int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
 168
 169EXPORT_SYMBOL(fs_overflowuid);
 170EXPORT_SYMBOL(fs_overflowgid);
 171
 172/*
 173 * Returns true if current's euid is same as p's uid or euid,
 174 * or has CAP_SYS_NICE to p's user_ns.
 175 *
 176 * Called with rcu_read_lock, creds are safe
 177 */
 178static bool set_one_prio_perm(struct task_struct *p)
 179{
 180	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
 181
 182	if (uid_eq(pcred->uid,  cred->euid) ||
 183	    uid_eq(pcred->euid, cred->euid))
 184		return true;
 185	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
 186		return true;
 187	return false;
 188}
 189
 190/*
 191 * set the priority of a task
 192 * - the caller must hold the RCU read lock
 193 */
 194static int set_one_prio(struct task_struct *p, int niceval, int error)
 195{
 196	int no_nice;
 197
 198	if (!set_one_prio_perm(p)) {
 199		error = -EPERM;
 200		goto out;
 201	}
 202	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
 203		error = -EACCES;
 204		goto out;
 205	}
 206	no_nice = security_task_setnice(p, niceval);
 207	if (no_nice) {
 208		error = no_nice;
 209		goto out;
 210	}
 211	if (error == -ESRCH)
 212		error = 0;
 213	set_user_nice(p, niceval);
 214out:
 215	return error;
 216}
 217
 218SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 219{
 220	struct task_struct *g, *p;
 221	struct user_struct *user;
 222	const struct cred *cred = current_cred();
 223	int error = -EINVAL;
 224	struct pid *pgrp;
 225	kuid_t uid;
 226
 227	if (which > PRIO_USER || which < PRIO_PROCESS)
 228		goto out;
 229
 230	/* normalize: avoid signed division (rounding problems) */
 231	error = -ESRCH;
 232	if (niceval < MIN_NICE)
 233		niceval = MIN_NICE;
 234	if (niceval > MAX_NICE)
 235		niceval = MAX_NICE;
 236
  237	rcu_read_lock();
 238	switch (which) {
 239	case PRIO_PROCESS:
 240		if (who)
 241			p = find_task_by_vpid(who);
 242		else
 243			p = current;
 244		if (p)
 245			error = set_one_prio(p, niceval, error);
 246		break;
 247	case PRIO_PGRP:
 248		if (who)
 249			pgrp = find_vpid(who);
 250		else
 251			pgrp = task_pgrp(current);
 252		read_lock(&tasklist_lock);
 253		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 254			error = set_one_prio(p, niceval, error);
 255		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 256		read_unlock(&tasklist_lock);
 257		break;
 258	case PRIO_USER:
 259		uid = make_kuid(cred->user_ns, who);
 260		user = cred->user;
 261		if (!who)
 262			uid = cred->uid;
 263		else if (!uid_eq(uid, cred->uid)) {
 264			user = find_user(uid);
 265			if (!user)
 266				goto out_unlock;	/* No processes for this user */
 267		}
 268		for_each_process_thread(g, p) {
 269			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
 270				error = set_one_prio(p, niceval, error);
 271		}
 272		if (!uid_eq(uid, cred->uid))
 273			free_uid(user);		/* For find_user() */
 274		break;
 275	}
  276out_unlock:
 277	rcu_read_unlock();
 278out:
 279	return error;
 280}
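/*
 * For example, renicing the calling process to nice 10, which needs
 * no privilege since it lowers priority (sketch using the glibc
 * wrapper):
 *
 *	#include <sys/resource.h>
 *
 *	setpriority(PRIO_PROCESS, 0, 10);
 */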
 281
 282/*
 283 * Ugh. To avoid negative return values, "getpriority()" will
 284 * not return the normal nice-value, but a negated value that
  285 * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
 286 * to stay compatible.
 287 */
 288SYSCALL_DEFINE2(getpriority, int, which, int, who)
 289{
 290	struct task_struct *g, *p;
 291	struct user_struct *user;
 292	const struct cred *cred = current_cred();
 293	long niceval, retval = -ESRCH;
 294	struct pid *pgrp;
 295	kuid_t uid;
 296
 297	if (which > PRIO_USER || which < PRIO_PROCESS)
 298		return -EINVAL;
 299
  300	rcu_read_lock();
 301	switch (which) {
 302	case PRIO_PROCESS:
 303		if (who)
 304			p = find_task_by_vpid(who);
 305		else
 306			p = current;
 307		if (p) {
 308			niceval = nice_to_rlimit(task_nice(p));
 309			if (niceval > retval)
 310				retval = niceval;
 311		}
 312		break;
 313	case PRIO_PGRP:
 314		if (who)
 315			pgrp = find_vpid(who);
 316		else
 317			pgrp = task_pgrp(current);
 318		read_lock(&tasklist_lock);
 319		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 320			niceval = nice_to_rlimit(task_nice(p));
 321			if (niceval > retval)
 322				retval = niceval;
 323		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 324		read_unlock(&tasklist_lock);
 325		break;
 326	case PRIO_USER:
 327		uid = make_kuid(cred->user_ns, who);
 328		user = cred->user;
 329		if (!who)
 330			uid = cred->uid;
 331		else if (!uid_eq(uid, cred->uid)) {
 332			user = find_user(uid);
 333			if (!user)
 334				goto out_unlock;	/* No processes for this user */
 335		}
 336		for_each_process_thread(g, p) {
 337			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
 338				niceval = nice_to_rlimit(task_nice(p));
 339				if (niceval > retval)
 340					retval = niceval;
 341			}
 342		}
 343		if (!uid_eq(uid, cred->uid))
 344			free_uid(user);		/* for find_user() */
 345		break;
 346	}
  347out_unlock:
 348	rcu_read_unlock();
 349
 350	return retval;
 351}
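/*
 * Worked example of the offset above: for a task at nice -5 the raw
 * syscall returns 20 - (-5) = 25.  The glibc getpriority() wrapper
 * undoes this as 20 - 25 = -5 before returning, which is why callers
 * must clear and check errno instead of testing for a -1 return.
 */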
 352
 353/*
 354 * Unprivileged users may change the real gid to the effective gid
 355 * or vice versa.  (BSD-style)
 356 *
 357 * If you set the real gid at all, or set the effective gid to a value not
 358 * equal to the real gid, then the saved gid is set to the new effective gid.
 359 *
 360 * This makes it possible for a setgid program to completely drop its
 361 * privileges, which is often a useful assertion to make when you are doing
  362 * a security audit of a program.
 363 *
 364 * The general idea is that a program which uses just setregid() will be
 365 * 100% compatible with BSD.  A program which uses just setgid() will be
 366 * 100% compatible with POSIX with saved IDs.
 367 *
  368 * SMP: There are no races, the GIDs are checked only by filesystem
 369 *      operations (as far as semantic preservation is concerned).
 370 */
 371#ifdef CONFIG_MULTIUSER
 372long __sys_setregid(gid_t rgid, gid_t egid)
 373{
 374	struct user_namespace *ns = current_user_ns();
 375	const struct cred *old;
 376	struct cred *new;
 377	int retval;
 378	kgid_t krgid, kegid;
 379
 380	krgid = make_kgid(ns, rgid);
 381	kegid = make_kgid(ns, egid);
 382
 383	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 384		return -EINVAL;
 385	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 386		return -EINVAL;
 387
 388	new = prepare_creds();
 389	if (!new)
 390		return -ENOMEM;
 391	old = current_cred();
 392
 393	retval = -EPERM;
 394	if (rgid != (gid_t) -1) {
 395		if (gid_eq(old->gid, krgid) ||
 396		    gid_eq(old->egid, krgid) ||
 397		    ns_capable_setid(old->user_ns, CAP_SETGID))
 398			new->gid = krgid;
 399		else
 400			goto error;
 401	}
 402	if (egid != (gid_t) -1) {
 403		if (gid_eq(old->gid, kegid) ||
 404		    gid_eq(old->egid, kegid) ||
 405		    gid_eq(old->sgid, kegid) ||
 406		    ns_capable_setid(old->user_ns, CAP_SETGID))
 407			new->egid = kegid;
 408		else
 409			goto error;
 410	}
 411
 412	if (rgid != (gid_t) -1 ||
 413	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
 414		new->sgid = new->egid;
 415	new->fsgid = new->egid;
 416
 417	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
 418	if (retval < 0)
 419		goto error;
 420
 421	return commit_creds(new);
 422
 423error:
 424	abort_creds(new);
 425	return retval;
 426}
 427
 428SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 429{
 430	return __sys_setregid(rgid, egid);
 431}
 432
 433/*
 434 * setgid() is implemented like SysV w/ SAVED_IDS
 435 *
 436 * SMP: Same implicit races as above.
 437 */
 438long __sys_setgid(gid_t gid)
 439{
 440	struct user_namespace *ns = current_user_ns();
 441	const struct cred *old;
 442	struct cred *new;
 443	int retval;
 444	kgid_t kgid;
 445
 446	kgid = make_kgid(ns, gid);
 447	if (!gid_valid(kgid))
 448		return -EINVAL;
 449
 450	new = prepare_creds();
 451	if (!new)
 452		return -ENOMEM;
 453	old = current_cred();
 454
 455	retval = -EPERM;
 456	if (ns_capable_setid(old->user_ns, CAP_SETGID))
 457		new->gid = new->egid = new->sgid = new->fsgid = kgid;
 458	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
 459		new->egid = new->fsgid = kgid;
 460	else
 461		goto error;
 462
 463	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
 464	if (retval < 0)
 465		goto error;
 466
 467	return commit_creds(new);
 468
 469error:
 470	abort_creds(new);
 471	return retval;
 472}
 473
 474SYSCALL_DEFINE1(setgid, gid_t, gid)
 475{
 476	return __sys_setgid(gid);
 477}
 478
 479/*
 480 * change the user struct in a credentials set to match the new UID
 481 */
 482static int set_user(struct cred *new)
 483{
 484	struct user_struct *new_user;
 485
 486	new_user = alloc_uid(new->uid);
 487	if (!new_user)
 488		return -EAGAIN;
 489
 490	free_uid(new->user);
 491	new->user = new_user;
 492	return 0;
 493}
 494
 495static void flag_nproc_exceeded(struct cred *new)
 496{
 497	if (new->ucounts == current_ucounts())
 498		return;
 499
 500	/*
 501	 * We don't fail in case of NPROC limit excess here because too many
 502	 * poorly written programs don't check set*uid() return code, assuming
 503	 * it never fails if called by root.  We may still enforce NPROC limit
 504	 * for programs doing set*uid()+execve() by harmlessly deferring the
 505	 * failure to the execve() stage.
 506	 */
 507	if (is_rlimit_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
 508			new->user != INIT_USER)
 509		current->flags |= PF_NPROC_EXCEEDED;
 510	else
  511		current->flags &= ~PF_NPROC_EXCEEDED;
 512}
 513
 514/*
 515 * Unprivileged users may change the real uid to the effective uid
 516 * or vice versa.  (BSD-style)
 517 *
 518 * If you set the real uid at all, or set the effective uid to a value not
 519 * equal to the real uid, then the saved uid is set to the new effective uid.
 520 *
 521 * This makes it possible for a setuid program to completely drop its
 522 * privileges, which is often a useful assertion to make when you are doing
  523 * a security audit of a program.
 524 *
 525 * The general idea is that a program which uses just setreuid() will be
 526 * 100% compatible with BSD.  A program which uses just setuid() will be
 527 * 100% compatible with POSIX with saved IDs.
 528 */
 529long __sys_setreuid(uid_t ruid, uid_t euid)
 530{
 531	struct user_namespace *ns = current_user_ns();
 532	const struct cred *old;
 533	struct cred *new;
 534	int retval;
 535	kuid_t kruid, keuid;
 536
 537	kruid = make_kuid(ns, ruid);
 538	keuid = make_kuid(ns, euid);
 539
 540	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 541		return -EINVAL;
 542	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 543		return -EINVAL;
 544
 545	new = prepare_creds();
 546	if (!new)
 547		return -ENOMEM;
 548	old = current_cred();
 549
 550	retval = -EPERM;
 551	if (ruid != (uid_t) -1) {
 552		new->uid = kruid;
 553		if (!uid_eq(old->uid, kruid) &&
 554		    !uid_eq(old->euid, kruid) &&
 555		    !ns_capable_setid(old->user_ns, CAP_SETUID))
 556			goto error;
 557	}
 558
 559	if (euid != (uid_t) -1) {
 560		new->euid = keuid;
 561		if (!uid_eq(old->uid, keuid) &&
 562		    !uid_eq(old->euid, keuid) &&
 563		    !uid_eq(old->suid, keuid) &&
 564		    !ns_capable_setid(old->user_ns, CAP_SETUID))
 565			goto error;
 566	}
 567
 568	if (!uid_eq(new->uid, old->uid)) {
 569		retval = set_user(new);
 570		if (retval < 0)
 571			goto error;
 572	}
 573	if (ruid != (uid_t) -1 ||
 574	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
 575		new->suid = new->euid;
 576	new->fsuid = new->euid;
 577
 578	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
 579	if (retval < 0)
 580		goto error;
 581
 582	retval = set_cred_ucounts(new);
 583	if (retval < 0)
 584		goto error;
 585
 586	flag_nproc_exceeded(new);
 587	return commit_creds(new);
 588
 589error:
 590	abort_creds(new);
 591	return retval;
 592}
 593
 594SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 595{
 596	return __sys_setreuid(ruid, euid);
 597}
 598
 599/*
 600 * setuid() is implemented like SysV with SAVED_IDS
 601 *
  602 * Note that SAVED_IDS is deficient in that a setuid root program
 603 * like sendmail, for example, cannot set its uid to be a normal
 604 * user and then switch back, because if you're root, setuid() sets
 605 * the saved uid too.  If you don't like this, blame the bright people
 606 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 607 * will allow a root program to temporarily drop privileges and be able to
 608 * regain them by swapping the real and effective uid.
 609 */
 610long __sys_setuid(uid_t uid)
 611{
 612	struct user_namespace *ns = current_user_ns();
 613	const struct cred *old;
 614	struct cred *new;
 615	int retval;
 616	kuid_t kuid;
 617
 618	kuid = make_kuid(ns, uid);
 619	if (!uid_valid(kuid))
 620		return -EINVAL;
 621
 622	new = prepare_creds();
 623	if (!new)
 624		return -ENOMEM;
 625	old = current_cred();
 626
 627	retval = -EPERM;
 628	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
 629		new->suid = new->uid = kuid;
 630		if (!uid_eq(kuid, old->uid)) {
 631			retval = set_user(new);
 632			if (retval < 0)
 633				goto error;
 634		}
 635	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
 636		goto error;
 637	}
 638
 639	new->fsuid = new->euid = kuid;
 640
 641	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
 642	if (retval < 0)
 643		goto error;
 644
 645	retval = set_cred_ucounts(new);
 646	if (retval < 0)
 647		goto error;
 648
 649	flag_nproc_exceeded(new);
 650	return commit_creds(new);
 651
 652error:
 653	abort_creds(new);
 654	return retval;
 655}
 656
 657SYSCALL_DEFINE1(setuid, uid_t, uid)
 658{
 659	return __sys_setuid(uid);
 660}
 661
 662
 663/*
 664 * This function implements a generic ability to update ruid, euid,
 665 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 666 */
 667long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
 668{
 669	struct user_namespace *ns = current_user_ns();
 670	const struct cred *old;
 671	struct cred *new;
 672	int retval;
 673	kuid_t kruid, keuid, ksuid;
 674	bool ruid_new, euid_new, suid_new;
 675
 676	kruid = make_kuid(ns, ruid);
 677	keuid = make_kuid(ns, euid);
 678	ksuid = make_kuid(ns, suid);
 679
 680	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
 681		return -EINVAL;
 682
 683	if ((euid != (uid_t) -1) && !uid_valid(keuid))
 684		return -EINVAL;
 685
 686	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
 687		return -EINVAL;
 688
 689	old = current_cred();
 690
 691	/* check for no-op */
 692	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
 693	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
 694				    uid_eq(keuid, old->fsuid))) &&
 695	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
 696		return 0;
 697
 698	ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
 699		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
 700	euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
 701		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
 702	suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
 703		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
 704	if ((ruid_new || euid_new || suid_new) &&
 705	    !ns_capable_setid(old->user_ns, CAP_SETUID))
 706		return -EPERM;
 707
 708	new = prepare_creds();
 709	if (!new)
 710		return -ENOMEM;
  711
 712	if (ruid != (uid_t) -1) {
 713		new->uid = kruid;
 714		if (!uid_eq(kruid, old->uid)) {
 715			retval = set_user(new);
 716			if (retval < 0)
 717				goto error;
 718		}
 719	}
 720	if (euid != (uid_t) -1)
 721		new->euid = keuid;
 722	if (suid != (uid_t) -1)
 723		new->suid = ksuid;
 724	new->fsuid = new->euid;
 725
 726	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
 727	if (retval < 0)
 728		goto error;
 729
 730	retval = set_cred_ucounts(new);
 731	if (retval < 0)
 732		goto error;
 733
 734	flag_nproc_exceeded(new);
 735	return commit_creds(new);
 736
 737error:
 738	abort_creds(new);
 739	return retval;
 740}
 741
 742SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 743{
 744	return __sys_setresuid(ruid, euid, suid);
 745}
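/*
 * The canonical way for a setuid root program to drop privileges
 * permanently goes through this syscall (sketch; the return value
 * must be checked because set_user() or set_cred_ucounts() above
 * can fail):
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	uid_t real = getuid();
 *	if (setresuid(real, real, real) != 0)
 *		abort();
 */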
 746
 747SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
 748{
 749	const struct cred *cred = current_cred();
 750	int retval;
 751	uid_t ruid, euid, suid;
 752
 753	ruid = from_kuid_munged(cred->user_ns, cred->uid);
 754	euid = from_kuid_munged(cred->user_ns, cred->euid);
 755	suid = from_kuid_munged(cred->user_ns, cred->suid);
 756
 757	retval = put_user(ruid, ruidp);
 758	if (!retval) {
 759		retval = put_user(euid, euidp);
 760		if (!retval)
 761			return put_user(suid, suidp);
 762	}
 763	return retval;
 764}
 765
 766/*
 767 * Same as above, but for rgid, egid, sgid.
 768 */
 769long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
 770{
 771	struct user_namespace *ns = current_user_ns();
 772	const struct cred *old;
 773	struct cred *new;
 774	int retval;
 775	kgid_t krgid, kegid, ksgid;
 776	bool rgid_new, egid_new, sgid_new;
 777
 778	krgid = make_kgid(ns, rgid);
 779	kegid = make_kgid(ns, egid);
 780	ksgid = make_kgid(ns, sgid);
 781
 782	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
 783		return -EINVAL;
 784	if ((egid != (gid_t) -1) && !gid_valid(kegid))
 785		return -EINVAL;
 786	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
 787		return -EINVAL;
 788
 789	old = current_cred();
 790
 791	/* check for no-op */
 792	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
 793	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
 794				    gid_eq(kegid, old->fsgid))) &&
 795	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
 796		return 0;
 797
 798	rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
 799		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
 800	egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
 801		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
 802	sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
 803		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
 804	if ((rgid_new || egid_new || sgid_new) &&
 805	    !ns_capable_setid(old->user_ns, CAP_SETGID))
 806		return -EPERM;
 807
 808	new = prepare_creds();
 809	if (!new)
  810		return -ENOMEM;
 811
 812	if (rgid != (gid_t) -1)
 813		new->gid = krgid;
 814	if (egid != (gid_t) -1)
 815		new->egid = kegid;
 816	if (sgid != (gid_t) -1)
 817		new->sgid = ksgid;
 818	new->fsgid = new->egid;
 819
 820	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
 821	if (retval < 0)
 822		goto error;
 823
 824	return commit_creds(new);
 825
 826error:
 827	abort_creds(new);
 828	return retval;
 829}
 830
 831SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 832{
 833	return __sys_setresgid(rgid, egid, sgid);
 834}
 835
 836SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
 837{
 838	const struct cred *cred = current_cred();
 839	int retval;
 840	gid_t rgid, egid, sgid;
 841
 842	rgid = from_kgid_munged(cred->user_ns, cred->gid);
 843	egid = from_kgid_munged(cred->user_ns, cred->egid);
 844	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
 845
 846	retval = put_user(rgid, rgidp);
 847	if (!retval) {
 848		retval = put_user(egid, egidp);
 849		if (!retval)
 850			retval = put_user(sgid, sgidp);
 851	}
 852
 853	return retval;
 854}
 855
 856
 857/*
 858 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 859 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 860 * whatever uid it wants to). It normally shadows "euid", except when
 861 * explicitly set by setfsuid() or for access..
  862 * explicitly set by setfsuid() or for access.
 863long __sys_setfsuid(uid_t uid)
 864{
 865	const struct cred *old;
 866	struct cred *new;
 867	uid_t old_fsuid;
 868	kuid_t kuid;
 869
 870	old = current_cred();
 871	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
 872
 873	kuid = make_kuid(old->user_ns, uid);
 874	if (!uid_valid(kuid))
 875		return old_fsuid;
 876
 877	new = prepare_creds();
 878	if (!new)
 879		return old_fsuid;
 880
 881	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
 882	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
 883	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
 884		if (!uid_eq(kuid, old->fsuid)) {
 885			new->fsuid = kuid;
 886			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 887				goto change_okay;
 888		}
 889	}
 890
 891	abort_creds(new);
 892	return old_fsuid;
 893
 894change_okay:
 895	commit_creds(new);
 896	return old_fsuid;
 897}
 898
 899SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 900{
 901	return __sys_setfsuid(uid);
 902}
 903
 904/*
  905 * "Samma på svenska.." - the same again in Swedish, but for the filesystem gid.
 906 */
 907long __sys_setfsgid(gid_t gid)
 908{
 909	const struct cred *old;
 910	struct cred *new;
 911	gid_t old_fsgid;
 912	kgid_t kgid;
 913
 914	old = current_cred();
 915	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
 916
 917	kgid = make_kgid(old->user_ns, gid);
 918	if (!gid_valid(kgid))
 919		return old_fsgid;
 920
 921	new = prepare_creds();
 922	if (!new)
 923		return old_fsgid;
 924
 925	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
 926	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
 927	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
 928		if (!gid_eq(kgid, old->fsgid)) {
 929			new->fsgid = kgid;
  930			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
 931				goto change_okay;
 932		}
 933	}
 934
 935	abort_creds(new);
 936	return old_fsgid;
 937
 938change_okay:
 939	commit_creds(new);
 940	return old_fsgid;
 941}
 942
 943SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 944{
 945	return __sys_setfsgid(gid);
 946}
 947#endif /* CONFIG_MULTIUSER */
 948
 949/**
 950 * sys_getpid - return the thread group id of the current process
 951 *
 952 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 953 * the pid are identical unless CLONE_THREAD was specified on clone() in
 954 * which case the tgid is the same in all threads of the same group.
 955 *
 956 * This is SMP safe as current->tgid does not change.
 957 */
 958SYSCALL_DEFINE0(getpid)
 959{
 960	return task_tgid_vnr(current);
 961}
 962
 963/* Thread ID - the internal kernel "pid" */
 964SYSCALL_DEFINE0(gettid)
 965{
 966	return task_pid_vnr(current);
 967}
 968
 969/*
 970 * Accessing ->real_parent is not SMP-safe, it could
 971 * change from under us. However, we can use a stale
 972 * value of ->real_parent under rcu_read_lock(), see
 973 * release_task()->call_rcu(delayed_put_task_struct).
 974 */
 975SYSCALL_DEFINE0(getppid)
 976{
 977	int pid;
 978
 979	rcu_read_lock();
 980	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
 981	rcu_read_unlock();
 982
 983	return pid;
 984}
 985
 986SYSCALL_DEFINE0(getuid)
 987{
 988	/* Only we change this so SMP safe */
 989	return from_kuid_munged(current_user_ns(), current_uid());
 990}
 991
 992SYSCALL_DEFINE0(geteuid)
 993{
 994	/* Only we change this so SMP safe */
 995	return from_kuid_munged(current_user_ns(), current_euid());
 996}
 997
 998SYSCALL_DEFINE0(getgid)
 999{
1000	/* Only we change this so SMP safe */
1001	return from_kgid_munged(current_user_ns(), current_gid());
1002}
1003
1004SYSCALL_DEFINE0(getegid)
1005{
1006	/* Only we change this so SMP safe */
1007	return from_kgid_munged(current_user_ns(), current_egid());
1008}
1009
1010static void do_sys_times(struct tms *tms)
1011{
1012	u64 tgutime, tgstime, cutime, cstime;
1013
1014	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
1015	cutime = current->signal->cutime;
1016	cstime = current->signal->cstime;
1017	tms->tms_utime = nsec_to_clock_t(tgutime);
1018	tms->tms_stime = nsec_to_clock_t(tgstime);
1019	tms->tms_cutime = nsec_to_clock_t(cutime);
1020	tms->tms_cstime = nsec_to_clock_t(cstime);
1021}
1022
1023SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
1024{
1025	if (tbuf) {
1026		struct tms tmp;
1027
1028		do_sys_times(&tmp);
1029		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
1030			return -EFAULT;
1031	}
1032	force_successful_syscall_return();
1033	return (long) jiffies_64_to_clock_t(get_jiffies_64());
1034}
1035
1036#ifdef CONFIG_COMPAT
1037static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
1038{
1039	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
1040}
1041
1042COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
1043{
1044	if (tbuf) {
1045		struct tms tms;
1046		struct compat_tms tmp;
1047
1048		do_sys_times(&tms);
1049		/* Convert our struct tms to the compat version. */
1050		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
1051		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
1052		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
1053		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
1054		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
1055			return -EFAULT;
1056	}
1057	force_successful_syscall_return();
1058	return compat_jiffies_to_clock_t(jiffies);
1059}
1060#endif
1061
1062/*
1063 * This needs some heavy checking ...
1064 * I just haven't the stomach for it. I also don't fully
1065 * understand sessions/pgrp etc. Let somebody who does explain it.
1066 *
1067 * OK, I think I have the protection semantics right.... this is really
1068 * only important on a multi-user system anyway, to make sure one user
1069 * can't send a signal to a process owned by another.  -TYT, 12/12/91
1070 *
1071 * !PF_FORKNOEXEC check to conform completely to POSIX.
1072 */
1073SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1074{
1075	struct task_struct *p;
1076	struct task_struct *group_leader = current->group_leader;
1077	struct pid *pgrp;
1078	int err;
1079
1080	if (!pid)
1081		pid = task_pid_vnr(group_leader);
1082	if (!pgid)
1083		pgid = pid;
1084	if (pgid < 0)
1085		return -EINVAL;
1086	rcu_read_lock();
1087
1088	/* From this point forward we keep holding onto the tasklist lock
1089	 * so that our parent does not change from under us. -DaveM
1090	 */
1091	write_lock_irq(&tasklist_lock);
1092
1093	err = -ESRCH;
1094	p = find_task_by_vpid(pid);
1095	if (!p)
1096		goto out;
1097
1098	err = -EINVAL;
1099	if (!thread_group_leader(p))
1100		goto out;
1101
1102	if (same_thread_group(p->real_parent, group_leader)) {
1103		err = -EPERM;
1104		if (task_session(p) != task_session(group_leader))
1105			goto out;
1106		err = -EACCES;
1107		if (!(p->flags & PF_FORKNOEXEC))
1108			goto out;
1109	} else {
1110		err = -ESRCH;
1111		if (p != group_leader)
1112			goto out;
1113	}
1114
1115	err = -EPERM;
1116	if (p->signal->leader)
1117		goto out;
1118
1119	pgrp = task_pid(p);
1120	if (pgid != pid) {
1121		struct task_struct *g;
1122
1123		pgrp = find_vpid(pgid);
1124		g = pid_task(pgrp, PIDTYPE_PGID);
1125		if (!g || task_session(g) != task_session(group_leader))
1126			goto out;
1127	}
1128
1129	err = security_task_setpgid(p, pgid);
1130	if (err)
1131		goto out;
1132
1133	if (task_pgrp(p) != pgrp)
1134		change_pid(p, PIDTYPE_PGID, pgrp);
1135
1136	err = 0;
1137out:
1138	/* All paths lead to here, thus we are safe. -DaveM */
1139	write_unlock_irq(&tasklist_lock);
1140	rcu_read_unlock();
1141	return err;
1142}
1143
1144static int do_getpgid(pid_t pid)
1145{
1146	struct task_struct *p;
1147	struct pid *grp;
1148	int retval;
1149
1150	rcu_read_lock();
1151	if (!pid)
1152		grp = task_pgrp(current);
1153	else {
1154		retval = -ESRCH;
1155		p = find_task_by_vpid(pid);
1156		if (!p)
1157			goto out;
1158		grp = task_pgrp(p);
1159		if (!grp)
1160			goto out;
1161
1162		retval = security_task_getpgid(p);
1163		if (retval)
1164			goto out;
1165	}
1166	retval = pid_vnr(grp);
1167out:
1168	rcu_read_unlock();
1169	return retval;
1170}
1171
1172SYSCALL_DEFINE1(getpgid, pid_t, pid)
1173{
1174	return do_getpgid(pid);
1175}
1176
1177#ifdef __ARCH_WANT_SYS_GETPGRP
1178
1179SYSCALL_DEFINE0(getpgrp)
1180{
1181	return do_getpgid(0);
1182}
1183
1184#endif
1185
1186SYSCALL_DEFINE1(getsid, pid_t, pid)
1187{
1188	struct task_struct *p;
1189	struct pid *sid;
1190	int retval;
1191
1192	rcu_read_lock();
1193	if (!pid)
1194		sid = task_session(current);
1195	else {
1196		retval = -ESRCH;
1197		p = find_task_by_vpid(pid);
1198		if (!p)
1199			goto out;
1200		sid = task_session(p);
1201		if (!sid)
1202			goto out;
1203
1204		retval = security_task_getsid(p);
1205		if (retval)
1206			goto out;
1207	}
1208	retval = pid_vnr(sid);
1209out:
1210	rcu_read_unlock();
1211	return retval;
1212}
1213
1214static void set_special_pids(struct pid *pid)
1215{
1216	struct task_struct *curr = current->group_leader;
1217
1218	if (task_session(curr) != pid)
1219		change_pid(curr, PIDTYPE_SID, pid);
1220
1221	if (task_pgrp(curr) != pid)
1222		change_pid(curr, PIDTYPE_PGID, pid);
1223}
1224
1225int ksys_setsid(void)
1226{
1227	struct task_struct *group_leader = current->group_leader;
1228	struct pid *sid = task_pid(group_leader);
1229	pid_t session = pid_vnr(sid);
1230	int err = -EPERM;
1231
1232	write_lock_irq(&tasklist_lock);
1233	/* Fail if I am already a session leader */
1234	if (group_leader->signal->leader)
1235		goto out;
1236
1237	/* Fail if a process group id already exists that equals the
1238	 * proposed session id.
1239	 */
1240	if (pid_task(sid, PIDTYPE_PGID))
1241		goto out;
1242
1243	group_leader->signal->leader = 1;
1244	set_special_pids(sid);
1245
1246	proc_clear_tty(group_leader);
1247
1248	err = session;
1249out:
1250	write_unlock_irq(&tasklist_lock);
1251	if (err > 0) {
1252		proc_sid_connector(group_leader);
1253		sched_autogroup_create_attach(group_leader);
1254	}
1255	return err;
1256}
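/*
 * Since the checks above fail for a process group leader, daemons
 * traditionally fork first so the child, which cannot be a group
 * leader, can call setsid() (classic sketch):
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	if (fork() > 0)
 *		exit(0);
 *	setsid();
 */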
1257
1258SYSCALL_DEFINE0(setsid)
1259{
1260	return ksys_setsid();
1261}
1262
1263DECLARE_RWSEM(uts_sem);
1264
1265#ifdef COMPAT_UTS_MACHINE
1266#define override_architecture(name) \
1267	(personality(current->personality) == PER_LINUX32 && \
1268	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1269		      sizeof(COMPAT_UTS_MACHINE)))
1270#else
1271#define override_architecture(name)	0
1272#endif
1273
1274/*
1275 * Work around broken programs that cannot handle "Linux 3.0".
1276 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
1277 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1278 * 2.6.60.
1279 */
1280static int override_release(char __user *release, size_t len)
1281{
1282	int ret = 0;
1283
1284	if (current->personality & UNAME26) {
1285		const char *rest = UTS_RELEASE;
1286		char buf[65] = { 0 };
1287		int ndots = 0;
1288		unsigned v;
1289		size_t copy;
1290
1291		while (*rest) {
1292			if (*rest == '.' && ++ndots >= 3)
1293				break;
1294			if (!isdigit(*rest) && *rest != '.')
1295				break;
1296			rest++;
1297		}
1298		v = LINUX_VERSION_PATCHLEVEL + 60;
1299		copy = clamp_t(size_t, len, 1, sizeof(buf));
1300		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1301		ret = copy_to_user(release, buf, copy + 1);
1302	}
1303	return ret;
1304}
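/*
 * Worked example: under the UNAME26 personality on a 6.8 kernel,
 * LINUX_VERSION_PATCHLEVEL is 8 so v = 68.  For UTS_RELEASE
 * "6.8.0-rc1" the scan stops at '-', leaving rest = "-rc1", and the
 * release reported to the program becomes "2.6.68-rc1".
 */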
1305
1306SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1307{
1308	struct new_utsname tmp;
1309
1310	down_read(&uts_sem);
1311	memcpy(&tmp, utsname(), sizeof(tmp));
1312	up_read(&uts_sem);
1313	if (copy_to_user(name, &tmp, sizeof(tmp)))
1314		return -EFAULT;
1315
1316	if (override_release(name->release, sizeof(name->release)))
1317		return -EFAULT;
1318	if (override_architecture(name))
1319		return -EFAULT;
1320	return 0;
1321}
1322
1323#ifdef __ARCH_WANT_SYS_OLD_UNAME
1324/*
1325 * Old cruft
1326 */
1327SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1328{
1329	struct old_utsname tmp;
1330
1331	if (!name)
1332		return -EFAULT;
1333
1334	down_read(&uts_sem);
1335	memcpy(&tmp, utsname(), sizeof(tmp));
1336	up_read(&uts_sem);
1337	if (copy_to_user(name, &tmp, sizeof(tmp)))
1338		return -EFAULT;
1339
1340	if (override_release(name->release, sizeof(name->release)))
1341		return -EFAULT;
1342	if (override_architecture(name))
1343		return -EFAULT;
1344	return 0;
1345}
1346
1347SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1348{
1349	struct oldold_utsname tmp;
1350
1351	if (!name)
1352		return -EFAULT;
1353
1354	memset(&tmp, 0, sizeof(tmp));
1355
1356	down_read(&uts_sem);
1357	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1358	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1359	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1360	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1361	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1362	up_read(&uts_sem);
1363	if (copy_to_user(name, &tmp, sizeof(tmp)))
1364		return -EFAULT;
1365
1366	if (override_architecture(name))
1367		return -EFAULT;
1368	if (override_release(name->release, sizeof(name->release)))
1369		return -EFAULT;
1370	return 0;
1371}
1372#endif
1373
1374SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1375{
1376	int errno;
1377	char tmp[__NEW_UTS_LEN];
1378
1379	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1380		return -EPERM;
1381
1382	if (len < 0 || len > __NEW_UTS_LEN)
1383		return -EINVAL;
1384	errno = -EFAULT;
1385	if (!copy_from_user(tmp, name, len)) {
1386		struct new_utsname *u;
1387
1388		add_device_randomness(tmp, len);
1389		down_write(&uts_sem);
1390		u = utsname();
1391		memcpy(u->nodename, tmp, len);
1392		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1393		errno = 0;
1394		uts_proc_notify(UTS_PROC_HOSTNAME);
1395		up_write(&uts_sem);
1396	}
1397	return errno;
1398}
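/*
 * For example (sketch; the caller needs CAP_SYS_ADMIN in its UTS
 * namespace, as checked above):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	const char *name = "node1";
 *	sethostname(name, strlen(name));
 */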
1399
1400#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1401
1402SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1403{
1404	int i;
1405	struct new_utsname *u;
1406	char tmp[__NEW_UTS_LEN + 1];
1407
1408	if (len < 0)
1409		return -EINVAL;
1410	down_read(&uts_sem);
1411	u = utsname();
1412	i = 1 + strlen(u->nodename);
1413	if (i > len)
1414		i = len;
1415	memcpy(tmp, u->nodename, i);
1416	up_read(&uts_sem);
1417	if (copy_to_user(name, tmp, i))
1418		return -EFAULT;
1419	return 0;
1420}
1421
1422#endif
1423
1424/*
1425 * Only setdomainname; getdomainname can be implemented by calling
1426 * uname()
1427 */
1428SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1429{
1430	int errno;
1431	char tmp[__NEW_UTS_LEN];
1432
1433	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1434		return -EPERM;
1435	if (len < 0 || len > __NEW_UTS_LEN)
1436		return -EINVAL;
1437
1438	errno = -EFAULT;
1439	if (!copy_from_user(tmp, name, len)) {
1440		struct new_utsname *u;
1441
1442		add_device_randomness(tmp, len);
1443		down_write(&uts_sem);
1444		u = utsname();
1445		memcpy(u->domainname, tmp, len);
1446		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1447		errno = 0;
1448		uts_proc_notify(UTS_PROC_DOMAINNAME);
1449		up_write(&uts_sem);
1450	}
1451	return errno;
1452}
1453
1454/* make sure you are allowed to change @tsk limits before calling this */
1455static int do_prlimit(struct task_struct *tsk, unsigned int resource,
1456		      struct rlimit *new_rlim, struct rlimit *old_rlim)
1457{
1458	struct rlimit *rlim;
1459	int retval = 0;
1460
1461	if (resource >= RLIM_NLIMITS)
1462		return -EINVAL;
1463	resource = array_index_nospec(resource, RLIM_NLIMITS);
1464
1465	if (new_rlim) {
1466		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1467			return -EINVAL;
1468		if (resource == RLIMIT_NOFILE &&
1469				new_rlim->rlim_max > sysctl_nr_open)
1470			return -EPERM;
1471	}
1472
1473	/* Holding a refcount on tsk protects tsk->signal from disappearing. */
1474	rlim = tsk->signal->rlim + resource;
1475	task_lock(tsk->group_leader);
1476	if (new_rlim) {
1477		/*
1478		 * Keep the capable check against init_user_ns until cgroups can
1479		 * contain all limits.
1480		 */
1481		if (new_rlim->rlim_max > rlim->rlim_max &&
1482				!capable(CAP_SYS_RESOURCE))
1483			retval = -EPERM;
1484		if (!retval)
1485			retval = security_task_setrlimit(tsk, resource, new_rlim);
1486	}
1487	if (!retval) {
1488		if (old_rlim)
1489			*old_rlim = *rlim;
1490		if (new_rlim)
1491			*rlim = *new_rlim;
1492	}
1493	task_unlock(tsk->group_leader);
1494
1495	/*
1496	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
1497	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
1498	 * ignores the rlimit.
1499	 */
1500	if (!retval && new_rlim && resource == RLIMIT_CPU &&
1501	    new_rlim->rlim_cur != RLIM_INFINITY &&
1502	    IS_ENABLED(CONFIG_POSIX_TIMERS)) {
1503		/*
1504		 * update_rlimit_cpu can fail if the task is exiting, but there
1505		 * may be other tasks in the thread group that are not exiting,
1506		 * and they need their cpu timers adjusted.
1507		 *
1508		 * The group_leader is the last task to be released, so if we
1509		 * cannot update_rlimit_cpu on it, then the entire process is
1510		 * exiting and we do not need to update at all.
1511		 */
1512		update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur);
1513	}
1514
1515	return retval;
1516}
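/*
 * A userspace sketch of driving do_prlimit() for another process via
 * the glibc prlimit() wrapper (glibc >= 2.13; pid stands for the
 * target process; reads the old limit and installs a new one in a
 * single call):
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit new_lim = { .rlim_cur = 4096, .rlim_max = 4096 };
 *	struct rlimit old_lim;
 *	prlimit(pid, RLIMIT_NOFILE, &new_lim, &old_lim);
 */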
1517
1518SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1519{
1520	struct rlimit value;
1521	int ret;
1522
1523	ret = do_prlimit(current, resource, NULL, &value);
1524	if (!ret)
1525		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1526
1527	return ret;
1528}
1529
1530#ifdef CONFIG_COMPAT
1531
1532COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1533		       struct compat_rlimit __user *, rlim)
1534{
1535	struct rlimit r;
1536	struct compat_rlimit r32;
1537
1538	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1539		return -EFAULT;
1540
1541	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1542		r.rlim_cur = RLIM_INFINITY;
1543	else
1544		r.rlim_cur = r32.rlim_cur;
1545	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1546		r.rlim_max = RLIM_INFINITY;
1547	else
1548		r.rlim_max = r32.rlim_max;
1549	return do_prlimit(current, resource, &r, NULL);
1550}
1551
1552COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1553		       struct compat_rlimit __user *, rlim)
1554{
1555	struct rlimit r;
1556	int ret;
1557
1558	ret = do_prlimit(current, resource, NULL, &r);
1559	if (!ret) {
1560		struct compat_rlimit r32;
1561		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1562			r32.rlim_cur = COMPAT_RLIM_INFINITY;
1563		else
1564			r32.rlim_cur = r.rlim_cur;
1565		if (r.rlim_max > COMPAT_RLIM_INFINITY)
1566			r32.rlim_max = COMPAT_RLIM_INFINITY;
1567		else
1568			r32.rlim_max = r.rlim_max;
1569
1570		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1571			return -EFAULT;
1572	}
1573	return ret;
1574}
1575
1576#endif
1577
1578#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1579
1580/*
1581 *	Back compatibility for getrlimit. Needed for some apps.
1582 */
1583SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1584		struct rlimit __user *, rlim)
1585{
1586	struct rlimit x;
1587	if (resource >= RLIM_NLIMITS)
1588		return -EINVAL;
1589
1590	resource = array_index_nospec(resource, RLIM_NLIMITS);
1591	task_lock(current->group_leader);
1592	x = current->signal->rlim[resource];
1593	task_unlock(current->group_leader);
1594	if (x.rlim_cur > 0x7FFFFFFF)
1595		x.rlim_cur = 0x7FFFFFFF;
1596	if (x.rlim_max > 0x7FFFFFFF)
1597		x.rlim_max = 0x7FFFFFFF;
1598	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1599}
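/*
 * Worked example of the clamping above (assuming a 64-bit kernel where
 * RLIM_INFINITY == ~0UL): legacy binaries treat these fields as 32-bit
 * signed longs, so anything above INT_MAX is reported as 0x7FFFFFFF:
 *
 *	rlim_cur == RLIM_INFINITY (0xffffffffffffffff) -> 0x7FFFFFFF
 *	rlim_cur == 0x100000000                        -> 0x7FFFFFFF
 *	rlim_cur == 8192                               -> 8192
 */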
1600
1601#ifdef CONFIG_COMPAT
1602COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1603		       struct compat_rlimit __user *, rlim)
1604{
1605	struct rlimit r;
1606
1607	if (resource >= RLIM_NLIMITS)
1608		return -EINVAL;
1609
1610	resource = array_index_nospec(resource, RLIM_NLIMITS);
1611	task_lock(current->group_leader);
1612	r = current->signal->rlim[resource];
1613	task_unlock(current->group_leader);
1614	if (r.rlim_cur > 0x7FFFFFFF)
1615		r.rlim_cur = 0x7FFFFFFF;
1616	if (r.rlim_max > 0x7FFFFFFF)
1617		r.rlim_max = 0x7FFFFFFF;
1618
1619	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1620	    put_user(r.rlim_max, &rlim->rlim_max))
1621		return -EFAULT;
1622	return 0;
1623}
1624#endif
1625
1626#endif
1627
1628static inline bool rlim64_is_infinity(__u64 rlim64)
1629{
1630#if BITS_PER_LONG < 64
1631	return rlim64 >= ULONG_MAX;
1632#else
1633	return rlim64 == RLIM64_INFINITY;
1634#endif
1635}
1636
1637static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1638{
1639	if (rlim->rlim_cur == RLIM_INFINITY)
1640		rlim64->rlim_cur = RLIM64_INFINITY;
1641	else
1642		rlim64->rlim_cur = rlim->rlim_cur;
1643	if (rlim->rlim_max == RLIM_INFINITY)
1644		rlim64->rlim_max = RLIM64_INFINITY;
1645	else
1646		rlim64->rlim_max = rlim->rlim_max;
1647}
1648
1649static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1650{
1651	if (rlim64_is_infinity(rlim64->rlim_cur))
1652		rlim->rlim_cur = RLIM_INFINITY;
1653	else
1654		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1655	if (rlim64_is_infinity(rlim64->rlim_max))
1656		rlim->rlim_max = RLIM_INFINITY;
1657	else
1658		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1659}
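/*
 * Worked example of the two conversion helpers above on a 32-bit
 * kernel (BITS_PER_LONG == 32, ULONG_MAX == 0xffffffff):
 *
 *	rlim64_to_rlim: 0xffffffffffffffff (RLIM64_INFINITY) -> RLIM_INFINITY
 *	rlim64_to_rlim: 0x00000001ffffffff (>= ULONG_MAX)    -> RLIM_INFINITY
 *	rlim64_to_rlim: 0x0000000000001000                   -> 0x1000
 *	rlim_to_rlim64: RLIM_INFINITY                        -> RLIM64_INFINITY
 *
 * 64-bit values that don't fit in an unsigned long saturate to
 * infinity rather than being silently truncated.
 */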
1660
1661/* rcu lock must be held */
1662static int check_prlimit_permission(struct task_struct *task,
1663				    unsigned int flags)
1664{
1665	const struct cred *cred = current_cred(), *tcred;
1666	bool id_match;
1667
1668	if (current == task)
1669		return 0;
1670
1671	tcred = __task_cred(task);
1672	id_match = (uid_eq(cred->uid, tcred->euid) &&
1673		    uid_eq(cred->uid, tcred->suid) &&
1674		    uid_eq(cred->uid, tcred->uid)  &&
1675		    gid_eq(cred->gid, tcred->egid) &&
1676		    gid_eq(cred->gid, tcred->sgid) &&
1677		    gid_eq(cred->gid, tcred->gid));
1678	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1679		return -EPERM;
1680
1681	return security_task_prlimit(cred, tcred, flags);
1682}
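/*
 * The check above follows the usual credential-matching rule: an
 * unprivileged caller may act on another task's limits only if its
 * real uid/gid matches all of the target's real, effective and saved
 * ids; otherwise CAP_SYS_RESOURCE in the target's user namespace is
 * required. Hedged userspace sketch (target_pid is a placeholder)
 * using the glibc prlimit(3) wrapper:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/resource.h>
 *
 *	struct rlimit old;
 *
 *	// same-uid target: permitted without any capability
 *	prlimit(target_pid, RLIMIT_NOFILE, NULL, &old);
 *	// root-owned target from an unprivileged caller: -1 with EPERM
 */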
1683
1684SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1685		const struct rlimit64 __user *, new_rlim,
1686		struct rlimit64 __user *, old_rlim)
1687{
1688	struct rlimit64 old64, new64;
1689	struct rlimit old, new;
1690	struct task_struct *tsk;
1691	unsigned int checkflags = 0;
1692	int ret;
1693
1694	if (old_rlim)
1695		checkflags |= LSM_PRLIMIT_READ;
1696
1697	if (new_rlim) {
1698		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1699			return -EFAULT;
1700		rlim64_to_rlim(&new64, &new);
1701		checkflags |= LSM_PRLIMIT_WRITE;
1702	}
1703
1704	rcu_read_lock();
1705	tsk = pid ? find_task_by_vpid(pid) : current;
1706	if (!tsk) {
1707		rcu_read_unlock();
1708		return -ESRCH;
1709	}
1710	ret = check_prlimit_permission(tsk, checkflags);
1711	if (ret) {
1712		rcu_read_unlock();
1713		return ret;
1714	}
1715	get_task_struct(tsk);
1716	rcu_read_unlock();
1717
1718	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1719			old_rlim ? &old : NULL);
1720
1721	if (!ret && old_rlim) {
1722		rlim_to_rlim64(&old, &old64);
1723		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1724			ret = -EFAULT;
1725	}
1726
1727	put_task_struct(tsk);
1728	return ret;
1729}
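/*
 * Illustrative userspace sketch (not part of this file): glibc exposes
 * this syscall as prlimit(2), which can read the old limit and install
 * a new one in a single call. Raising rlim_max above the current hard
 * limit still requires CAP_SYS_RESOURCE.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int set_nofile(pid_t pid, rlim_t lim)
 *	{
 *		struct rlimit new = { .rlim_cur = lim, .rlim_max = lim };
 *		struct rlimit old;
 *
 *		if (prlimit(pid, RLIMIT_NOFILE, &new, &old))
 *			return -1;
 *		printf("was %llu/%llu\n",
 *		       (unsigned long long)old.rlim_cur,
 *		       (unsigned long long)old.rlim_max);
 *		return 0;
 *	}
 */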
1730
1731SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1732{
1733	struct rlimit new_rlim;
1734
1735	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1736		return -EFAULT;
1737	return do_prlimit(current, resource, &new_rlim, NULL);
1738}
1739
1740/*
1741 * It would make sense to put struct rusage in the task_struct,
1742 * except that would make the task_struct be *really big*.  After
1743 * task_struct gets moved into malloc'ed memory, it would
1744 * make sense to do this.  It will make moving the rest of the information
1745 * a lot simpler!  (Which we're not doing right now because we're not
1746 * measuring them yet).
1747 *
1748 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1749 * races with threads incrementing their own counters.  But since word
1750 * reads are atomic, we either get new values or old values and we don't
1751 * care which for the sums.  We always take the siglock to protect reading
1752 * the c* fields from p->signal from races with exit.c updating those
1753 * fields when reaping, so a sample either gets all the additions of a
1754 * given child after it's reaped, or none so this sample is before reaping.
1755 *
1756	 * Locking:
1757	 * We need to take the siglock for CHILDREN, SELF and BOTH
1758	 * for the cases of current multithreaded, non-current single threaded and
1759	 * non-current multithreaded tasks.  Thread traversal is now safe with
1760	 * the siglock held.
1761	 * Strictly speaking, we do not need to take the siglock if we are current and
1762	 * single threaded, as no one else can take our signal_struct away, no one
1763	 * else can reap the children to update signal->c* counters, and no one else
1764	 * can race with the signal-> fields. If we do not take any lock, the
1765	 * signal-> fields could be read out of order while another thread was just
1766	 * exiting. So we should place a read memory barrier when we avoid the lock.
1767	 * On the writer side, a write memory barrier is implied in __exit_signal,
1768	 * as __exit_signal releases the siglock spinlock after updating the signal->
1769	 * fields. But we don't do this yet to keep things simple.
1770 *
1771 */
1772
1773static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1774{
1775	r->ru_nvcsw += t->nvcsw;
1776	r->ru_nivcsw += t->nivcsw;
1777	r->ru_minflt += t->min_flt;
1778	r->ru_majflt += t->maj_flt;
1779	r->ru_inblock += task_io_get_inblock(t);
1780	r->ru_oublock += task_io_get_oublock(t);
1781}
1782
1783void getrusage(struct task_struct *p, int who, struct rusage *r)
1784{
1785	struct task_struct *t;
1786	unsigned long flags;
1787	u64 tgutime, tgstime, utime, stime;
1788	unsigned long maxrss;
1789	struct mm_struct *mm;
1790	struct signal_struct *sig = p->signal;
1791	unsigned int seq = 0;
1792
1793retry:
1794	memset(r, 0, sizeof(*r));
1795	utime = stime = 0;
1796	maxrss = 0;
1797
1798	if (who == RUSAGE_THREAD) {
1799		task_cputime_adjusted(current, &utime, &stime);
1800		accumulate_thread_rusage(p, r);
1801		maxrss = sig->maxrss;
1802		goto out_thread;
1803	}
1804
1805	flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
1806
1807	switch (who) {
1808	case RUSAGE_BOTH:
1809	case RUSAGE_CHILDREN:
1810		utime = sig->cutime;
1811		stime = sig->cstime;
1812		r->ru_nvcsw = sig->cnvcsw;
1813		r->ru_nivcsw = sig->cnivcsw;
1814		r->ru_minflt = sig->cmin_flt;
1815		r->ru_majflt = sig->cmaj_flt;
1816		r->ru_inblock = sig->cinblock;
1817		r->ru_oublock = sig->coublock;
1818		maxrss = sig->cmaxrss;
1819
1820		if (who == RUSAGE_CHILDREN)
1821			break;
1822		fallthrough;
1823
1824	case RUSAGE_SELF:
1825		r->ru_nvcsw += sig->nvcsw;
1826		r->ru_nivcsw += sig->nivcsw;
1827		r->ru_minflt += sig->min_flt;
1828		r->ru_majflt += sig->maj_flt;
1829		r->ru_inblock += sig->inblock;
1830		r->ru_oublock += sig->oublock;
1831		if (maxrss < sig->maxrss)
1832			maxrss = sig->maxrss;
1833
1834		rcu_read_lock();
1835		__for_each_thread(sig, t)
1836			accumulate_thread_rusage(t, r);
1837		rcu_read_unlock();
1838
1839		break;
1840
1841	default:
1842		BUG();
1843	}
1844
1845	if (need_seqretry(&sig->stats_lock, seq)) {
1846		seq = 1;
1847		goto retry;
1848	}
1849	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
1850
1851	if (who == RUSAGE_CHILDREN)
1852		goto out_children;
1853
1854	thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1855	utime += tgutime;
1856	stime += tgstime;
1857
1858out_thread:
1859	mm = get_task_mm(p);
1860	if (mm) {
1861		setmax_mm_hiwater_rss(&maxrss, mm);
1862		mmput(mm);
1863	}
1864
1865out_children:
1866	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1867	r->ru_utime = ns_to_kernel_old_timeval(utime);
1868	r->ru_stime = ns_to_kernel_old_timeval(stime);
1869}
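/*
 * Illustrative userspace sketch (not part of this file): per the
 * PAGE_SIZE/1024 conversion above, ru_maxrss is reported in kilobytes.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	void report_usage(void)
 *	{
 *		struct rusage ru;
 *
 *		if (getrusage(RUSAGE_SELF, &ru) == 0)
 *			printf("peak RSS %ld kB, %ld minor / %ld major faults\n",
 *			       ru.ru_maxrss, ru.ru_minflt, ru.ru_majflt);
 *	}
 */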
1870
1871SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1872{
1873	struct rusage r;
1874
1875	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1876	    who != RUSAGE_THREAD)
1877		return -EINVAL;
1878
1879	getrusage(current, who, &r);
1880	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1881}
1882
1883#ifdef CONFIG_COMPAT
1884COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1885{
1886	struct rusage r;
1887
1888	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1889	    who != RUSAGE_THREAD)
1890		return -EINVAL;
1891
1892	getrusage(current, who, &r);
1893	return put_compat_rusage(&r, ru);
1894}
1895#endif
1896
1897SYSCALL_DEFINE1(umask, int, mask)
1898{
1899	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1900	return mask;
1901}
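/*
 * Illustrative userspace sketch (not part of this file): umask(2) has
 * no read-only form, so the usual way to query it is to set a dummy
 * value and immediately restore the old mask returned by the xchg
 * above:
 *
 *	#include <sys/stat.h>
 *
 *	mode_t query_umask(void)
 *	{
 *		mode_t old = umask(0);	// returns the previous mask
 *		umask(old);		// put it back
 *		return old;
 *	}
 */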
1902
1903static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1904{
1905	struct fd exe;
1906	struct inode *inode;
1907	int err;
1908
1909	exe = fdget(fd);
1910	if (!exe.file)
1911		return -EBADF;
1912
1913	inode = file_inode(exe.file);
1914
1915	/*
1916	 * Because the original mm->exe_file points to an executable file, make
1917	 * sure that this one is executable as well, to avoid breaking the
1918	 * overall picture.
1919	 */
1920	err = -EACCES;
1921	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1922		goto exit;
1923
1924	err = file_permission(exe.file, MAY_EXEC);
1925	if (err)
1926		goto exit;
1927
1928	err = replace_mm_exe_file(mm, exe.file);
1929exit:
1930	fdput(exe);
1931	return err;
1932}
1933
1934/*
1935 * Check arithmetic relations of passed addresses.
1936 *
1937 * WARNING: we don't require any capability here so be very careful
1938 * in what is allowed for modification from userspace.
1939 */
1940static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1941{
1942	unsigned long mmap_max_addr = TASK_SIZE;
1943	int error = -EINVAL, i;
1944
1945	static const unsigned char offsets[] = {
1946		offsetof(struct prctl_mm_map, start_code),
1947		offsetof(struct prctl_mm_map, end_code),
1948		offsetof(struct prctl_mm_map, start_data),
1949		offsetof(struct prctl_mm_map, end_data),
1950		offsetof(struct prctl_mm_map, start_brk),
1951		offsetof(struct prctl_mm_map, brk),
1952		offsetof(struct prctl_mm_map, start_stack),
1953		offsetof(struct prctl_mm_map, arg_start),
1954		offsetof(struct prctl_mm_map, arg_end),
1955		offsetof(struct prctl_mm_map, env_start),
1956		offsetof(struct prctl_mm_map, env_end),
1957	};
1958
1959	/*
1960	 * Make sure the members are not somewhere outside
1961	 * of the allowed address space.
1962	 */
1963	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1964		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1965
1966		if ((unsigned long)val >= mmap_max_addr ||
1967		    (unsigned long)val < mmap_min_addr)
1968			goto out;
1969	}
1970
1971	/*
1972	 * Make sure the pairs are ordered.
1973	 */
1974#define __prctl_check_order(__m1, __op, __m2)				\
1975	((unsigned long)prctl_map->__m1 __op				\
1976	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1977	error  = __prctl_check_order(start_code, <, end_code);
1978	error |= __prctl_check_order(start_data, <=, end_data);
1979	error |= __prctl_check_order(start_brk, <=, brk);
1980	error |= __prctl_check_order(arg_start, <=, arg_end);
1981	error |= __prctl_check_order(env_start, <=, env_end);
1982	if (error)
1983		goto out;
1984#undef __prctl_check_order
1985
1986	error = -EINVAL;
1987
1988	/*
1989	 * Do not allow overriding the limits if they are already set.
1990	 */
1991	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1992			      prctl_map->start_brk, prctl_map->end_data,
1993			      prctl_map->start_data))
1994			goto out;
1995
1996	error = 0;
1997out:
1998	return error;
1999}
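/*
 * Worked example of __prctl_check_order() above: with
 * start_code == 0x400000 and end_code == 0x401000,
 *
 *	__prctl_check_order(start_code, <, end_code)
 *
 * expands to ((0x400000 < 0x401000) ? 0 : -EINVAL), i.e. 0. The
 * results are OR-ed together, so a single violated relation is
 * enough to fail the whole set with -EINVAL.
 */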
2000
2001#ifdef CONFIG_CHECKPOINT_RESTORE
2002static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
2003{
2004	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
2005	unsigned long user_auxv[AT_VECTOR_SIZE];
2006	struct mm_struct *mm = current->mm;
2007	int error;
2008
2009	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2010	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
2011
2012	if (opt == PR_SET_MM_MAP_SIZE)
2013		return put_user((unsigned int)sizeof(prctl_map),
2014				(unsigned int __user *)addr);
2015
2016	if (data_size != sizeof(prctl_map))
2017		return -EINVAL;
2018
2019	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
2020		return -EFAULT;
2021
2022	error = validate_prctl_map_addr(&prctl_map);
2023	if (error)
2024		return error;
2025
2026	if (prctl_map.auxv_size) {
2027		/*
2028		 * Someone is trying to cheat the auxv vector.
2029		 */
2030		if (!prctl_map.auxv ||
2031				prctl_map.auxv_size > sizeof(mm->saved_auxv))
2032			return -EINVAL;
2033
2034		memset(user_auxv, 0, sizeof(user_auxv));
2035		if (copy_from_user(user_auxv,
2036				   (const void __user *)prctl_map.auxv,
2037				   prctl_map.auxv_size))
2038			return -EFAULT;
2039
2040		/* Last entry must be AT_NULL as specification requires */
2041		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
2042		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
2043	}
2044
2045	if (prctl_map.exe_fd != (u32)-1) {
2046		/*
2047		 * Check if the current user is checkpoint/restore capable.
2048		 * At the time of this writing, it checks for CAP_SYS_ADMIN
2049		 * or CAP_CHECKPOINT_RESTORE.
2050	 * Note that a user with access to ptrace can pass off an
2051	 * arbitrary program as any executable, even setuid ones.
2052	 * This may have implications for the tomoyo subsystem.
2053		 */
2054		if (!checkpoint_restore_ns_capable(current_user_ns()))
2055			return -EPERM;
2056
2057		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2058		if (error)
2059			return error;
2060	}
2061
2062	/*
2063	 * arg_lock protects concurrent updates but we still need mmap_lock for
2064	 * read to exclude races with sys_brk.
2065	 */
2066	mmap_read_lock(mm);
2067
2068	/*
2069	 * We don't validate that these members point to real, present
2070	 * VMAs, because the application may already have unmapped the
2071	 * corresponding VMAs and the kernel mostly uses these members
2072	 * for statistics output in procfs, except
2073	 *
2074	 *  - @start_brk/@brk, which are used in do_brk_flags; the kernel
2075	 *    looks up the VMAs when updating these members, so anything
2076	 *    wrong written here makes the kernel complain about the
2077	 *    userspace program but won't cause problems in the kernel itself
2078	 */
2079
2080	spin_lock(&mm->arg_lock);
2081	mm->start_code	= prctl_map.start_code;
2082	mm->end_code	= prctl_map.end_code;
2083	mm->start_data	= prctl_map.start_data;
2084	mm->end_data	= prctl_map.end_data;
2085	mm->start_brk	= prctl_map.start_brk;
2086	mm->brk		= prctl_map.brk;
2087	mm->start_stack	= prctl_map.start_stack;
2088	mm->arg_start	= prctl_map.arg_start;
2089	mm->arg_end	= prctl_map.arg_end;
2090	mm->env_start	= prctl_map.env_start;
2091	mm->env_end	= prctl_map.env_end;
2092	spin_unlock(&mm->arg_lock);
2093
2094	/*
2095	 * Note this update of @saved_auxv is lockless, so if someone
2096	 * reads this member in procfs while we're updating it, they
2097	 * may see partly updated results. This is a known and
2098	 * acceptable trade off: we leave it as is rather than
2099	 * introduce additional locks here and make the kernel
2100	 * more complex.
2101	 */
2102	if (prctl_map.auxv_size)
2103		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2104
2105	mmap_read_unlock(mm);
2106	return 0;
2107}
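/*
 * Illustrative userspace sketch (not part of this file): restore tools
 * such as CRIU first query the expected structure size, then install
 * the whole map in one call. The field values below are placeholders
 * that would come from a checkpoint image.
 *
 *	#include <string.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	struct prctl_mm_map map;
 *	unsigned int size;
 *
 *	prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (unsigned long)&size, 0, 0);
 *	memset(&map, 0, sizeof(map));
 *	// ...fill map.start_code, map.brk, ... from the image...
 *	map.exe_fd = (unsigned int)-1;	// -1: leave /proc/self/exe alone
 *	prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)&map, size, 0);
 */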
2108#endif /* CONFIG_CHECKPOINT_RESTORE */
2109
2110static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2111			  unsigned long len)
2112{
2113	/*
2114	 * This doesn't move the auxiliary vector itself since it's pinned to
2115	 * mm_struct, but it permits filling the vector with new values.  It's
2116	 * up to the caller to provide sane values here, otherwise userspace
2117	 * tools which use this vector might be unhappy.
2118	 */
2119	unsigned long user_auxv[AT_VECTOR_SIZE] = {};
2120
2121	if (len > sizeof(user_auxv))
2122		return -EINVAL;
2123
2124	if (copy_from_user(user_auxv, (const void __user *)addr, len))
2125		return -EFAULT;
2126
2127	/* Make sure the last entry is always AT_NULL */
2128	user_auxv[AT_VECTOR_SIZE - 2] = 0;
2129	user_auxv[AT_VECTOR_SIZE - 1] = 0;
2130
2131	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2132
2133	task_lock(current);
2134	memcpy(mm->saved_auxv, user_auxv, len);
2135	task_unlock(current);
2136
2137	return 0;
2138}
2139
2140static int prctl_set_mm(int opt, unsigned long addr,
2141			unsigned long arg4, unsigned long arg5)
2142{
2143	struct mm_struct *mm = current->mm;
2144	struct prctl_mm_map prctl_map = {
2145		.auxv = NULL,
2146		.auxv_size = 0,
2147		.exe_fd = -1,
2148	};
2149	struct vm_area_struct *vma;
2150	int error;
2151
2152	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2153			      opt != PR_SET_MM_MAP &&
2154			      opt != PR_SET_MM_MAP_SIZE)))
2155		return -EINVAL;
2156
2157#ifdef CONFIG_CHECKPOINT_RESTORE
2158	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2159		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2160#endif
2161
2162	if (!capable(CAP_SYS_RESOURCE))
2163		return -EPERM;
2164
2165	if (opt == PR_SET_MM_EXE_FILE)
2166		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2167
2168	if (opt == PR_SET_MM_AUXV)
2169		return prctl_set_auxv(mm, addr, arg4);
2170
2171	if (addr >= TASK_SIZE || addr < mmap_min_addr)
2172		return -EINVAL;
2173
2174	error = -EINVAL;
2175
2176	/*
2177	 * arg_lock protects concurrent updates of arg boundaries, we need
2178	 * mmap_lock for a) concurrent sys_brk, b) finding VMA for addr
2179	 * validation.
2180	 */
2181	mmap_read_lock(mm);
2182	vma = find_vma(mm, addr);
2183
2184	spin_lock(&mm->arg_lock);
2185	prctl_map.start_code	= mm->start_code;
2186	prctl_map.end_code	= mm->end_code;
2187	prctl_map.start_data	= mm->start_data;
2188	prctl_map.end_data	= mm->end_data;
2189	prctl_map.start_brk	= mm->start_brk;
2190	prctl_map.brk		= mm->brk;
2191	prctl_map.start_stack	= mm->start_stack;
2192	prctl_map.arg_start	= mm->arg_start;
2193	prctl_map.arg_end	= mm->arg_end;
2194	prctl_map.env_start	= mm->env_start;
2195	prctl_map.env_end	= mm->env_end;
2196
2197	switch (opt) {
2198	case PR_SET_MM_START_CODE:
2199		prctl_map.start_code = addr;
2200		break;
2201	case PR_SET_MM_END_CODE:
2202		prctl_map.end_code = addr;
2203		break;
2204	case PR_SET_MM_START_DATA:
2205		prctl_map.start_data = addr;
2206		break;
2207	case PR_SET_MM_END_DATA:
2208		prctl_map.end_data = addr;
2209		break;
2210	case PR_SET_MM_START_STACK:
2211		prctl_map.start_stack = addr;
2212		break;
2213	case PR_SET_MM_START_BRK:
2214		prctl_map.start_brk = addr;
2215		break;
2216	case PR_SET_MM_BRK:
2217		prctl_map.brk = addr;
2218		break;
2219	case PR_SET_MM_ARG_START:
2220		prctl_map.arg_start = addr;
2221		break;
2222	case PR_SET_MM_ARG_END:
2223		prctl_map.arg_end = addr;
2224		break;
2225	case PR_SET_MM_ENV_START:
2226		prctl_map.env_start = addr;
2227		break;
2228	case PR_SET_MM_ENV_END:
2229		prctl_map.env_end = addr;
2230		break;
2231	default:
2232		goto out;
2233	}
2234
2235	error = validate_prctl_map_addr(&prctl_map);
2236	if (error)
2237		goto out;
2238
2239	switch (opt) {
2240	/*
2241	 * If the command line arguments and environment
2242	 * are placed somewhere else on the stack, we can
2243	 * set them up here: ARG_START/END to set up the
2244	 * command line arguments and ENV_START/END for
2245	 * the environment.
2246	 */
2247	case PR_SET_MM_START_STACK:
2248	case PR_SET_MM_ARG_START:
2249	case PR_SET_MM_ARG_END:
2250	case PR_SET_MM_ENV_START:
2251	case PR_SET_MM_ENV_END:
2252		if (!vma) {
2253			error = -EFAULT;
2254			goto out;
2255		}
2256	}
2257
2258	mm->start_code	= prctl_map.start_code;
2259	mm->end_code	= prctl_map.end_code;
2260	mm->start_data	= prctl_map.start_data;
2261	mm->end_data	= prctl_map.end_data;
2262	mm->start_brk	= prctl_map.start_brk;
2263	mm->brk		= prctl_map.brk;
2264	mm->start_stack	= prctl_map.start_stack;
2265	mm->arg_start	= prctl_map.arg_start;
2266	mm->arg_end	= prctl_map.arg_end;
2267	mm->env_start	= prctl_map.env_start;
2268	mm->env_end	= prctl_map.env_end;
2269
2270	error = 0;
2271out:
2272	spin_unlock(&mm->arg_lock);
2273	mmap_read_unlock(mm);
2274	return error;
2275}
2276
2277#ifdef CONFIG_CHECKPOINT_RESTORE
2278static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
2279{
2280	return put_user(me->clear_child_tid, tid_addr);
2281}
2282#else
2283static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
2284{
2285	return -EINVAL;
2286}
2287#endif
2288
2289static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2290{
2291	/*
2292	 * If the task has has_child_subreaper set, all its descendants
2293	 * already have this flag too and new descendants will
2294	 * inherit it on fork, so skip them.
2295	 *
2296	 * If we've found the child_reaper, skip the descendants in
2297	 * its subtree as they will never leave that pidns.
2298	 */
2299	if (p->signal->has_child_subreaper ||
2300	    is_child_reaper(task_pid(p)))
2301		return 0;
2302
2303	p->signal->has_child_subreaper = 1;
2304	return 1;
2305}
2306
2307int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2308{
2309	return -EINVAL;
2310}
2311
2312int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2313				    unsigned long ctrl)
2314{
2315	return -EINVAL;
2316}
2317
2318#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
2319
2320#ifdef CONFIG_ANON_VMA_NAME
2321
2322#define ANON_VMA_NAME_MAX_LEN		80
2323#define ANON_VMA_NAME_INVALID_CHARS	"\\`$[]"
2324
2325static inline bool is_valid_name_char(char ch)
2326{
2327	/* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
2328	return ch > 0x1f && ch < 0x7f &&
2329		!strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
2330}
2331
2332static int prctl_set_vma(unsigned long opt, unsigned long addr,
2333			 unsigned long size, unsigned long arg)
2334{
2335	struct mm_struct *mm = current->mm;
2336	const char __user *uname;
2337	struct anon_vma_name *anon_name = NULL;
2338	int error;
2339
2340	switch (opt) {
2341	case PR_SET_VMA_ANON_NAME:
2342		uname = (const char __user *)arg;
2343		if (uname) {
2344			char *name, *pch;
2345
2346			name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
2347			if (IS_ERR(name))
2348				return PTR_ERR(name);
2349
2350			for (pch = name; *pch != '\0'; pch++) {
2351				if (!is_valid_name_char(*pch)) {
2352					kfree(name);
2353					return -EINVAL;
2354				}
2355			}
2356			/* anon_vma has its own copy */
2357			anon_name = anon_vma_name_alloc(name);
2358			kfree(name);
2359			if (!anon_name)
2360				return -ENOMEM;
2361
2362		}
2363
2364		mmap_write_lock(mm);
2365		error = madvise_set_anon_name(mm, addr, size, anon_name);
2366		mmap_write_unlock(mm);
2367		anon_vma_name_put(anon_name);
2368		break;
2369	default:
2370		error = -EINVAL;
2371	}
2372
2373	return error;
2374}
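/*
 * Illustrative userspace sketch (not part of this file): a named
 * anonymous mapping shows up as "[anon:my heap]" in /proc/<pid>/maps,
 * which helps attribute memory in tools that parse that file.
 *
 *	#include <sys/mman.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *	      (unsigned long)p, 4096, (unsigned long)"my heap");
 */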
2375
2376#else /* CONFIG_ANON_VMA_NAME */
2377static int prctl_set_vma(unsigned long opt, unsigned long start,
2378			 unsigned long size, unsigned long arg)
2379{
2380	return -EINVAL;
2381}
2382#endif /* CONFIG_ANON_VMA_NAME */
2383
2384static inline unsigned long get_current_mdwe(void)
2385{
2386	unsigned long ret = 0;
2387
2388	if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
2389		ret |= PR_MDWE_REFUSE_EXEC_GAIN;
2390	if (test_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags))
2391		ret |= PR_MDWE_NO_INHERIT;
2392
2393	return ret;
2394}
2395
2396static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
2397				 unsigned long arg4, unsigned long arg5)
2398{
2399	unsigned long current_bits;
2400
2401	if (arg3 || arg4 || arg5)
2402		return -EINVAL;
2403
2404	if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT))
2405		return -EINVAL;
2406
2407	/* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */
2408	if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
2409		return -EINVAL;
2410
2411	/* PARISC cannot allow mdwe as it needs writable stacks */
2412	if (IS_ENABLED(CONFIG_PARISC))
2413		return -EINVAL;
2414
2415	current_bits = get_current_mdwe();
2416	if (current_bits && current_bits != bits)
2417		return -EPERM; /* Cannot unset the flags */
2418
2419	if (bits & PR_MDWE_NO_INHERIT)
2420		set_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags);
2421	if (bits & PR_MDWE_REFUSE_EXEC_GAIN)
2422		set_bit(MMF_HAS_MDWE, &current->mm->flags);
2423
2424	return 0;
2425}
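/*
 * Illustrative userspace sketch (not part of this file): once MDWE is
 * enabled it cannot be cleared again (see the -EPERM above), and later
 * attempts to create writable+executable mappings fail:
 *
 *	#include <sys/mman.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0);
 *
 *	// now refused (MAP_FAILED with EACCES):
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
 *	     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */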
2426
2427static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3,
2428				 unsigned long arg4, unsigned long arg5)
2429{
2430	if (arg2 || arg3 || arg4 || arg5)
2431		return -EINVAL;
2432	return get_current_mdwe();
2433}
2434
2435static int prctl_get_auxv(void __user *addr, unsigned long len)
2436{
2437	struct mm_struct *mm = current->mm;
2438	unsigned long size = min_t(unsigned long, sizeof(mm->saved_auxv), len);
2439
2440	if (size && copy_to_user(addr, mm->saved_auxv, size))
2441		return -EFAULT;
2442	return sizeof(mm->saved_auxv);
2443}
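/*
 * Illustrative userspace sketch (not part of this file): the helper
 * above copies at most min(len, sizeof(saved_auxv)) bytes but always
 * returns the full size, so a caller can size its buffer from the
 * return value. The 64-entry buffer below is an assumption, merely
 * "large enough" on common configurations.
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	unsigned long auxv[64];
 *	int n = prctl(PR_GET_AUXV, (unsigned long)auxv, sizeof(auxv), 0, 0);
 */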
2444
2445SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2446		unsigned long, arg4, unsigned long, arg5)
2447{
2448	struct task_struct *me = current;
2449	unsigned char comm[sizeof(me->comm)];
2450	long error;
2451
2452	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2453	if (error != -ENOSYS)
2454		return error;
2455
2456	error = 0;
2457	switch (option) {
2458	case PR_SET_PDEATHSIG:
2459		if (!valid_signal(arg2)) {
2460			error = -EINVAL;
2461			break;
2462		}
2463		me->pdeath_signal = arg2;
2464		break;
2465	case PR_GET_PDEATHSIG:
2466		error = put_user(me->pdeath_signal, (int __user *)arg2);
2467		break;
2468	case PR_GET_DUMPABLE:
2469		error = get_dumpable(me->mm);
2470		break;
2471	case PR_SET_DUMPABLE:
2472		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2473			error = -EINVAL;
2474			break;
2475		}
2476		set_dumpable(me->mm, arg2);
2477		break;
2478
2479	case PR_SET_UNALIGN:
2480		error = SET_UNALIGN_CTL(me, arg2);
2481		break;
2482	case PR_GET_UNALIGN:
2483		error = GET_UNALIGN_CTL(me, arg2);
2484		break;
2485	case PR_SET_FPEMU:
2486		error = SET_FPEMU_CTL(me, arg2);
2487		break;
2488	case PR_GET_FPEMU:
2489		error = GET_FPEMU_CTL(me, arg2);
2490		break;
2491	case PR_SET_FPEXC:
2492		error = SET_FPEXC_CTL(me, arg2);
2493		break;
2494	case PR_GET_FPEXC:
2495		error = GET_FPEXC_CTL(me, arg2);
2496		break;
2497	case PR_GET_TIMING:
2498		error = PR_TIMING_STATISTICAL;
2499		break;
2500	case PR_SET_TIMING:
2501		if (arg2 != PR_TIMING_STATISTICAL)
2502			error = -EINVAL;
2503		break;
2504	case PR_SET_NAME:
2505		comm[sizeof(me->comm) - 1] = 0;
2506		if (strncpy_from_user(comm, (char __user *)arg2,
2507				      sizeof(me->comm) - 1) < 0)
2508			return -EFAULT;
2509		set_task_comm(me, comm);
2510		proc_comm_connector(me);
2511		break;
2512	case PR_GET_NAME:
2513		get_task_comm(comm, me);
2514		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2515			return -EFAULT;
2516		break;
2517	case PR_GET_ENDIAN:
2518		error = GET_ENDIAN(me, arg2);
2519		break;
2520	case PR_SET_ENDIAN:
2521		error = SET_ENDIAN(me, arg2);
2522		break;
2523	case PR_GET_SECCOMP:
2524		error = prctl_get_seccomp();
2525		break;
2526	case PR_SET_SECCOMP:
2527		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2528		break;
2529	case PR_GET_TSC:
2530		error = GET_TSC_CTL(arg2);
2531		break;
2532	case PR_SET_TSC:
2533		error = SET_TSC_CTL(arg2);
2534		break;
2535	case PR_TASK_PERF_EVENTS_DISABLE:
2536		error = perf_event_task_disable();
2537		break;
2538	case PR_TASK_PERF_EVENTS_ENABLE:
2539		error = perf_event_task_enable();
2540		break;
2541	case PR_GET_TIMERSLACK:
2542		if (current->timer_slack_ns > ULONG_MAX)
2543			error = ULONG_MAX;
2544		else
2545			error = current->timer_slack_ns;
2546		break;
2547	case PR_SET_TIMERSLACK:
2548		if (arg2 <= 0)
2549			current->timer_slack_ns =
2550					current->default_timer_slack_ns;
2551		else
2552			current->timer_slack_ns = arg2;
2553		break;
2554	case PR_MCE_KILL:
2555		if (arg4 | arg5)
2556			return -EINVAL;
2557		switch (arg2) {
2558		case PR_MCE_KILL_CLEAR:
2559			if (arg3 != 0)
2560				return -EINVAL;
2561			current->flags &= ~PF_MCE_PROCESS;
2562			break;
2563		case PR_MCE_KILL_SET:
2564			current->flags |= PF_MCE_PROCESS;
2565			if (arg3 == PR_MCE_KILL_EARLY)
2566				current->flags |= PF_MCE_EARLY;
2567			else if (arg3 == PR_MCE_KILL_LATE)
2568				current->flags &= ~PF_MCE_EARLY;
2569			else if (arg3 == PR_MCE_KILL_DEFAULT)
2570				current->flags &=
2571						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2572			else
2573				return -EINVAL;
2574			break;
2575		default:
2576			return -EINVAL;
2577		}
2578		break;
2579	case PR_MCE_KILL_GET:
2580		if (arg2 | arg3 | arg4 | arg5)
2581			return -EINVAL;
2582		if (current->flags & PF_MCE_PROCESS)
2583			error = (current->flags & PF_MCE_EARLY) ?
2584				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2585		else
2586			error = PR_MCE_KILL_DEFAULT;
2587		break;
2588	case PR_SET_MM:
2589		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2590		break;
2591	case PR_GET_TID_ADDRESS:
2592		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
2593		break;
2594	case PR_SET_CHILD_SUBREAPER:
2595		me->signal->is_child_subreaper = !!arg2;
2596		if (!arg2)
2597			break;
2598
2599		walk_process_tree(me, propagate_has_child_subreaper, NULL);
2600		break;
2601	case PR_GET_CHILD_SUBREAPER:
2602		error = put_user(me->signal->is_child_subreaper,
2603				 (int __user *)arg2);
2604		break;
2605	case PR_SET_NO_NEW_PRIVS:
2606		if (arg2 != 1 || arg3 || arg4 || arg5)
2607			return -EINVAL;
2608
2609		task_set_no_new_privs(current);
2610		break;
2611	case PR_GET_NO_NEW_PRIVS:
2612		if (arg2 || arg3 || arg4 || arg5)
2613			return -EINVAL;
2614		return task_no_new_privs(current) ? 1 : 0;
2615	case PR_GET_THP_DISABLE:
2616		if (arg2 || arg3 || arg4 || arg5)
2617			return -EINVAL;
2618		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2619		break;
2620	case PR_SET_THP_DISABLE:
2621		if (arg3 || arg4 || arg5)
2622			return -EINVAL;
2623		if (mmap_write_lock_killable(me->mm))
2624			return -EINTR;
2625		if (arg2)
2626			set_bit(MMF_DISABLE_THP, &me->mm->flags);
2627		else
2628			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2629		mmap_write_unlock(me->mm);
2630		break;
2631	case PR_MPX_ENABLE_MANAGEMENT:
2632	case PR_MPX_DISABLE_MANAGEMENT:
2633		/* No longer implemented: */
2634		return -EINVAL;
2635	case PR_SET_FP_MODE:
2636		error = SET_FP_MODE(me, arg2);
2637		break;
2638	case PR_GET_FP_MODE:
2639		error = GET_FP_MODE(me);
2640		break;
2641	case PR_SVE_SET_VL:
2642		error = SVE_SET_VL(arg2);
2643		break;
2644	case PR_SVE_GET_VL:
2645		error = SVE_GET_VL();
2646		break;
2647	case PR_SME_SET_VL:
2648		error = SME_SET_VL(arg2);
2649		break;
2650	case PR_SME_GET_VL:
2651		error = SME_GET_VL();
2652		break;
2653	case PR_GET_SPECULATION_CTRL:
2654		if (arg3 || arg4 || arg5)
2655			return -EINVAL;
2656		error = arch_prctl_spec_ctrl_get(me, arg2);
2657		break;
2658	case PR_SET_SPECULATION_CTRL:
2659		if (arg4 || arg5)
2660			return -EINVAL;
2661		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2662		break;
2663	case PR_PAC_RESET_KEYS:
2664		if (arg3 || arg4 || arg5)
2665			return -EINVAL;
2666		error = PAC_RESET_KEYS(me, arg2);
2667		break;
2668	case PR_PAC_SET_ENABLED_KEYS:
2669		if (arg4 || arg5)
2670			return -EINVAL;
2671		error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
2672		break;
2673	case PR_PAC_GET_ENABLED_KEYS:
2674		if (arg2 || arg3 || arg4 || arg5)
2675			return -EINVAL;
2676		error = PAC_GET_ENABLED_KEYS(me);
2677		break;
2678	case PR_SET_TAGGED_ADDR_CTRL:
2679		if (arg3 || arg4 || arg5)
2680			return -EINVAL;
2681		error = SET_TAGGED_ADDR_CTRL(arg2);
2682		break;
2683	case PR_GET_TAGGED_ADDR_CTRL:
2684		if (arg2 || arg3 || arg4 || arg5)
2685			return -EINVAL;
2686		error = GET_TAGGED_ADDR_CTRL();
2687		break;
2688	case PR_SET_IO_FLUSHER:
2689		if (!capable(CAP_SYS_RESOURCE))
2690			return -EPERM;
2691
2692		if (arg3 || arg4 || arg5)
2693			return -EINVAL;
2694
2695		if (arg2 == 1)
2696			current->flags |= PR_IO_FLUSHER;
2697		else if (!arg2)
2698			current->flags &= ~PR_IO_FLUSHER;
2699		else
2700			return -EINVAL;
2701		break;
2702	case PR_GET_IO_FLUSHER:
2703		if (!capable(CAP_SYS_RESOURCE))
2704			return -EPERM;
2705
2706		if (arg2 || arg3 || arg4 || arg5)
2707			return -EINVAL;
2708
2709		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
2710		break;
2711	case PR_SET_SYSCALL_USER_DISPATCH:
2712		error = set_syscall_user_dispatch(arg2, arg3, arg4,
2713						  (char __user *) arg5);
2714		break;
2715#ifdef CONFIG_SCHED_CORE
2716	case PR_SCHED_CORE:
2717		error = sched_core_share_pid(arg2, arg3, arg4, arg5);
2718		break;
2719#endif
2720	case PR_SET_MDWE:
2721		error = prctl_set_mdwe(arg2, arg3, arg4, arg5);
2722		break;
2723	case PR_GET_MDWE:
2724		error = prctl_get_mdwe(arg2, arg3, arg4, arg5);
2725		break;
2726	case PR_SET_VMA:
2727		error = prctl_set_vma(arg2, arg3, arg4, arg5);
2728		break;
2729	case PR_GET_AUXV:
2730		if (arg4 || arg5)
2731			return -EINVAL;
2732		error = prctl_get_auxv((void __user *)arg2, arg3);
2733		break;
2734#ifdef CONFIG_KSM
2735	case PR_SET_MEMORY_MERGE:
2736		if (arg3 || arg4 || arg5)
2737			return -EINVAL;
2738		if (mmap_write_lock_killable(me->mm))
2739			return -EINTR;
2740
2741		if (arg2)
2742			error = ksm_enable_merge_any(me->mm);
2743		else
2744			error = ksm_disable_merge_any(me->mm);
2745		mmap_write_unlock(me->mm);
2746		break;
2747	case PR_GET_MEMORY_MERGE:
2748		if (arg2 || arg3 || arg4 || arg5)
2749			return -EINVAL;
2750
2751		error = !!test_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
2752		break;
2753#endif
2754	case PR_RISCV_V_SET_CONTROL:
2755		error = RISCV_V_SET_CONTROL(arg2);
2756		break;
2757	case PR_RISCV_V_GET_CONTROL:
2758		error = RISCV_V_GET_CONTROL();
2759		break;
2760	default:
2761		error = -EINVAL;
2762		break;
2763	}
2764	return error;
2765}
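/*
 * Illustrative userspace sketch (not part of this file): PR_SET_NAME
 * above copies at most sizeof(->comm) - 1 == 15 bytes plus the NUL:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker-thread-long-name", 0, 0, 0);
 *	// /proc/self/comm now reads "worker-thread-l"
 */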
2766
2767SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2768		struct getcpu_cache __user *, unused)
2769{
2770	int err = 0;
2771	int cpu = raw_smp_processor_id();
2772
2773	if (cpup)
2774		err |= put_user(cpu, cpup);
2775	if (nodep)
2776		err |= put_user(cpu_to_node(cpu), nodep);
2777	return err ? -EFAULT : 0;
2778}
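/*
 * Illustrative userspace sketch (not part of this file): recent glibc
 * (>= 2.29) wraps this as getcpu(3); the result may already be stale
 * by the time it is used, since the scheduler can migrate the task at
 * any point.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	void where_am_i(void)
 *	{
 *		unsigned int cpu, node;
 *
 *		if (getcpu(&cpu, &node) == 0)
 *			printf("cpu %u, node %u\n", cpu, node);
 *	}
 */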
2779
2780/**
2781 * do_sysinfo - fill in sysinfo struct
2782 * @info: pointer to buffer to fill
2783 */
2784static int do_sysinfo(struct sysinfo *info)
2785{
2786	unsigned long mem_total, sav_total;
2787	unsigned int mem_unit, bitcount;
2788	struct timespec64 tp;
2789
2790	memset(info, 0, sizeof(struct sysinfo));
2791
2792	ktime_get_boottime_ts64(&tp);
2793	timens_add_boottime(&tp);
2794	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2795
2796	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2797
2798	info->procs = nr_threads;
2799
2800	si_meminfo(info);
2801	si_swapinfo(info);
2802
2803	/*
2804	 * If the sum of all the available memory (i.e. ram + swap)
2805	 * is less than can be stored in a 32 bit unsigned long then
2806	 * we can be binary compatible with 2.2.x kernels.  If not,
2807	 * well, in that case 2.2.x was broken anyways...
2808	 *
2809	 *  -Erik Andersen <andersee@debian.org>
2810	 */
2811
2812	mem_total = info->totalram + info->totalswap;
2813	if (mem_total < info->totalram || mem_total < info->totalswap)
2814		goto out;
2815	bitcount = 0;
2816	mem_unit = info->mem_unit;
2817	while (mem_unit > 1) {
2818		bitcount++;
2819		mem_unit >>= 1;
2820		sav_total = mem_total;
2821		mem_total <<= 1;
2822		if (mem_total < sav_total)
2823			goto out;
2824	}
2825
2826	/*
2827	 * If mem_total did not overflow, multiply all memory values by
2828	 * info->mem_unit and set it to 1.  This leaves things compatible
2829	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2830	 * kernels...
2831	 */
2832
2833	info->mem_unit = 1;
2834	info->totalram <<= bitcount;
2835	info->freeram <<= bitcount;
2836	info->sharedram <<= bitcount;
2837	info->bufferram <<= bitcount;
2838	info->totalswap <<= bitcount;
2839	info->freeswap <<= bitcount;
2840	info->totalhigh <<= bitcount;
2841	info->freehigh <<= bitcount;
2842
2843out:
2844	return 0;
2845}
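/*
 * Worked example of the rescaling above (assuming PAGE_SIZE == 4096
 * and a 64-bit machine with 8 GiB of RAM and no swap): si_meminfo()
 * reports totalram = 2097152 pages with mem_unit = 4096. mem_total is
 * doubled once per mem_unit bit (12 times) without overflowing, so all
 * values are converted to byte units:
 *
 *	totalram = 2097152 << 12 = 8589934592 bytes, mem_unit = 1
 *
 * which is what 2.2.x-era binaries expect.
 */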
2846
2847SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2848{
2849	struct sysinfo val;
2850
2851	do_sysinfo(&val);
2852
2853	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2854		return -EFAULT;
2855
2856	return 0;
2857}
2858
2859#ifdef CONFIG_COMPAT
2860struct compat_sysinfo {
2861	s32 uptime;
2862	u32 loads[3];
2863	u32 totalram;
2864	u32 freeram;
2865	u32 sharedram;
2866	u32 bufferram;
2867	u32 totalswap;
2868	u32 freeswap;
2869	u16 procs;
2870	u16 pad;
2871	u32 totalhigh;
2872	u32 freehigh;
2873	u32 mem_unit;
2874	char _f[20-2*sizeof(u32)-sizeof(int)];
2875};
2876
2877COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2878{
2879	struct sysinfo s;
2880	struct compat_sysinfo s_32;
2881
2882	do_sysinfo(&s);
2883
2884	/* Check to see if any memory value is too large for 32-bit and scale
2885	 * down if needed
2886	 */
2887	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2888		int bitcount = 0;
2889
2890		while (s.mem_unit < PAGE_SIZE) {
2891			s.mem_unit <<= 1;
2892			bitcount++;
2893		}
2894
2895		s.totalram >>= bitcount;
2896		s.freeram >>= bitcount;
2897		s.sharedram >>= bitcount;
2898		s.bufferram >>= bitcount;
2899		s.totalswap >>= bitcount;
2900		s.freeswap >>= bitcount;
2901		s.totalhigh >>= bitcount;
2902		s.freehigh >>= bitcount;
2903	}
2904
2905	memset(&s_32, 0, sizeof(s_32));
2906	s_32.uptime = s.uptime;
2907	s_32.loads[0] = s.loads[0];
2908	s_32.loads[1] = s.loads[1];
2909	s_32.loads[2] = s.loads[2];
2910	s_32.totalram = s.totalram;
2911	s_32.freeram = s.freeram;
2912	s_32.sharedram = s.sharedram;
2913	s_32.bufferram = s.bufferram;
2914	s_32.totalswap = s.totalswap;
2915	s_32.freeswap = s.freeswap;
2916	s_32.procs = s.procs;
2917	s_32.totalhigh = s.totalhigh;
2918	s_32.freehigh = s.freehigh;
2919	s_32.mem_unit = s.mem_unit;
2920	if (copy_to_user(info, &s_32, sizeof(s_32)))
2921		return -EFAULT;
2922	return 0;
2923}
2924#endif /* CONFIG_COMPAT */