/*
 * linux/kernel/sys.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);

/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (pcred->user->user_ns == cred->user->user_ns &&
	    (pcred->uid == cred->euid ||
	     pcred->euid == cred->euid))
		return true;
	if (ns_capable(pcred->user->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = (struct user_struct *) cred->user;
		if (!who)
			who = cred->uid;
		else if ((who != cred->uid) &&
			 !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (__task_cred(p)->uid == who)
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (who != cred->uid)
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = (struct user_struct *) cred->user;
		if (!who)
			who = cred->uid;
		else if ((who != cred->uid) &&
			 !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (__task_cred(p)->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (who != cred->uid)
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
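
/*
 * Illustrative userspace sketch (not part of this file): undoing the
 * 20-offset encoding described above. The raw syscall is used because the
 * glibc getpriority() wrapper already performs this conversion itself.
 *
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *	#include <unistd.h>
 *
 *	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (raw >= 0) {
 *		int nice = 20 - (int)raw;	// maps 40..1 back to -20..19
 *	}
 */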

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	kmsg_dump(KMSG_DUMP_EMERG);
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
	syscore_shutdown();
}

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	kmsg_dump(KMSG_DUMP_RESTART);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();
	device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	syscore_shutdown();
	printk(KERN_EMERG "System halted.\n");
	kmsg_dump(KMSG_DUMP_HALT);
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	syscore_shutdown();
	printk(KERN_EMERG "Power down.\n");
	kmsg_dump(KMSG_DUMP_POWEROFF);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

static DEFINE_MUTEX(reboot_mutex);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;
	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set, do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	mutex_lock(&reboot_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);
		panic("cannot halt");

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&reboot_mutex);
	return ret;
}
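
/*
 * Illustrative userspace sketch (not part of this file): invoking reboot(2)
 * with the required magic numbers. sync() is called explicitly because, as
 * noted above, the syscall itself does not sync.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);	// needs CAP_SYS_BOOT
 */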

static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 * operations (as far as semantic preservation is concerned).
 */
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (old->gid == rgid ||
		    old->egid == rgid ||
		    nsown_capable(CAP_SETGID))
			new->gid = rgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (old->gid == egid ||
		    old->egid == egid ||
		    old->sgid == egid ||
		    nsown_capable(CAP_SETGID))
			new->egid = egid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old->gid))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
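
/*
 * Illustrative userspace sketch (not part of this file): a setgid program
 * permanently dropping its group privileges, relying on the rule above that
 * setting the real gid also sets the saved gid to the new effective gid.
 *
 *	#include <unistd.h>
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) == -1)	// egid and sgid both become rgid
 *		perror("setregid");
 */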

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = gid;
	else if (gid == old->gid || gid == old->sgid)
		new->egid = new->fsgid = gid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(current_user_ns(), new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root. We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = ruid;
		if (old->uid != ruid &&
		    old->euid != ruid &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = euid;
		if (old->uid != euid &&
		    old->euid != euid &&
		    old->suid != euid &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (new->uid != old->uid) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old->uid))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETUID)) {
		new->suid = new->uid = uid;
		if (uid != old->uid) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (uid != old->uid && uid != new->suid) {
		goto error;
	}

	new->fsuid = new->euid = uid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
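
/*
 * Illustrative userspace sketch (not part of this file): the BSD-style swap
 * mentioned above. A root program temporarily drops privileges and regains
 * them by exchanging the real and effective uid with setreuid().
 *
 *	#include <unistd.h>
 *
 *	uid_t ruid = getuid(), euid = geteuid();
 *
 *	setreuid(euid, ruid);	// drop: the privileged id lives on as ruid
 *	// ... do unprivileged work ...
 *	setreuid(ruid, euid);	// regain: allowed, new euid equals the real uid
 */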


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETUID)) {
		if (ruid != (uid_t) -1 && ruid != old->uid &&
		    ruid != old->euid && ruid != old->suid)
			goto error;
		if (euid != (uid_t) -1 && euid != old->uid &&
		    euid != old->euid && euid != old->suid)
			goto error;
		if (suid != (uid_t) -1 && suid != old->uid &&
		    suid != old->euid && suid != old->suid)
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = ruid;
		if (ruid != old->uid) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = euid;
	if (suid != (uid_t) -1)
		new->suid = suid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
{
	const struct cred *cred = current_cred();
	int retval;

	if (!(retval = put_user(cred->uid, ruid)) &&
	    !(retval = put_user(cred->euid, euid)))
		retval = put_user(cred->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETGID)) {
		if (rgid != (gid_t) -1 && rgid != old->gid &&
		    rgid != old->egid && rgid != old->sgid)
			goto error;
		if (egid != (gid_t) -1 && egid != old->gid &&
		    egid != old->egid && egid != old->sgid)
			goto error;
		if (sgid != (gid_t) -1 && sgid != old->gid &&
		    sgid != old->egid && sgid != old->sgid)
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = rgid;
	if (egid != (gid_t) -1)
		new->egid = egid;
	if (sgid != (gid_t) -1)
		new->sgid = sgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
{
	const struct cred *cred = current_cred();
	int retval;

	if (!(retval = put_user(cred->gid, rgid)) &&
	    !(retval = put_user(cred->egid, egid)))
		retval = put_user(cred->sgid, sgid);

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;

	new = prepare_creds();
	if (!new)
		return current_fsuid();
	old = current_cred();
	old_fsuid = old->fsuid;

	if (uid == old->uid || uid == old->euid ||
	    uid == old->suid || uid == old->fsuid ||
	    nsown_capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			new->fsuid = uid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}
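
/*
 * Illustrative sketch (not part of this file): the nfsd-style use mentioned
 * above, switching the fsuid around filesystem work done on behalf of a
 * client. "client_uid" is a hypothetical value. Note that setfsuid()
 * returns the previous fsuid even when the requested change is refused.
 *
 *	#include <sys/fsuid.h>
 *
 *	int prev = setfsuid(client_uid);
 *	// ... permission-checked filesystem work as client_uid ...
 *	setfsuid(prev);			// restore the original fsuid
 */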

/*
 * "Samma på svenska..": the same again, in Swedish. setfsgid() mirrors
 * setfsuid() for the filesystem gid.
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;

	new = prepare_creds();
	if (!new)
		return current_fsgid();
	old = current_cred();
	old_fsgid = old->fsgid;

	if (gid == old->gid || gid == old->egid ||
	    gid == old->sgid || gid == old->fsgid ||
	    nsown_capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			new->fsgid = gid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

void do_sys_times(struct tms *tms)
{
	cputime_t tgutime, tgstime, cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_times(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(tgutime);
	tms->tms_stime = cputime_to_clock_t(tgstime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
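
/*
 * Illustrative userspace sketch (not part of this file): converting the
 * clock-tick values that times(2) reports into seconds.
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	struct tms t;
 *	clock_t elapsed = times(&t);		// ticks since an arbitrary point
 *	long hz = sysconf(_SC_CLK_TCK);		// ticks per second
 *	double user_sec = (double)t.tms_utime / hz;
 */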

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
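
/*
 * Illustrative userspace sketch (not part of this file): the classic shell
 * pattern of moving a freshly forked child into its own process group.
 * Both sides call setpgid() so the group exists whichever runs first;
 * "argv" here is a hypothetical command vector.
 *
 *	#include <unistd.h>
 *
 *	pid_t child = fork();
 *	if (child == 0) {
 *		setpgid(0, 0);			// child: pgid = its own pid
 *		execvp(argv[0], argv);
 *	}
 *	setpgid(child, child);			// parent: same, closes the race
 */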

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}
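
/*
 * Illustrative userspace sketch (not part of this file): the usual first
 * step of daemonizing. The fork() guarantees the child is not a process
 * group leader, so its setsid() cannot fail with -EPERM.
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	pid_t pid = fork();
 *	if (pid < 0)
 *		abort();
 *	if (pid > 0)
 *		_exit(0);		// parent exits
 *	if (setsid() == -1)		// child: new session, no controlling tty
 *		abort();
 */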

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
 */
static int override_release(char __user *release, int len)
{
	int ret = 0;
	char buf[65];

	if (current->personality & UNAME26) {
		char *rest = UTS_RELEASE;
		int ndots = 0;
		unsigned v;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
		snprintf(buf, len, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, len);
	}
	return ret;
}
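
/*
 * Illustrative sketch (not part of this file): how a legacy program would
 * opt into this mapping. With the UNAME26 personality flag set, a 3.0
 * kernel reports itself as "2.6.40", a 3.1 kernel as "2.6.41", and so on.
 * Depending on the libc, UNAME26 may only be defined in <linux/personality.h>.
 *
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	personality(PER_LINUX | UNAME26);
 *	struct utsname u;
 *	uname(&u);			// u.release is now e.g. "2.6.40"
 */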

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
	       struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk->group_leader,
							 resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry. But we use the zero value to mean "it was
			 * never set". So let's cheat and make it one second
			 * instead
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->uid == tcred->euid &&
	     cred->uid == tcred->suid &&
	     cred->uid == tcred->uid &&
	     cred->gid == tcred->egid &&
	     cred->gid == tcred->sgid &&
	     cred->gid == tcred->gid))
		return 0;
	if (ns_capable(tcred->user->user_ns, CAP_SYS_RESOURCE))
		return 0;

	return -EPERM;
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	int ret;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			 old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
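
/*
 * Illustrative userspace sketch (not part of this file): raising the
 * caller's open-file limit and reading back the old one through the glibc
 * prlimit() wrapper for this syscall. A pid of 0 means the calling process.
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit newl = { .rlim_cur = 4096, .rlim_max = 4096 }, oldl;
 *	if (prlimit(0, RLIMIT_NOFILE, &newl, &oldl) == -1)
 *		perror("prlimit");
 */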

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases: current multithreaded, non-current single-threaded, and
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 *
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (who == RUSAGE_THREAD) {
		task_times(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_times(p, &tgutime, &tgstime);
		utime = cputime_add(utime, tgutime);
		stime = cputime_add(stime, tgstime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);
		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;
	return getrusage(current, who, ru);
}
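
/*
 * Illustrative userspace sketch (not part of this file): reading the peak
 * RSS that k_getrusage() computes above. ru_maxrss is filled in kilobytes.
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("peak RSS: %ld kB\n", ru.ru_maxrss);
 */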

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		error = 0;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 < 0 || arg2 > 1) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		error = 0;
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		else
			error = 0;
		break;

	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		return 0;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm,
				 sizeof(comm)))
			return -EFAULT;
		return 0;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;

	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
				current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		error = 0;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
					~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		error = 0;
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
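
/*
 * Illustrative userspace sketch (not part of this file): naming a task via
 * the PR_SET_NAME case above. The comm buffer is 16 bytes, so names are
 * silently truncated to 15 characters plus the terminating NUL.
 *
 *	#include <sys/prctl.h>
 *
 *	char name[16];
 *	prctl(PR_SET_NAME, "worker-1", 0, 0, 0);
 *	prctl(PR_GET_NAME, name, 0, 0, 0);	// reads back "worker-1"
 */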

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static void argv_cleanup(struct subprocess_info *info)
{
	argv_free(info->argv);
}

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
int orderly_poweroff(bool force)
{
	int argc;
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret = -ENOMEM;
	struct subprocess_info *info;

	if (argv == NULL) {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);
		goto out;
	}

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
	if (info == NULL) {
		argv_free(argv);
		goto out;
	}

	call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);

	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

out:
	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
		       "forcing the issue\n");

		/* I guess this should try to kick off some daemon to
		   sync and poweroff asap. Or not even bother syncing
		   if we're doing an emergency shutdown? */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/kernel/sys.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 */
7
8#include <linux/export.h>
9#include <linux/mm.h>
10#include <linux/utsname.h>
11#include <linux/mman.h>
12#include <linux/reboot.h>
13#include <linux/prctl.h>
14#include <linux/highuid.h>
15#include <linux/fs.h>
16#include <linux/kmod.h>
17#include <linux/perf_event.h>
18#include <linux/resource.h>
19#include <linux/kernel.h>
20#include <linux/workqueue.h>
21#include <linux/capability.h>
22#include <linux/device.h>
23#include <linux/key.h>
24#include <linux/times.h>
25#include <linux/posix-timers.h>
26#include <linux/security.h>
27#include <linux/suspend.h>
28#include <linux/tty.h>
29#include <linux/signal.h>
30#include <linux/cn_proc.h>
31#include <linux/getcpu.h>
32#include <linux/task_io_accounting_ops.h>
33#include <linux/seccomp.h>
34#include <linux/cpu.h>
35#include <linux/personality.h>
36#include <linux/ptrace.h>
37#include <linux/fs_struct.h>
38#include <linux/file.h>
39#include <linux/mount.h>
40#include <linux/gfp.h>
41#include <linux/syscore_ops.h>
42#include <linux/version.h>
43#include <linux/ctype.h>
44#include <linux/syscall_user_dispatch.h>
45
46#include <linux/compat.h>
47#include <linux/syscalls.h>
48#include <linux/kprobes.h>
49#include <linux/user_namespace.h>
50#include <linux/time_namespace.h>
51#include <linux/binfmts.h>
52
53#include <linux/sched.h>
54#include <linux/sched/autogroup.h>
55#include <linux/sched/loadavg.h>
56#include <linux/sched/stat.h>
57#include <linux/sched/mm.h>
58#include <linux/sched/coredump.h>
59#include <linux/sched/task.h>
60#include <linux/sched/cputime.h>
61#include <linux/rcupdate.h>
62#include <linux/uidgid.h>
63#include <linux/cred.h>
64
65#include <linux/nospec.h>
66
67#include <linux/kmsg_dump.h>
68/* Move somewhere else to avoid recompiling? */
69#include <generated/utsrelease.h>
70
71#include <linux/uaccess.h>
72#include <asm/io.h>
73#include <asm/unistd.h>
74
75#include "uid16.h"
76
77#ifndef SET_UNALIGN_CTL
78# define SET_UNALIGN_CTL(a, b) (-EINVAL)
79#endif
80#ifndef GET_UNALIGN_CTL
81# define GET_UNALIGN_CTL(a, b) (-EINVAL)
82#endif
83#ifndef SET_FPEMU_CTL
84# define SET_FPEMU_CTL(a, b) (-EINVAL)
85#endif
86#ifndef GET_FPEMU_CTL
87# define GET_FPEMU_CTL(a, b) (-EINVAL)
88#endif
89#ifndef SET_FPEXC_CTL
90# define SET_FPEXC_CTL(a, b) (-EINVAL)
91#endif
92#ifndef GET_FPEXC_CTL
93# define GET_FPEXC_CTL(a, b) (-EINVAL)
94#endif
95#ifndef GET_ENDIAN
96# define GET_ENDIAN(a, b) (-EINVAL)
97#endif
98#ifndef SET_ENDIAN
99# define SET_ENDIAN(a, b) (-EINVAL)
100#endif
101#ifndef GET_TSC_CTL
102# define GET_TSC_CTL(a) (-EINVAL)
103#endif
104#ifndef SET_TSC_CTL
105# define SET_TSC_CTL(a) (-EINVAL)
106#endif
107#ifndef GET_FP_MODE
108# define GET_FP_MODE(a) (-EINVAL)
109#endif
110#ifndef SET_FP_MODE
111# define SET_FP_MODE(a,b) (-EINVAL)
112#endif
113#ifndef SVE_SET_VL
# define SVE_SET_VL(a) (-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL() (-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b) (-EINVAL)
#endif
#ifndef PAC_SET_ENABLED_KEYS
# define PAC_SET_ENABLED_KEYS(a, b, c) (-EINVAL)
#endif
#ifndef PAC_GET_ENABLED_KEYS
# define PAC_GET_ENABLED_KEYS(a) (-EINVAL)
#endif
#ifndef SET_TAGGED_ADDR_CTRL
# define SET_TAGGED_ADDR_CTRL(a) (-EINVAL)
#endif
#ifndef GET_TAGGED_ADDR_CTRL
# define GET_TAGGED_ADDR_CTRL() (-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or if current has CAP_SYS_NICE in p's user_ns.
 *
 * Called with rcu_read_lock held, so the creds are safe.
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid, cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
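
/*
 * Editorial aside, not part of the kernel source: a minimal userspace
 * sketch of the offset described above. The raw syscall returns 40..1
 * rather than -20..19, so a caller using syscall(2) directly must undo
 * the offset itself; the glibc wrapper already does this and reports
 * errors via errno instead.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/resource.h>

int main(void)
{
	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);

	if (raw < 0)
		return 1;
	/* Undo the kernel's "20 - nice" offset to recover the nice value. */
	printf("raw=%ld nice=%ld\n", raw, 20 - raw);
	return 0;
}
#endif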

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
long __sys_setregid(gid_t rgid, gid_t egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	return __sys_setregid(rgid, egid);
}
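
/*
 * Editorial aside, not kernel code: a hedged sketch of the BSD-style
 * privilege drop the comment above describes. Setting the real gid (or
 * an effective gid different from the real one) also rewrites the saved
 * gid, so after this call the setgid privilege is gone for good.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <unistd.h>

static int drop_group_privs(void)
{
	gid_t rgid = getgid();

	/* Set both real and effective gid; the saved gid follows egid. */
	if (setregid(rgid, rgid) != 0) {
		perror("setregid");
		return -1;
	}
	return 0;
}
#endif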

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
long __sys_setgid(gid_t gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	return __sys_setgid(gid);
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root. We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
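
/*
 * Editorial aside, not kernel code: the deferred RLIMIT_NPROC failure
 * described above is observable from userspace. set*uid() itself
 * succeeds even when the target uid is over its process limit; the
 * PF_NPROC_EXCEEDED flag then makes a following execve() fail with
 * EAGAIN. A rough sketch (uid 1000 is an arbitrary example value):
 */
#if 0	/* illustrative only */
#include <unistd.h>
#include <sys/resource.h>

void sketch(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };

	setrlimit(RLIMIT_NPROC, &rl);	/* absurdly low limit */
	setuid(1000);			/* succeeds even if over the limit */
	execl("/bin/true", "true", (char *)NULL);
	/* If the new user exceeds RLIMIT_NPROC, execl fails with EAGAIN. */
}
#endif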

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
long __sys_setreuid(uid_t ruid, uid_t euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	return __sys_setreuid(ruid, euid);
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
long __sys_setuid(uid_t uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	return __sys_setuid(uid);
}
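
/*
 * Editorial aside, not kernel code: the contrast drawn in the comment
 * above, sketched from userspace. A root process that calls setuid()
 * loses the saved uid too and cannot come back; with setreuid() it can
 * swap the real and effective ids and later swap them back.
 */
#if 0	/* illustrative only */
#include <unistd.h>

void temporary_drop(void)
{
	uid_t ruid = getuid(), euid = geteuid();

	setreuid(euid, ruid);	/* swap: effective becomes the real uid */
	/* ... do less-privileged work ... */
	setreuid(ruid, euid);	/* swap back */
}

void permanent_drop(uid_t unpriv)
{
	setuid(unpriv);		/* as root: sets ruid, euid and suid */
	/* No way back from here. */
}
#endif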


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4BSD-compatible seteuid().
 */
long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	return __sys_setresuid(ruid, euid, suid);
}
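
/*
 * Editorial aside, not kernel code: as the comment above notes,
 * setresuid() is general enough to express seteuid(). A library can
 * implement seteuid(e) by leaving the real and saved uids alone:
 */
#if 0	/* illustrative only */
#include <unistd.h>

static int my_seteuid(uid_t euid)
{
	/* -1 means "leave unchanged" for the real and saved uid. */
	return setresuid((uid_t)-1, euid, (uid_t)-1);
}
#endif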

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	return __sys_setresgid(rgid, egid, sgid);
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access.
 */
long __sys_setfsuid(uid_t uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	return __sys_setfsuid(uid);
}
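
/*
 * Editorial aside, not kernel code: setfsuid() always returns the
 * previous fsuid and reports no errors, so the conventional way to
 * check whether a change took effect is to call it twice:
 */
#if 0	/* illustrative only */
#include <sys/fsuid.h>

static int change_fsuid(uid_t fsuid)
{
	setfsuid(fsuid);
	/* A second call returns the now-current fsuid; compare it. */
	if ((uid_t)setfsuid(fsuid) != fsuid)
		return -1;	/* the change was refused */
	return 0;
}
#endif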

/*
 * Same as setfsuid(), but for the filesystem gid.
 */
long __sys_setfsgid(gid_t gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	return __sys_setfsgid(gid);
}
#endif /* CONFIG_MULTIUSER */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
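
/*
 * Editorial aside, not kernel code: everything times() reports is in
 * clock ticks, including the bare return value (elapsed time since an
 * arbitrary reference point). Userspace converts to seconds with
 * sysconf(_SC_CLK_TCK):
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <unistd.h>
#include <sys/times.h>

int main(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK);
	clock_t elapsed = times(&t);

	printf("user %.2fs sys %.2fs\n",
	       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
	printf("elapsed reference %.2fs\n", (double)elapsed / hz);
	return 0;
}
#endif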

#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
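
/*
 * Editorial aside, not kernel code: the usual consumer of setpgid() is
 * a shell placing a pipeline into its own process group so it can be
 * signalled as a job. Both parent and child traditionally make the
 * call, to close the fork/exec race:
 */
#if 0	/* illustrative only */
#include <unistd.h>
#include <signal.h>

void spawn_job(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		setpgid(0, 0);		/* child: new group, pgid = pid */
		execl("/bin/sleep", "sleep", "60", (char *)NULL);
		_exit(127);
	}
	setpgid(pid, pid);		/* parent: same call, whoever wins */
	kill(-pid, SIGCONT);		/* a negative pid signals the group */
}
#endif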

static int do_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	return do_getpgid(pid);
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return do_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

int ksys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

SYSCALL_DEFINE0(setsid)
{
	return ksys_setsid();
}
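
/*
 * Editorial aside, not kernel code: setsid() fails with -EPERM for a
 * process-group leader (see the pid_task(sid, PIDTYPE_PGID) check
 * above), which is why daemonizing code forks first, guaranteeing the
 * child does not lead a group:
 */
#if 0	/* illustrative only */
#include <stdlib.h>
#include <unistd.h>

void daemonize_sketch(void)
{
	if (fork() > 0)
		exit(0);	/* parent exits; child is not a group leader */
	if (setsid() < 0)
		exit(1);	/* now session leader with no controlling tty */
	/* A second fork is often added so the daemon can never reacquire
	 * a controlling terminal. */
}
#endif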

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 * We map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/...
 * all become 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = LINUX_VERSION_PATCHLEVEL + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
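
/*
 * Editorial aside, not kernel code: the mapping above is opted into
 * per-process via the UNAME26 personality bit. A hedged sketch; the
 * fallback #define mirrors the value in <linux/personality.h>:
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <sys/personality.h>
#include <sys/utsname.h>

#ifndef UNAME26
#define UNAME26 0x0020000
#endif

int main(void)
{
	struct utsname u;

	/* personality(0xffffffff) queries the current value. */
	personality(personality(0xffffffff) | UNAME26);
	uname(&u);
	/* On a 6.x kernel this prints something like "2.6.60". */
	printf("%s\n", u.release);
	return 0;
}
#endif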

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	struct new_utsname tmp;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	struct old_utsname tmp;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	struct oldold_utsname tmp;

	if (!name)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));

	down_read(&uts_sem);
	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_architecture(name))
		return -EFAULT;
	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	return 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		down_write(&uts_sem);
		u = utsname();
		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
		up_write(&uts_sem);
	}
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i;
	struct new_utsname *u;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	memcpy(tmp, u->nodename, i);
	up_read(&uts_sem);
	if (copy_to_user(name, tmp, i))
		return -EFAULT;
	return 0;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		down_write(&uts_sem);
		u = utsname();
		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
		up_write(&uts_sem);
	}
	return errno;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;

		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Backward compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
	       struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/*
		 * Keep the capable check against init_user_ns until
		 * cgroups can contain all limits.
		 */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
	 * ignores the rlimit.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS))
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid) &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
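
/*
 * Editorial aside, not kernel code: glibc exposes this syscall as
 * prlimit(2). Passing NULL for new_limit makes it a pure read, which
 * only needs LSM_PRLIMIT_READ permission per the check above:
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <sys/resource.h>

int show_nofile(pid_t pid)
{
	struct rlimit old;

	if (prlimit(pid, RLIMIT_NOFILE, NULL, &old) != 0) {
		perror("prlimit");
		return -1;
	}
	printf("pid %d: nofile soft=%llu hard=%llu\n", (int)pid,
	       (unsigned long long)old.rlim_cur,
	       (unsigned long long)old.rlim_max);
	return 0;
}
#endif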

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded and
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof(*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		fallthrough;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_kernel_old_timeval(utime);
	r->ru_stime = ns_to_kernel_old_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
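
/*
 * Editorial aside, not kernel code: note the unit conversion at the end
 * of getrusage() above; ru_maxrss reaches userspace in kilobytes, not
 * pages:
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) != 0)
		return 1;
	printf("peak RSS: %ld kB, minor faults: %ld\n",
	       ru.ru_maxrss, ru.ru_minflt);
	return 0;
}
#endif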

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
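
/*
 * Editorial aside, not kernel code: like setfsuid(), umask() has no
 * error path; it always returns the previous mask. Reading the mask
 * without permanently changing it therefore takes two calls:
 */
#if 0	/* illustrative only */
#include <sys/stat.h>

static mode_t read_umask(void)
{
	mode_t old = umask(0);	/* temporarily clear the mask */

	umask(old);		/* restore it */
	return old;
}
#endif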

static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct file *old_exe, *exe_file;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to an executable file,
	 * make sure that this one is executable as well, to avoid breaking
	 * the overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = file_permission(exe.file, MAY_EXEC);
	if (err)
		goto exit;

	/*
	 * Forbid changing mm->exe_file while the old file is still mapped.
	 */
	exe_file = get_mm_exe_file(mm);
	err = -EBUSY;
	if (exe_file) {
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &exe_file->f_path))
				goto exit_err;
		}

		mmap_read_unlock(mm);
		fput(exe_file);
	}

	err = 0;
	/* set the new file, lockless */
	get_file(exe.file);
	old_exe = xchg(&mm->exe_file, exe.file);
	if (old_exe)
		fput(old_exe);
exit:
	fdput(exe);
	return err;
exit_err:
	mmap_read_unlock(mm);
	fput(exe_file);
	goto exit;
}

/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * Nor should we allow the limits to be overridden if they are
	 * already set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	error = 0;
out:
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		/*
		 * Someone is trying to cheat the auxv vector.
		 */
		if (!prctl_map.auxv ||
		    prctl_map.auxv_size > sizeof(mm->saved_auxv))
			return -EINVAL;

		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		/*
		 * Check if the current user is checkpoint/restore capable.
		 * At the time of this writing, it checks for CAP_SYS_ADMIN
		 * or CAP_CHECKPOINT_RESTORE.
		 * Note that a user with access to ptrace can masquerade an
		 * arbitrary program as any executable, even setuid ones.
		 * This may have implications in the tomoyo subsystem.
		 */
		if (!checkpoint_restore_ns_capable(current_user_ns()))
			return -EPERM;

		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	/*
	 * arg_lock protects concurrent updates but we still need mmap_lock for
	 * read to exclude races with sys_brk.
	 */
	mmap_read_lock(mm);

	/*
	 * We don't validate that these members point to real, present VMAs,
	 * because the application may have already unmapped the corresponding
	 * VMAs, and the kernel uses these members mostly for statistics
	 * output in procfs. The exceptions are @start_brk/@brk, which are
	 * used in do_brk_flags, but the kernel looks up the VMAs when
	 * updating them, so a bad value written here makes the kernel swear
	 * at the userspace program without causing any problem in the kernel
	 * itself.
	 */

	spin_lock(&mm->arg_lock);
	mm->start_code = prctl_map.start_code;
	mm->end_code = prctl_map.end_code;
	mm->start_data = prctl_map.start_data;
	mm->end_data = prctl_map.end_data;
	mm->start_brk = prctl_map.start_brk;
	mm->brk = prctl_map.brk;
	mm->start_stack = prctl_map.start_stack;
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end = prctl_map.arg_end;
	mm->env_start = prctl_map.env_start;
	mm->env_end = prctl_map.env_end;
	spin_unlock(&mm->arg_lock);

	/*
	 * Note this update of @saved_auxv is lockless, so if someone reads
	 * this member in procfs while we're updating it, they may get partly
	 * updated results. This is a known and acceptable trade-off: we
	 * leave it as is to avoid introducing additional locks here, which
	 * would make the kernel more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	mmap_read_unlock(mm);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */

static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values. It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE] = {};

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}

static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map = {
		.auxv = NULL,
		.auxv_size = 0,
		.exe_fd = -1,
	};
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	/*
	 * arg_lock protects concurrent updates of arg boundaries, we need
	 * mmap_lock for a) concurrent sys_brk, b) finding VMA for addr
	 * validation.
	 */
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);

	spin_lock(&mm->arg_lock);
	prctl_map.start_code = mm->start_code;
	prctl_map.end_code = mm->end_code;
	prctl_map.start_data = mm->start_data;
	prctl_map.end_data = mm->end_data;
	prctl_map.start_brk = mm->start_brk;
	prctl_map.brk = mm->brk;
	prctl_map.start_stack = mm->start_stack;
	prctl_map.arg_start = mm->arg_start;
	prctl_map.arg_end = mm->arg_end;
	prctl_map.env_start = mm->env_start;
	prctl_map.env_end = mm->env_end;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If command line arguments and environment
	 * are placed somewhere else on stack, we can
	 * set them up here, ARG_START/END to setup
	 * command line arguments and ENV_START/END
	 * for environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code = prctl_map.start_code;
	mm->end_code = prctl_map.end_code;
	mm->start_data = prctl_map.start_data;
	mm->end_data = prctl_map.end_data;
	mm->start_brk = prctl_map.start_brk;
	mm->brk = prctl_map.brk;
	mm->start_stack = prctl_map.start_stack;
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end = prctl_map.arg_end;
	mm->env_start = prctl_map.env_start;
	mm->env_end = prctl_map.env_end;

	error = 0;
out:
	spin_unlock(&mm->arg_lock);
	mmap_read_unlock(mm);
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task already has has_child_subreaper set, all its
	 * descendants have this flag too and new descendants will inherit
	 * it on fork, so skip them.
	 *
	 * If we've found a child_reaper, skip the descendants in its
	 * subtree as they will never get out of their pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
	return -EINVAL;
}

int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
				    unsigned long ctrl)
{
	return -EINVAL;
}
2292#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
2293
2294SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2295 unsigned long, arg4, unsigned long, arg5)
2296{
2297 struct task_struct *me = current;
2298 unsigned char comm[sizeof(me->comm)];
2299 long error;
2300
2301 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2302 if (error != -ENOSYS)
2303 return error;
2304
2305 error = 0;
2306 switch (option) {
2307 case PR_SET_PDEATHSIG:
2308 if (!valid_signal(arg2)) {
2309 error = -EINVAL;
2310 break;
2311 }
2312 me->pdeath_signal = arg2;
2313 break;
2314 case PR_GET_PDEATHSIG:
2315 error = put_user(me->pdeath_signal, (int __user *)arg2);
2316 break;
2317 case PR_GET_DUMPABLE:
2318 error = get_dumpable(me->mm);
2319 break;
2320 case PR_SET_DUMPABLE:
2321 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2322 error = -EINVAL;
2323 break;
2324 }
2325 set_dumpable(me->mm, arg2);
2326 break;
2327
	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
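	/*
	 * The task name is at most 16 bytes (TASK_COMM_LEN) including
	 * the trailing NUL; longer names are silently truncated.  E.g.:
	 *
	 *	prctl(PR_SET_NAME, "worker/7");
	 *	char name[16];
	 *	prctl(PR_GET_NAME, name);
	 */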
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
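	/*
	 * Timer slack is in nanoseconds and bounds how far hrtimer-based
	 * waits (select/poll/epoll_wait, nanosleep, futex timeouts) may
	 * be deferred so that wakeups can be coalesced; passing 0
	 * restores the default inherited from the parent.  A
	 * latency-tolerant background task might, for example, do:
	 *
	 *	prctl(PR_SET_TIMERSLACK, 5 * 1000 * 1000);	/- 5 ms -/
	 */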
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
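	/*
	 * The two cases above select the memory-failure (machine check)
	 * kill policy: "early" kill signals the task as soon as hardware
	 * poisons one of its pages, "late" kill only when the task
	 * actually touches the corrupted data, and "default" defers to
	 * the system-wide vm.memory_failure_early_kill sysctl.
	 */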
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
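	/*
	 * A service manager typically calls
	 *
	 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
	 *
	 * so that orphaned descendants are reparented to it, and it
	 * collects their exit status via wait(), instead of them being
	 * handed to init.
	 */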
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
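	/*
	 * no_new_privs is deliberately one-way: once set it cannot be
	 * cleared and is inherited across fork() and execve(), so that
	 * execve() can never grant privileges (e.g. via set-user-ID
	 * bits).  This is also what allows an unprivileged task to
	 * install a seccomp filter.
	 */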
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		mmap_write_unlock(me->mm);
		break;
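	/*
	 * MMF_DISABLE_THP is a per-mm flag, so it covers every thread
	 * sharing this mm; per prctl(2) the setting is inherited over
	 * fork() and preserved across execve().
	 */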
	case PR_MPX_ENABLE_MANAGEMENT:
	case PR_MPX_DISABLE_MANAGEMENT:
		/* No longer implemented: */
		return -EINVAL;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SVE_SET_VL:
		error = SVE_SET_VL(arg2);
		break;
	case PR_SVE_GET_VL:
		error = SVE_GET_VL();
		break;
	case PR_GET_SPECULATION_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_get(me, arg2);
		break;
	case PR_SET_SPECULATION_CTRL:
		if (arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
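	/*
	 * For the speculation-control cases above, arg2 names the
	 * misfeature (e.g. PR_SPEC_STORE_BYPASS or
	 * PR_SPEC_INDIRECT_BRANCH) and, for the set operation, arg3 is
	 * one of PR_SPEC_ENABLE, PR_SPEC_DISABLE, PR_SPEC_FORCE_DISABLE
	 * or PR_SPEC_DISABLE_NOEXEC.  The __weak stubs earlier in this
	 * file make both operations fail with -EINVAL on architectures
	 * that provide no mitigation hook.
	 */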
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_RESET_KEYS(me, arg2);
		break;
	case PR_PAC_SET_ENABLED_KEYS:
		if (arg4 || arg5)
			return -EINVAL;
		error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
		break;
	case PR_PAC_GET_ENABLED_KEYS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_GET_ENABLED_KEYS(me);
		break;
	case PR_SET_TAGGED_ADDR_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = SET_TAGGED_ADDR_CTRL(arg2);
		break;
	case PR_GET_TAGGED_ADDR_CTRL:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = GET_TAGGED_ADDR_CTRL();
		break;
	case PR_SET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg3 || arg4 || arg5)
			return -EINVAL;

		if (arg2 == 1)
			current->flags |= PR_IO_FLUSHER;
		else if (!arg2)
			current->flags &= ~PR_IO_FLUSHER;
		else
			return -EINVAL;
		break;
	case PR_GET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
		break;
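	/*
	 * PR_IO_FLUSHER marks a userspace process that cleans dirty
	 * pages on behalf of the storage stack (e.g. a FUSE daemon or
	 * a userspace iSCSI/nbd server).  PF_MEMALLOC_NOIO keeps its
	 * own allocations from recursing into I/O, and
	 * PF_LOCAL_THROTTLE exempts it from global dirty-page
	 * throttling, avoiding deadlocks under memory pressure.
	 */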
	case PR_SET_SYSCALL_USER_DISPATCH:
		error = set_syscall_user_dispatch(arg2, arg3, arg4,
						  (char __user *) arg5);
		break;
#ifdef CONFIG_SCHED_CORE
	case PR_SCHED_CORE:
		error = sched_core_share_pid(arg2, arg3, arg4, arg5);
		break;
#endif
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
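/*
 * Nothing pins the task in sys_getcpu() above: by the time the call
 * returns to userspace the scheduler may already have migrated it, so
 * the result is only a hint.  The third argument is a vestige of the
 * removed tcache scheme and is ignored; on many architectures callers
 * never reach this path anyway because glibc routes getcpu() through
 * the vDSO.
 */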

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec64 tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_boottime_ts64(&tp);
	timens_add_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}
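	/*
	 * Worked example: with 4 KiB pages si_meminfo() reports
	 * mem_unit = 4096, so the loop above runs 12 times.  On a
	 * machine with 16 GiB of RAM, totalram is 4M units; shifting
	 * mem_total left by 12 re-expresses it in bytes, while the
	 * sav_total check bails out one bit at a time if the byte
	 * count would overflow an unsigned long.
	 */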

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;
	struct compat_sysinfo s_32;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}
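	/*
	 * Example: a 64-bit kernel with 8 GiB of RAM leaves do_sysinfo()
	 * with mem_unit == 1 and totalram == 8589934592 bytes, which
	 * does not fit in the u32 field below.  Scaling mem_unit up to
	 * PAGE_SIZE (4096) shifts totalram down to 2097152 units, which
	 * does.
	 */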

	memset(&s_32, 0, sizeof(s_32));
	s_32.uptime = s.uptime;
	s_32.loads[0] = s.loads[0];
	s_32.loads[1] = s.loads[1];
	s_32.loads[2] = s.loads[2];
	s_32.totalram = s.totalram;
	s_32.freeram = s.freeram;
	s_32.sharedram = s.sharedram;
	s_32.bufferram = s.bufferram;
	s_32.totalswap = s.totalswap;
	s_32.freeswap = s.freeswap;
	s_32.procs = s.procs;
	s_32.totalhigh = s.totalhigh;
	s_32.freehigh = s.freehigh;
	s_32.mem_unit = s.mem_unit;
	if (copy_to_user(info, &s_32, sizeof(s_32)))
		return -EFAULT;
	return 0;
}
#endif /* CONFIG_COMPAT */