// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/syscall_user_dispatch.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/time_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/nospec.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "uid16.h"

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a, b)	(-EINVAL)
#endif
#ifndef SVE_SET_VL
# define SVE_SET_VL(a)		(-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL()		(-EINVAL)
#endif
#ifndef SME_SET_VL
# define SME_SET_VL(a)		(-EINVAL)
#endif
#ifndef SME_GET_VL
# define SME_GET_VL()		(-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b)	(-EINVAL)
#endif
#ifndef PAC_SET_ENABLED_KEYS
# define PAC_SET_ENABLED_KEYS(a, b, c)	(-EINVAL)
#endif
#ifndef PAC_GET_ENABLED_KEYS
# define PAC_GET_ENABLED_KEYS(a)	(-EINVAL)
#endif
#ifndef SET_TAGGED_ADDR_CTRL
# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
#endif
#ifndef GET_TAGGED_ADDR_CTRL
# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid, cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();

	return retval;
}
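/*
 * Illustrative userspace sketch (not part of this file): because the raw
 * syscall returns the nice value offset into 40..1 as described above, a
 * caller using syscall(2) directly has to undo the offset itself (the C
 * library normally does this for you).  Assumes a glibc-style environment.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *
 *		if (raw < 0) {
 *			perror("getpriority");
 *			return 1;
 *		}
 *		printf("raw=%ld nice=%ld\n", raw, 20 - raw);
 *		return 0;
 *	}
 */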

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are not races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
long __sys_setregid(gid_t rgid, gid_t egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	return __sys_setregid(rgid, egid);
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
long __sys_setgid(gid_t gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	return __sys_setgid(gid);
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

static void flag_nproc_exceeded(struct cred *new)
{
	if (new->ucounts == current_ucounts())
		return;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root. We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (is_rlimit_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
	    new->user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
long __sys_setreuid(uid_t ruid, uid_t euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	return __sys_setreuid(ruid, euid);
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
long __sys_setuid(uid_t uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	return __sys_setuid(uid);
}
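/*
 * Illustrative userspace sketch (not part of this file): when a setuid-root
 * program calls setuid() to an unprivileged uid, the saved uid is reset too,
 * so the drop is permanent and can be verified as below.  The target uid is
 * a hypothetical example value.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uid_t target = 1000;	// hypothetical unprivileged uid
 *
 *		if (setuid(target) != 0) {
 *			perror("setuid");
 *			return 1;
 *		}
 *		// If we started as root, the saved uid is gone too, so
 *		// regaining root must now fail:
 *		if (setuid(0) == 0) {
 *			fprintf(stderr, "privilege drop failed\n");
 *			return 1;
 *		}
 *		printf("uid=%d euid=%d\n", (int)getuid(), (int)geteuid());
 *		return 0;
 *	}
 */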


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	return __sys_setresuid(ruid, euid, suid);
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	return __sys_setresgid(rgid, egid, sgid);
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
long __sys_setfsuid(uid_t uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	return __sys_setfsuid(uid);
}

/*
 * "Samma på svenska.." - same again, in Swedish: setfsgid() mirrors setfsuid().
 */
long __sys_setfsgid(gid_t gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	return __sys_setfsgid(gid);
}
#endif /* CONFIG_MULTIUSER */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
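/*
 * Illustrative userspace sketch (not part of this file): the tgid/pid split
 * described above means every thread of a process sees the same getpid()
 * but its own gettid().  syscall(2) is used here since a gettid() wrapper
 * only appeared in newer C libraries.
 *
 *	#include <pthread.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void *worker(void *arg)
 *	{
 *		printf("thread: pid=%d tid=%ld\n",
 *		       (int)getpid(), syscall(SYS_gettid));
 *		return NULL;
 *	}
 *
 *	int main(void)
 *	{
 *		pthread_t t;
 *
 *		printf("main:   pid=%d tid=%ld\n",
 *		       (int)getpid(), syscall(SYS_gettid));
 *		pthread_create(&t, NULL, worker, NULL);
 *		pthread_join(t, NULL);
 *		return 0;
 *	}
 */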

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
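/*
 * Illustrative userspace sketch (not part of this file): the clock_t values
 * filled in above are in clock ticks, so a caller divides by
 * sysconf(_SC_CLK_TCK) to get seconds; the return value counts ticks from an
 * arbitrary point in the past (typically boot).
 *
 *	#include <stdio.h>
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct tms t;
 *		long hz = sysconf(_SC_CLK_TCK);
 *		clock_t ticks = times(&t);
 *
 *		if (ticks == (clock_t)-1) {
 *			perror("times");
 *			return 1;
 *		}
 *		printf("user=%.2fs sys=%.2fs\n",
 *		       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
 *		return 0;
 *	}
 */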
1017
1018#ifdef CONFIG_COMPAT
1019static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
1020{
1021 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
1022}
1023
1024COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
1025{
1026 if (tbuf) {
1027 struct tms tms;
1028 struct compat_tms tmp;
1029
1030 do_sys_times(&tms);
1031 /* Convert our struct tms to the compat version. */
1032 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
1033 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
1034 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
1035 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
1036 if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
1037 return -EFAULT;
1038 }
1039 force_successful_syscall_return();
1040 return compat_jiffies_to_clock_t(jiffies);
1041}
1042#endif
1043
1044/*
1045 * This needs some heavy checking ...
1046 * I just haven't the stomach for it. I also don't fully
1047 * understand sessions/pgrp etc. Let somebody who does explain it.
1048 *
1049 * OK, I think I have the protection semantics right.... this is really
1050 * only important on a multi-user system anyway, to make sure one user
1051 * can't send a signal to a process owned by another. -TYT, 12/12/91
1052 *
1053 * !PF_FORKNOEXEC check to conform completely to POSIX.
1054 */
1055SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1056{
1057 struct task_struct *p;
1058 struct task_struct *group_leader = current->group_leader;
1059 struct pid *pgrp;
1060 int err;
1061
1062 if (!pid)
1063 pid = task_pid_vnr(group_leader);
1064 if (!pgid)
1065 pgid = pid;
1066 if (pgid < 0)
1067 return -EINVAL;
1068 rcu_read_lock();
1069
1070 /* From this point forward we keep holding onto the tasklist lock
1071 * so that our parent does not change from under us. -DaveM
1072 */
1073 write_lock_irq(&tasklist_lock);
1074
1075 err = -ESRCH;
1076 p = find_task_by_vpid(pid);
1077 if (!p)
1078 goto out;
1079
1080 err = -EINVAL;
1081 if (!thread_group_leader(p))
1082 goto out;
1083
1084 if (same_thread_group(p->real_parent, group_leader)) {
1085 err = -EPERM;
1086 if (task_session(p) != task_session(group_leader))
1087 goto out;
1088 err = -EACCES;
1089 if (!(p->flags & PF_FORKNOEXEC))
1090 goto out;
1091 } else {
1092 err = -ESRCH;
1093 if (p != group_leader)
1094 goto out;
1095 }
1096
1097 err = -EPERM;
1098 if (p->signal->leader)
1099 goto out;
1100
1101 pgrp = task_pid(p);
1102 if (pgid != pid) {
1103 struct task_struct *g;
1104
1105 pgrp = find_vpid(pgid);
1106 g = pid_task(pgrp, PIDTYPE_PGID);
1107 if (!g || task_session(g) != task_session(group_leader))
1108 goto out;
1109 }
1110
1111 err = security_task_setpgid(p, pgid);
1112 if (err)
1113 goto out;
1114
1115 if (task_pgrp(p) != pgrp)
1116 change_pid(p, PIDTYPE_PGID, pgrp);
1117
1118 err = 0;
1119out:
1120 /* All paths lead to here, thus we are safe. -DaveM */
1121 write_unlock_irq(&tasklist_lock);
1122 rcu_read_unlock();
1123 return err;
1124}
1125
1126static int do_getpgid(pid_t pid)
1127{
1128 struct task_struct *p;
1129 struct pid *grp;
1130 int retval;
1131
1132 rcu_read_lock();
1133 if (!pid)
1134 grp = task_pgrp(current);
1135 else {
1136 retval = -ESRCH;
1137 p = find_task_by_vpid(pid);
1138 if (!p)
1139 goto out;
1140 grp = task_pgrp(p);
1141 if (!grp)
1142 goto out;
1143
1144 retval = security_task_getpgid(p);
1145 if (retval)
1146 goto out;
1147 }
1148 retval = pid_vnr(grp);
1149out:
1150 rcu_read_unlock();
1151 return retval;
1152}
1153
1154SYSCALL_DEFINE1(getpgid, pid_t, pid)
1155{
1156 return do_getpgid(pid);
1157}
1158
1159#ifdef __ARCH_WANT_SYS_GETPGRP
1160
1161SYSCALL_DEFINE0(getpgrp)
1162{
1163 return do_getpgid(0);
1164}
1165
1166#endif
1167
1168SYSCALL_DEFINE1(getsid, pid_t, pid)
1169{
1170 struct task_struct *p;
1171 struct pid *sid;
1172 int retval;
1173
1174 rcu_read_lock();
1175 if (!pid)
1176 sid = task_session(current);
1177 else {
1178 retval = -ESRCH;
1179 p = find_task_by_vpid(pid);
1180 if (!p)
1181 goto out;
1182 sid = task_session(p);
1183 if (!sid)
1184 goto out;
1185
1186 retval = security_task_getsid(p);
1187 if (retval)
1188 goto out;
1189 }
1190 retval = pid_vnr(sid);
1191out:
1192 rcu_read_unlock();
1193 return retval;
1194}
1195
1196static void set_special_pids(struct pid *pid)
1197{
1198 struct task_struct *curr = current->group_leader;
1199
1200 if (task_session(curr) != pid)
1201 change_pid(curr, PIDTYPE_SID, pid);
1202
1203 if (task_pgrp(curr) != pid)
1204 change_pid(curr, PIDTYPE_PGID, pid);
1205}
1206
1207int ksys_setsid(void)
1208{
1209 struct task_struct *group_leader = current->group_leader;
1210 struct pid *sid = task_pid(group_leader);
1211 pid_t session = pid_vnr(sid);
1212 int err = -EPERM;
1213
1214 write_lock_irq(&tasklist_lock);
1215 /* Fail if I am already a session leader */
1216 if (group_leader->signal->leader)
1217 goto out;
1218
1219 /* Fail if a process group id already exists that equals the
1220 * proposed session id.
1221 */
1222 if (pid_task(sid, PIDTYPE_PGID))
1223 goto out;
1224
1225 group_leader->signal->leader = 1;
1226 set_special_pids(sid);
1227
1228 proc_clear_tty(group_leader);
1229
1230 err = session;
1231out:
1232 write_unlock_irq(&tasklist_lock);
1233 if (err > 0) {
1234 proc_sid_connector(group_leader);
1235 sched_autogroup_create_attach(group_leader);
1236 }
1237 return err;
1238}
1239
1240SYSCALL_DEFINE0(setsid)
1241{
1242 return ksys_setsid();
1243}
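/*
 * Illustrative userspace sketch (not part of this file): ksys_setsid() above
 * fails for a process group leader, which is why the classic daemonization
 * sequence forks first - the child is guaranteed not to lead a group and can
 * become a session leader.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid < 0) {
 *			perror("fork");
 *			return 1;
 *		}
 *		if (pid > 0)
 *			exit(0);	// parent exits
 *		if (setsid() < 0) {	// child starts a new session
 *			perror("setsid");
 *			return 1;
 *		}
 *		printf("new session id: %d\n", (int)getsid(0));
 *		return 0;
 *	}
 */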

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
 * 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = LINUX_VERSION_PATCHLEVEL + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
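/*
 * Illustrative userspace sketch (not part of this file): the remapping above
 * can be observed by setting the UNAME26 personality bit before calling
 * uname(); e.g. a 6.6 kernel would then report something like "2.6.66".
 * UNAME26 comes from <linux/personality.h>; the fallback define below is an
 * assumption for older userspace headers.
 *
 *	#include <stdio.h>
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	#ifndef UNAME26
 *	#define UNAME26 0x0020000
 *	#endif
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		personality(personality(0xffffffff) | UNAME26);
 *		if (uname(&u) == 0)
 *			printf("release: %s\n", u.release);
 *		return 0;
 *	}
 */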
1287
1288SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1289{
1290 struct new_utsname tmp;
1291
1292 down_read(&uts_sem);
1293 memcpy(&tmp, utsname(), sizeof(tmp));
1294 up_read(&uts_sem);
1295 if (copy_to_user(name, &tmp, sizeof(tmp)))
1296 return -EFAULT;
1297
1298 if (override_release(name->release, sizeof(name->release)))
1299 return -EFAULT;
1300 if (override_architecture(name))
1301 return -EFAULT;
1302 return 0;
1303}
1304
1305#ifdef __ARCH_WANT_SYS_OLD_UNAME
1306/*
1307 * Old cruft
1308 */
1309SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1310{
1311 struct old_utsname tmp;
1312
1313 if (!name)
1314 return -EFAULT;
1315
1316 down_read(&uts_sem);
1317 memcpy(&tmp, utsname(), sizeof(tmp));
1318 up_read(&uts_sem);
1319 if (copy_to_user(name, &tmp, sizeof(tmp)))
1320 return -EFAULT;
1321
1322 if (override_release(name->release, sizeof(name->release)))
1323 return -EFAULT;
1324 if (override_architecture(name))
1325 return -EFAULT;
1326 return 0;
1327}
1328
1329SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1330{
1331 struct oldold_utsname tmp;
1332
1333 if (!name)
1334 return -EFAULT;
1335
1336 memset(&tmp, 0, sizeof(tmp));
1337
1338 down_read(&uts_sem);
1339 memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1340 memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1341 memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1342 memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1343 memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1344 up_read(&uts_sem);
1345 if (copy_to_user(name, &tmp, sizeof(tmp)))
1346 return -EFAULT;
1347
1348 if (override_architecture(name))
1349 return -EFAULT;
1350 if (override_release(name->release, sizeof(name->release)))
1351 return -EFAULT;
1352 return 0;
1353}
1354#endif
1355
1356SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1357{
1358 int errno;
1359 char tmp[__NEW_UTS_LEN];
1360
1361 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1362 return -EPERM;
1363
1364 if (len < 0 || len > __NEW_UTS_LEN)
1365 return -EINVAL;
1366 errno = -EFAULT;
1367 if (!copy_from_user(tmp, name, len)) {
1368 struct new_utsname *u;
1369
1370 add_device_randomness(tmp, len);
1371 down_write(&uts_sem);
1372 u = utsname();
1373 memcpy(u->nodename, tmp, len);
1374 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1375 errno = 0;
1376 uts_proc_notify(UTS_PROC_HOSTNAME);
1377 up_write(&uts_sem);
1378 }
1379 return errno;
1380}
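/*
 * Illustrative userspace sketch (not part of this file): sethostname()
 * requires CAP_SYS_ADMIN in the UTS namespace, so a safe way to try it is
 * inside a fresh namespace (e.g. "unshare -u" as root).  The name used here
 * is a hypothetical example.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *name = "testhost";	// hypothetical name
 *
 *		if (sethostname(name, strlen(name)) != 0) {
 *			perror("sethostname");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */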

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i;
	struct new_utsname *u;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	memcpy(tmp, u->nodename, i);
	up_read(&uts_sem);
	if (copy_to_user(name, tmp, i))
		return -EFAULT;
	return 0;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		add_device_randomness(tmp, len);
		down_write(&uts_sem);
		u = utsname();
		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
		up_write(&uts_sem);
	}
	return errno;
}

/* make sure you are allowed to change @tsk limits before calling this */
static int do_prlimit(struct task_struct *tsk, unsigned int resource,
		      struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	resource = array_index_nospec(resource, RLIM_NLIMITS);

	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* Holding a refcount on tsk protects tsk->signal from disappearing. */
	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/*
		 * Keep the capable check against init_user_ns until cgroups can
		 * contain all limits.
		 */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
	 * ignores the rlimit.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS)) {
		/*
		 * update_rlimit_cpu can fail if the task is exiting, but there
		 * may be other tasks in the thread group that are not exiting,
		 * and they need their cpu timers adjusted.
		 *
		 * The group_leader is the last task to be released, so if we
		 * cannot update_rlimit_cpu on it, then the entire process is
		 * exiting and we do not need to update at all.
		 */
		update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur);
	}

	return retval;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;

		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid) &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			 old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
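/*
 * Illustrative userspace sketch (not part of this file): glibc exposes this
 * syscall as prlimit(2); passing both pointers reads the old limits and
 * installs the new ones in one call, and pid 0 means the calling process.
 * The 4096 value is an arbitrary example.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 };
 *		struct rlimit old;
 *
 *		if (prlimit(0, RLIMIT_NOFILE, &new, &old) != 0) {
 *			perror("prlimit");
 *			return 1;
 *		}
 *		printf("RLIMIT_NOFILE was %llu/%llu\n",
 *		       (unsigned long long)old.rlim_cur,
 *		       (unsigned long long)old.rlim_max);
 *		return 0;
 *	}
 */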

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, the write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 *
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof (*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		fallthrough;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_kernel_old_timeval(utime);
	r->ru_stime = ns_to_kernel_old_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
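/*
 * Illustrative userspace sketch (not part of this file): note that
 * ru_maxrss arrives in kilobytes because of the PAGE_SIZE / 1024
 * conversion at the end of getrusage() above.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rusage r;
 *
 *		if (getrusage(RUSAGE_SELF, &r) != 0) {
 *			perror("getrusage");
 *			return 1;
 *		}
 *		printf("maxrss=%ld kB minflt=%ld majflt=%ld\n",
 *		       r.ru_maxrss, r.ru_minflt, r.ru_majflt);
 *		return 0;
 *	}
 */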

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
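/*
 * Illustrative userspace sketch (not part of this file): umask() never fails
 * and returns the previous mask, which is commonly used to query and restore
 * it; a file created with mode 0666 under umask 022 ends up as
 * 0666 & ~022 = 0644.
 *
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		mode_t old = umask(022);	// set, returning previous
 *
 *		printf("old umask: %03o\n", (unsigned int)old);
 *		umask(old);			// restore
 *		return 0;
 *	}
 */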

static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to an executable file,
	 * make sure this one is executable as well, to avoid breaking the
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = file_permission(exe.file, MAY_EXEC);
	if (err)
		goto exit;

	err = replace_mm_exe_file(mm, exe.file);
exit:
	fdput(exe);
	return err;
}
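/*
 * Illustrative userspace sketch (not part of this file): this helper backs
 * prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, ...), which swaps what /proc/self/exe
 * points at.  Reaching it this way needs CAP_SYS_RESOURCE (see prctl_set_mm()
 * below); the path used is a hypothetical example of an executable regular
 * file.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/usr/bin/true", O_RDONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (prctl(PR_SET_MM, PR_SET_MM_EXE_FILE,
 *			  (unsigned long)fd, 0, 0) != 0)
 *			perror("prctl");
 *		return 0;
 *	}
 */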

/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * Nor should we allow overriding the limits if they are already set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	error = 0;
out:
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		/*
		 * Someone is trying to cheat the auxv vector.
		 */
		if (!prctl_map.auxv ||
		    prctl_map.auxv_size > sizeof(mm->saved_auxv))
			return -EINVAL;

		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		/*
		 * Check if the current user is checkpoint/restore capable.
		 * At the time of this writing, it checks for CAP_SYS_ADMIN
		 * or CAP_CHECKPOINT_RESTORE.
		 * Note that a user with access to ptrace can masquerade an
		 * arbitrary program as any executable, even setuid ones.
		 * This may have implications in the tomoyo subsystem.
		 */
		if (!checkpoint_restore_ns_capable(current_user_ns()))
			return -EPERM;

		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	/*
	 * arg_lock protects concurrent updates but we still need mmap_lock for
	 * read to exclude races with sys_brk.
	 */
	mmap_read_lock(mm);

	/*
	 * We don't validate that these members point to real, present VMAs,
	 * because the application may already have unmapped the corresponding
	 * VMAs, and the kernel uses these members mostly for statistics
	 * output in procfs, except
	 *
	 * - @start_brk/@brk, which are used in do_brk_flags; the kernel does
	 *   look up VMAs when updating these members, so anything wrong
	 *   written here makes the kernel swear at the userspace program but
	 *   won't lead to any problem in the kernel itself
	 */
2048
2049 spin_lock(&mm->arg_lock);
2050 mm->start_code = prctl_map.start_code;
2051 mm->end_code = prctl_map.end_code;
2052 mm->start_data = prctl_map.start_data;
2053 mm->end_data = prctl_map.end_data;
2054 mm->start_brk = prctl_map.start_brk;
2055 mm->brk = prctl_map.brk;
2056 mm->start_stack = prctl_map.start_stack;
2057 mm->arg_start = prctl_map.arg_start;
2058 mm->arg_end = prctl_map.arg_end;
2059 mm->env_start = prctl_map.env_start;
2060 mm->env_end = prctl_map.env_end;
2061 spin_unlock(&mm->arg_lock);
2062
2063 /*
2064 * Note this update of @saved_auxv is lockless thus
2065 * if someone reads this member in procfs while we're
2066 * updating -- it may get partly updated results. It's
2067 * known and acceptable trade off: we leave it as is to
2068 * not introduce additional locks here making the kernel
2069 * more complex.
2070 */
2071 if (prctl_map.auxv_size)
2072 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2073
2074 mmap_read_unlock(mm);
2075 return 0;
2076}
2077#endif /* CONFIG_CHECKPOINT_RESTORE */
2078
static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values. It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE] = {};

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}

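/*
 * Dispatcher for the PR_SET_MM_* options of prctl(). PR_SET_MM_MAP and
 * PR_SET_MM_MAP_SIZE are handled atomically by prctl_set_mm_map() when
 * CONFIG_CHECKPOINT_RESTORE is enabled; the remaining options require
 * CAP_SYS_RESOURCE and update a single mm_struct field at a time.
 */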
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map = {
		.auxv = NULL,
		.auxv_size = 0,
		.exe_fd = -1,
	};
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	/*
	 * arg_lock protects concurrent updates of the arg boundaries; we
	 * also need mmap_lock for a) excluding concurrent sys_brk and
	 * b) finding the VMA for the addr validation below.
	 */
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);

	spin_lock(&mm->arg_lock);
	prctl_map.start_code = mm->start_code;
	prctl_map.end_code = mm->end_code;
	prctl_map.start_data = mm->start_data;
	prctl_map.end_data = mm->end_data;
	prctl_map.start_brk = mm->start_brk;
	prctl_map.brk = mm->brk;
	prctl_map.start_stack = mm->start_stack;
	prctl_map.arg_start = mm->arg_start;
	prctl_map.arg_end = mm->arg_end;
	prctl_map.env_start = mm->env_start;
	prctl_map.env_end = mm->env_end;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If the command line arguments and environment live somewhere
	 * else on the stack, they can be set up here: ARG_START/END for
	 * the command line arguments and ENV_START/END for the
	 * environment. These cases require that the address resolves to
	 * a VMA (see the find_vma() check below).
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code = prctl_map.start_code;
	mm->end_code = prctl_map.end_code;
	mm->start_data = prctl_map.start_data;
	mm->end_data = prctl_map.end_data;
	mm->start_brk = prctl_map.start_brk;
	mm->brk = prctl_map.brk;
	mm->start_stack = prctl_map.start_stack;
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end = prctl_map.arg_end;
	mm->env_start = prctl_map.env_start;
	mm->env_end = prctl_map.env_end;

	error = 0;
out:
	spin_unlock(&mm->arg_lock);
	mmap_read_unlock(mm);
	return error;
}

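/*
 * PR_GET_TID_ADDRESS: report the clear_child_tid pointer installed via
 * set_tid_address(2) or clone(2) with CLONE_CHILD_CLEARTID. Exposed only
 * under CONFIG_CHECKPOINT_RESTORE so checkpoint/restore tools (e.g. CRIU)
 * can save and re-establish it.
 */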
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task already has has_child_subreaper set, all of its
	 * descendants have the flag too and new descendants will inherit
	 * it on fork, so skip them.
	 *
	 * If we've found a child_reaper, skip the descendants in its
	 * subtree as they will never get out of their pid namespace.
	 *
	 * Returning 0 tells walk_process_tree() to skip the subtree;
	 * returning 1 descends into the children.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

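/*
 * Weak stubs for PR_GET/SET_SPECULATION_CTRL. Architectures that offer
 * per-task control of speculation misfeatures (e.g. x86's speculative
 * store bypass and indirect branch controls) override these.
 */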
int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
	return -EINVAL;
}

int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
				    unsigned long ctrl)
{
	return -EINVAL;
}

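/*
 * PR_SET_IO_FLUSHER marks a userspace task that sits in the block I/O
 * or memory-reclaim path (e.g. a storage daemon): PF_MEMALLOC_NOIO keeps
 * its allocations from recursing into the I/O path, and PF_LOCAL_THROTTLE
 * exempts it from global dirty-page throttling.
 */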
#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)

#ifdef CONFIG_ANON_VMA_NAME

#define ANON_VMA_NAME_MAX_LEN		80
#define ANON_VMA_NAME_INVALID_CHARS	"\\`$[]"

static inline bool is_valid_name_char(char ch)
{
	/* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
	return ch > 0x1f && ch < 0x7f &&
		!strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
}

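/*
 * PR_SET_VMA_ANON_NAME: attach a name to anonymous memory so it shows up
 * as "[anon:<name>]" in /proc/<pid>/maps; a NULL name clears it again.
 * Typical use:
 *
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size,
 *	      (unsigned long)"myheap");
 */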
static int prctl_set_vma(unsigned long opt, unsigned long addr,
			 unsigned long size, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	const char __user *uname;
	struct anon_vma_name *anon_name = NULL;
	int error;

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		uname = (const char __user *)arg;
		if (uname) {
			char *name, *pch;

			name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
			if (IS_ERR(name))
				return PTR_ERR(name);

			for (pch = name; *pch != '\0'; pch++) {
				if (!is_valid_name_char(*pch)) {
					kfree(name);
					return -EINVAL;
				}
			}
			/* anon_vma has its own copy */
			anon_name = anon_vma_name_alloc(name);
			kfree(name);
			if (!anon_name)
				return -ENOMEM;

		}

		mmap_write_lock(mm);
		error = madvise_set_anon_name(mm, addr, size, anon_name);
		mmap_write_unlock(mm);
		anon_vma_name_put(anon_name);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

#else /* CONFIG_ANON_VMA_NAME */
static int prctl_set_vma(unsigned long opt, unsigned long start,
			 unsigned long size, unsigned long arg)
{
	return -EINVAL;
}
#endif /* CONFIG_ANON_VMA_NAME */

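/*
 * Main prctl(2) entry point. The security hook gets first shot at every
 * option: any return value other than -ENOSYS (including 0) is final,
 * which lets an LSM implement or veto an option outright. A minimal
 * userspace example:
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker", 0, 0, 0);
 */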
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
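	/*
	 * Per-task memory-failure (machine check) policy: PF_MCE_PROCESS
	 * marks a task that overrides the system-wide default, and
	 * PF_MCE_EARLY selects early kill, i.e. SIGBUS as soon as poisoned
	 * memory is detected rather than when it is actually touched.
	 */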
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		mmap_write_unlock(me->mm);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
	case PR_MPX_DISABLE_MANAGEMENT:
		/* No longer implemented: */
		return -EINVAL;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SVE_SET_VL:
		error = SVE_SET_VL(arg2);
		break;
	case PR_SVE_GET_VL:
		error = SVE_GET_VL();
		break;
	case PR_SME_SET_VL:
		error = SME_SET_VL(arg2);
		break;
	case PR_SME_GET_VL:
		error = SME_GET_VL();
		break;
	case PR_GET_SPECULATION_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_get(me, arg2);
		break;
	case PR_SET_SPECULATION_CTRL:
		if (arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_RESET_KEYS(me, arg2);
		break;
	case PR_PAC_SET_ENABLED_KEYS:
		if (arg4 || arg5)
			return -EINVAL;
		error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
		break;
	case PR_PAC_GET_ENABLED_KEYS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_GET_ENABLED_KEYS(me);
		break;
	case PR_SET_TAGGED_ADDR_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = SET_TAGGED_ADDR_CTRL(arg2);
		break;
	case PR_GET_TAGGED_ADDR_CTRL:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = GET_TAGGED_ADDR_CTRL();
		break;
	case PR_SET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg3 || arg4 || arg5)
			return -EINVAL;

		if (arg2 == 1)
			current->flags |= PR_IO_FLUSHER;
		else if (!arg2)
			current->flags &= ~PR_IO_FLUSHER;
		else
			return -EINVAL;
		break;
	case PR_GET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
		break;
	case PR_SET_SYSCALL_USER_DISPATCH:
		error = set_syscall_user_dispatch(arg2, arg3, arg4,
						  (char __user *) arg5);
		break;
#ifdef CONFIG_SCHED_CORE
	case PR_SCHED_CORE:
		error = sched_core_share_pid(arg2, arg3, arg4, arg5);
		break;
#endif
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

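/*
 * getcpu(2): report the CPU and NUMA node the calling thread is running
 * on. The getcpu_cache argument is a legacy parameter and is ignored.
 * The result is inherently racy, since the thread may be migrated to
 * another CPU at any time after raw_smp_processor_id() samples it.
 */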
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec64 tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_boottime_ts64(&tp);
	timens_add_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than what can be stored in a 32-bit unsigned long, then
	 * we can be binary compatible with 2.2.x kernels. If not, well,
	 * in that case 2.2.x was broken anyway...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
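/*
 * 32-bit layout of struct sysinfo for compat tasks. When a native value
 * does not fit in 32 bits, the conversion below scales everything down
 * by growing mem_unit, so userspace can still reconstruct the totals.
 */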
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;
	struct compat_sysinfo s_32;

	do_sysinfo(&s);

	/*
	 * Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	memset(&s_32, 0, sizeof(s_32));
	s_32.uptime = s.uptime;
	s_32.loads[0] = s.loads[0];
	s_32.loads[1] = s.loads[1];
	s_32.loads[2] = s.loads[2];
	s_32.totalram = s.totalram;
	s_32.freeram = s.freeram;
	s_32.sharedram = s.sharedram;
	s_32.bufferram = s.bufferram;
	s_32.totalswap = s.totalswap;
	s_32.freeswap = s.freeswap;
	s_32.procs = s.procs;
	s_32.totalhigh = s.totalhigh;
	s_32.freehigh = s.freehigh;
	s_32.mem_unit = s.mem_unit;
	if (copy_to_user(info, &s_32, sizeof(s_32)))
		return -EFAULT;
	return 0;
}
#endif /* CONFIG_COMPAT */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/kernel/sys.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 */
7
8#include <linux/export.h>
9#include <linux/mm.h>
10#include <linux/utsname.h>
11#include <linux/mman.h>
12#include <linux/reboot.h>
13#include <linux/prctl.h>
14#include <linux/highuid.h>
15#include <linux/fs.h>
16#include <linux/kmod.h>
17#include <linux/perf_event.h>
18#include <linux/resource.h>
19#include <linux/kernel.h>
20#include <linux/workqueue.h>
21#include <linux/capability.h>
22#include <linux/device.h>
23#include <linux/key.h>
24#include <linux/times.h>
25#include <linux/posix-timers.h>
26#include <linux/security.h>
27#include <linux/dcookies.h>
28#include <linux/suspend.h>
29#include <linux/tty.h>
30#include <linux/signal.h>
31#include <linux/cn_proc.h>
32#include <linux/getcpu.h>
33#include <linux/task_io_accounting_ops.h>
34#include <linux/seccomp.h>
35#include <linux/cpu.h>
36#include <linux/personality.h>
37#include <linux/ptrace.h>
38#include <linux/fs_struct.h>
39#include <linux/file.h>
40#include <linux/mount.h>
41#include <linux/gfp.h>
42#include <linux/syscore_ops.h>
43#include <linux/version.h>
44#include <linux/ctype.h>
45
46#include <linux/compat.h>
47#include <linux/syscalls.h>
48#include <linux/kprobes.h>
49#include <linux/user_namespace.h>
50#include <linux/binfmts.h>
51
52#include <linux/sched.h>
53#include <linux/sched/autogroup.h>
54#include <linux/sched/loadavg.h>
55#include <linux/sched/stat.h>
56#include <linux/sched/mm.h>
57#include <linux/sched/coredump.h>
58#include <linux/sched/task.h>
59#include <linux/sched/cputime.h>
60#include <linux/rcupdate.h>
61#include <linux/uidgid.h>
62#include <linux/cred.h>
63
64#include <linux/nospec.h>
65
66#include <linux/kmsg_dump.h>
67/* Move somewhere else to avoid recompiling? */
68#include <generated/utsrelease.h>
69
70#include <linux/uaccess.h>
71#include <asm/io.h>
72#include <asm/unistd.h>
73
74#include "uid16.h"
75
76#ifndef SET_UNALIGN_CTL
77# define SET_UNALIGN_CTL(a, b) (-EINVAL)
78#endif
79#ifndef GET_UNALIGN_CTL
80# define GET_UNALIGN_CTL(a, b) (-EINVAL)
81#endif
82#ifndef SET_FPEMU_CTL
83# define SET_FPEMU_CTL(a, b) (-EINVAL)
84#endif
85#ifndef GET_FPEMU_CTL
86# define GET_FPEMU_CTL(a, b) (-EINVAL)
87#endif
88#ifndef SET_FPEXC_CTL
89# define SET_FPEXC_CTL(a, b) (-EINVAL)
90#endif
91#ifndef GET_FPEXC_CTL
92# define GET_FPEXC_CTL(a, b) (-EINVAL)
93#endif
94#ifndef GET_ENDIAN
95# define GET_ENDIAN(a, b) (-EINVAL)
96#endif
97#ifndef SET_ENDIAN
98# define SET_ENDIAN(a, b) (-EINVAL)
99#endif
100#ifndef GET_TSC_CTL
101# define GET_TSC_CTL(a) (-EINVAL)
102#endif
103#ifndef SET_TSC_CTL
104# define SET_TSC_CTL(a) (-EINVAL)
105#endif
106#ifndef GET_FP_MODE
107# define GET_FP_MODE(a) (-EINVAL)
108#endif
109#ifndef SET_FP_MODE
110# define SET_FP_MODE(a,b) (-EINVAL)
111#endif
112#ifndef SVE_SET_VL
113# define SVE_SET_VL(a) (-EINVAL)
114#endif
115#ifndef SVE_GET_VL
116# define SVE_GET_VL() (-EINVAL)
117#endif
118#ifndef PAC_RESET_KEYS
119# define PAC_RESET_KEYS(a, b) (-EINVAL)
120#endif
121#ifndef SET_TAGGED_ADDR_CTRL
122# define SET_TAGGED_ADDR_CTRL(a) (-EINVAL)
123#endif
124#ifndef GET_TAGGED_ADDR_CTRL
125# define GET_TAGGED_ADDR_CTRL() (-EINVAL)
126#endif
127
128/*
129 * this is where the system-wide overflow UID and GID are defined, for
130 * architectures that now have 32-bit UID/GID but didn't in the past
131 */
132
133int overflowuid = DEFAULT_OVERFLOWUID;
134int overflowgid = DEFAULT_OVERFLOWGID;
135
136EXPORT_SYMBOL(overflowuid);
137EXPORT_SYMBOL(overflowgid);
138
139/*
140 * the same as above, but for filesystems which can only store a 16-bit
141 * UID and GID. as such, this is needed on all architectures
142 */
143
144int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
145int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
146
147EXPORT_SYMBOL(fs_overflowuid);
148EXPORT_SYMBOL(fs_overflowgid);
149
150/*
151 * Returns true if current's euid is same as p's uid or euid,
152 * or has CAP_SYS_NICE to p's user_ns.
153 *
154 * Called with rcu_read_lock, creds are safe
155 */
156static bool set_one_prio_perm(struct task_struct *p)
157{
158 const struct cred *cred = current_cred(), *pcred = __task_cred(p);
159
160 if (uid_eq(pcred->uid, cred->euid) ||
161 uid_eq(pcred->euid, cred->euid))
162 return true;
163 if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
164 return true;
165 return false;
166}
167
168/*
169 * set the priority of a task
170 * - the caller must hold the RCU read lock
171 */
172static int set_one_prio(struct task_struct *p, int niceval, int error)
173{
174 int no_nice;
175
176 if (!set_one_prio_perm(p)) {
177 error = -EPERM;
178 goto out;
179 }
180 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
181 error = -EACCES;
182 goto out;
183 }
184 no_nice = security_task_setnice(p, niceval);
185 if (no_nice) {
186 error = no_nice;
187 goto out;
188 }
189 if (error == -ESRCH)
190 error = 0;
191 set_user_nice(p, niceval);
192out:
193 return error;
194}
195
196SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
197{
198 struct task_struct *g, *p;
199 struct user_struct *user;
200 const struct cred *cred = current_cred();
201 int error = -EINVAL;
202 struct pid *pgrp;
203 kuid_t uid;
204
205 if (which > PRIO_USER || which < PRIO_PROCESS)
206 goto out;
207
208 /* normalize: avoid signed division (rounding problems) */
209 error = -ESRCH;
210 if (niceval < MIN_NICE)
211 niceval = MIN_NICE;
212 if (niceval > MAX_NICE)
213 niceval = MAX_NICE;
214
215 rcu_read_lock();
216 read_lock(&tasklist_lock);
217 switch (which) {
218 case PRIO_PROCESS:
219 if (who)
220 p = find_task_by_vpid(who);
221 else
222 p = current;
223 if (p)
224 error = set_one_prio(p, niceval, error);
225 break;
226 case PRIO_PGRP:
227 if (who)
228 pgrp = find_vpid(who);
229 else
230 pgrp = task_pgrp(current);
231 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
232 error = set_one_prio(p, niceval, error);
233 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
234 break;
235 case PRIO_USER:
236 uid = make_kuid(cred->user_ns, who);
237 user = cred->user;
238 if (!who)
239 uid = cred->uid;
240 else if (!uid_eq(uid, cred->uid)) {
241 user = find_user(uid);
242 if (!user)
243 goto out_unlock; /* No processes for this user */
244 }
245 do_each_thread(g, p) {
246 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
247 error = set_one_prio(p, niceval, error);
248 } while_each_thread(g, p);
249 if (!uid_eq(uid, cred->uid))
250 free_uid(user); /* For find_user() */
251 break;
252 }
253out_unlock:
254 read_unlock(&tasklist_lock);
255 rcu_read_unlock();
256out:
257 return error;
258}
259
260/*
261 * Ugh. To avoid negative return values, "getpriority()" will
262 * not return the normal nice-value, but a negated value that
263 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
264 * to stay compatible.
265 */
266SYSCALL_DEFINE2(getpriority, int, which, int, who)
267{
268 struct task_struct *g, *p;
269 struct user_struct *user;
270 const struct cred *cred = current_cred();
271 long niceval, retval = -ESRCH;
272 struct pid *pgrp;
273 kuid_t uid;
274
275 if (which > PRIO_USER || which < PRIO_PROCESS)
276 return -EINVAL;
277
278 rcu_read_lock();
279 read_lock(&tasklist_lock);
280 switch (which) {
281 case PRIO_PROCESS:
282 if (who)
283 p = find_task_by_vpid(who);
284 else
285 p = current;
286 if (p) {
287 niceval = nice_to_rlimit(task_nice(p));
288 if (niceval > retval)
289 retval = niceval;
290 }
291 break;
292 case PRIO_PGRP:
293 if (who)
294 pgrp = find_vpid(who);
295 else
296 pgrp = task_pgrp(current);
297 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
298 niceval = nice_to_rlimit(task_nice(p));
299 if (niceval > retval)
300 retval = niceval;
301 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
302 break;
303 case PRIO_USER:
304 uid = make_kuid(cred->user_ns, who);
305 user = cred->user;
306 if (!who)
307 uid = cred->uid;
308 else if (!uid_eq(uid, cred->uid)) {
309 user = find_user(uid);
310 if (!user)
311 goto out_unlock; /* No processes for this user */
312 }
313 do_each_thread(g, p) {
314 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
315 niceval = nice_to_rlimit(task_nice(p));
316 if (niceval > retval)
317 retval = niceval;
318 }
319 } while_each_thread(g, p);
320 if (!uid_eq(uid, cred->uid))
321 free_uid(user); /* for find_user() */
322 break;
323 }
324out_unlock:
325 read_unlock(&tasklist_lock);
326 rcu_read_unlock();
327
328 return retval;
329}
330
331/*
332 * Unprivileged users may change the real gid to the effective gid
333 * or vice versa. (BSD-style)
334 *
335 * If you set the real gid at all, or set the effective gid to a value not
336 * equal to the real gid, then the saved gid is set to the new effective gid.
337 *
338 * This makes it possible for a setgid program to completely drop its
339 * privileges, which is often a useful assertion to make when you are doing
340 * a security audit over a program.
341 *
342 * The general idea is that a program which uses just setregid() will be
343 * 100% compatible with BSD. A program which uses just setgid() will be
344 * 100% compatible with POSIX with saved IDs.
345 *
346 * SMP: There are not races, the GIDs are checked only by filesystem
347 * operations (as far as semantic preservation is concerned).
348 */
349#ifdef CONFIG_MULTIUSER
350long __sys_setregid(gid_t rgid, gid_t egid)
351{
352 struct user_namespace *ns = current_user_ns();
353 const struct cred *old;
354 struct cred *new;
355 int retval;
356 kgid_t krgid, kegid;
357
358 krgid = make_kgid(ns, rgid);
359 kegid = make_kgid(ns, egid);
360
361 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
362 return -EINVAL;
363 if ((egid != (gid_t) -1) && !gid_valid(kegid))
364 return -EINVAL;
365
366 new = prepare_creds();
367 if (!new)
368 return -ENOMEM;
369 old = current_cred();
370
371 retval = -EPERM;
372 if (rgid != (gid_t) -1) {
373 if (gid_eq(old->gid, krgid) ||
374 gid_eq(old->egid, krgid) ||
375 ns_capable(old->user_ns, CAP_SETGID))
376 new->gid = krgid;
377 else
378 goto error;
379 }
380 if (egid != (gid_t) -1) {
381 if (gid_eq(old->gid, kegid) ||
382 gid_eq(old->egid, kegid) ||
383 gid_eq(old->sgid, kegid) ||
384 ns_capable(old->user_ns, CAP_SETGID))
385 new->egid = kegid;
386 else
387 goto error;
388 }
389
390 if (rgid != (gid_t) -1 ||
391 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
392 new->sgid = new->egid;
393 new->fsgid = new->egid;
394
395 return commit_creds(new);
396
397error:
398 abort_creds(new);
399 return retval;
400}
401
402SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
403{
404 return __sys_setregid(rgid, egid);
405}
406
407/*
408 * setgid() is implemented like SysV w/ SAVED_IDS
409 *
410 * SMP: Same implicit races as above.
411 */
412long __sys_setgid(gid_t gid)
413{
414 struct user_namespace *ns = current_user_ns();
415 const struct cred *old;
416 struct cred *new;
417 int retval;
418 kgid_t kgid;
419
420 kgid = make_kgid(ns, gid);
421 if (!gid_valid(kgid))
422 return -EINVAL;
423
424 new = prepare_creds();
425 if (!new)
426 return -ENOMEM;
427 old = current_cred();
428
429 retval = -EPERM;
430 if (ns_capable(old->user_ns, CAP_SETGID))
431 new->gid = new->egid = new->sgid = new->fsgid = kgid;
432 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
433 new->egid = new->fsgid = kgid;
434 else
435 goto error;
436
437 return commit_creds(new);
438
439error:
440 abort_creds(new);
441 return retval;
442}
443
444SYSCALL_DEFINE1(setgid, gid_t, gid)
445{
446 return __sys_setgid(gid);
447}
448
449/*
450 * change the user struct in a credentials set to match the new UID
451 */
452static int set_user(struct cred *new)
453{
454 struct user_struct *new_user;
455
456 new_user = alloc_uid(new->uid);
457 if (!new_user)
458 return -EAGAIN;
459
460 /*
461 * We don't fail in case of NPROC limit excess here because too many
462 * poorly written programs don't check set*uid() return code, assuming
463 * it never fails if called by root. We may still enforce NPROC limit
464 * for programs doing set*uid()+execve() by harmlessly deferring the
465 * failure to the execve() stage.
466 */
467 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
468 new_user != INIT_USER)
469 current->flags |= PF_NPROC_EXCEEDED;
470 else
471 current->flags &= ~PF_NPROC_EXCEEDED;
472
473 free_uid(new->user);
474 new->user = new_user;
475 return 0;
476}
477
478/*
479 * Unprivileged users may change the real uid to the effective uid
480 * or vice versa. (BSD-style)
481 *
482 * If you set the real uid at all, or set the effective uid to a value not
483 * equal to the real uid, then the saved uid is set to the new effective uid.
484 *
485 * This makes it possible for a setuid program to completely drop its
486 * privileges, which is often a useful assertion to make when you are doing
487 * a security audit over a program.
488 *
489 * The general idea is that a program which uses just setreuid() will be
490 * 100% compatible with BSD. A program which uses just setuid() will be
491 * 100% compatible with POSIX with saved IDs.
492 */
493long __sys_setreuid(uid_t ruid, uid_t euid)
494{
495 struct user_namespace *ns = current_user_ns();
496 const struct cred *old;
497 struct cred *new;
498 int retval;
499 kuid_t kruid, keuid;
500
501 kruid = make_kuid(ns, ruid);
502 keuid = make_kuid(ns, euid);
503
504 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
505 return -EINVAL;
506 if ((euid != (uid_t) -1) && !uid_valid(keuid))
507 return -EINVAL;
508
509 new = prepare_creds();
510 if (!new)
511 return -ENOMEM;
512 old = current_cred();
513
514 retval = -EPERM;
515 if (ruid != (uid_t) -1) {
516 new->uid = kruid;
517 if (!uid_eq(old->uid, kruid) &&
518 !uid_eq(old->euid, kruid) &&
519 !ns_capable_setid(old->user_ns, CAP_SETUID))
520 goto error;
521 }
522
523 if (euid != (uid_t) -1) {
524 new->euid = keuid;
525 if (!uid_eq(old->uid, keuid) &&
526 !uid_eq(old->euid, keuid) &&
527 !uid_eq(old->suid, keuid) &&
528 !ns_capable_setid(old->user_ns, CAP_SETUID))
529 goto error;
530 }
531
532 if (!uid_eq(new->uid, old->uid)) {
533 retval = set_user(new);
534 if (retval < 0)
535 goto error;
536 }
537 if (ruid != (uid_t) -1 ||
538 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
539 new->suid = new->euid;
540 new->fsuid = new->euid;
541
542 retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
543 if (retval < 0)
544 goto error;
545
546 return commit_creds(new);
547
548error:
549 abort_creds(new);
550 return retval;
551}
552
553SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
554{
555 return __sys_setreuid(ruid, euid);
556}
557
558/*
559 * setuid() is implemented like SysV with SAVED_IDS
560 *
561 * Note that SAVED_ID's is deficient in that a setuid root program
562 * like sendmail, for example, cannot set its uid to be a normal
563 * user and then switch back, because if you're root, setuid() sets
564 * the saved uid too. If you don't like this, blame the bright people
565 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
566 * will allow a root program to temporarily drop privileges and be able to
567 * regain them by swapping the real and effective uid.
568 */
569long __sys_setuid(uid_t uid)
570{
571 struct user_namespace *ns = current_user_ns();
572 const struct cred *old;
573 struct cred *new;
574 int retval;
575 kuid_t kuid;
576
577 kuid = make_kuid(ns, uid);
578 if (!uid_valid(kuid))
579 return -EINVAL;
580
581 new = prepare_creds();
582 if (!new)
583 return -ENOMEM;
584 old = current_cred();
585
586 retval = -EPERM;
587 if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
588 new->suid = new->uid = kuid;
589 if (!uid_eq(kuid, old->uid)) {
590 retval = set_user(new);
591 if (retval < 0)
592 goto error;
593 }
594 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
595 goto error;
596 }
597
598 new->fsuid = new->euid = kuid;
599
600 retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
601 if (retval < 0)
602 goto error;
603
604 return commit_creds(new);
605
606error:
607 abort_creds(new);
608 return retval;
609}
610
611SYSCALL_DEFINE1(setuid, uid_t, uid)
612{
613 return __sys_setuid(uid);
614}
615
616
617/*
618 * This function implements a generic ability to update ruid, euid,
619 * and suid. This allows you to implement the 4.4 compatible seteuid().
620 */
621long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
622{
623 struct user_namespace *ns = current_user_ns();
624 const struct cred *old;
625 struct cred *new;
626 int retval;
627 kuid_t kruid, keuid, ksuid;
628
629 kruid = make_kuid(ns, ruid);
630 keuid = make_kuid(ns, euid);
631 ksuid = make_kuid(ns, suid);
632
633 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
634 return -EINVAL;
635
636 if ((euid != (uid_t) -1) && !uid_valid(keuid))
637 return -EINVAL;
638
639 if ((suid != (uid_t) -1) && !uid_valid(ksuid))
640 return -EINVAL;
641
642 new = prepare_creds();
643 if (!new)
644 return -ENOMEM;
645
646 old = current_cred();
647
648 retval = -EPERM;
649 if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
650 if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
651 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
652 goto error;
653 if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
654 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
655 goto error;
656 if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
657 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
658 goto error;
659 }
660
661 if (ruid != (uid_t) -1) {
662 new->uid = kruid;
663 if (!uid_eq(kruid, old->uid)) {
664 retval = set_user(new);
665 if (retval < 0)
666 goto error;
667 }
668 }
669 if (euid != (uid_t) -1)
670 new->euid = keuid;
671 if (suid != (uid_t) -1)
672 new->suid = ksuid;
673 new->fsuid = new->euid;
674
675 retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
676 if (retval < 0)
677 goto error;
678
679 return commit_creds(new);
680
681error:
682 abort_creds(new);
683 return retval;
684}
685
686SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
687{
688 return __sys_setresuid(ruid, euid, suid);
689}
690
691SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
692{
693 const struct cred *cred = current_cred();
694 int retval;
695 uid_t ruid, euid, suid;
696
697 ruid = from_kuid_munged(cred->user_ns, cred->uid);
698 euid = from_kuid_munged(cred->user_ns, cred->euid);
699 suid = from_kuid_munged(cred->user_ns, cred->suid);
700
701 retval = put_user(ruid, ruidp);
702 if (!retval) {
703 retval = put_user(euid, euidp);
704 if (!retval)
705 return put_user(suid, suidp);
706 }
707 return retval;
708}
709
710/*
711 * Same as above, but for rgid, egid, sgid.
712 */
713long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
714{
715 struct user_namespace *ns = current_user_ns();
716 const struct cred *old;
717 struct cred *new;
718 int retval;
719 kgid_t krgid, kegid, ksgid;
720
721 krgid = make_kgid(ns, rgid);
722 kegid = make_kgid(ns, egid);
723 ksgid = make_kgid(ns, sgid);
724
725 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
726 return -EINVAL;
727 if ((egid != (gid_t) -1) && !gid_valid(kegid))
728 return -EINVAL;
729 if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
730 return -EINVAL;
731
732 new = prepare_creds();
733 if (!new)
734 return -ENOMEM;
735 old = current_cred();
736
737 retval = -EPERM;
738 if (!ns_capable(old->user_ns, CAP_SETGID)) {
739 if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
740 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
741 goto error;
742 if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
743 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
744 goto error;
745 if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
746 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
747 goto error;
748 }
749
750 if (rgid != (gid_t) -1)
751 new->gid = krgid;
752 if (egid != (gid_t) -1)
753 new->egid = kegid;
754 if (sgid != (gid_t) -1)
755 new->sgid = ksgid;
756 new->fsgid = new->egid;
757
758 return commit_creds(new);
759
760error:
761 abort_creds(new);
762 return retval;
763}
764
765SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
766{
767 return __sys_setresgid(rgid, egid, sgid);
768}
769
770SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
771{
772 const struct cred *cred = current_cred();
773 int retval;
774 gid_t rgid, egid, sgid;
775
776 rgid = from_kgid_munged(cred->user_ns, cred->gid);
777 egid = from_kgid_munged(cred->user_ns, cred->egid);
778 sgid = from_kgid_munged(cred->user_ns, cred->sgid);
779
780 retval = put_user(rgid, rgidp);
781 if (!retval) {
782 retval = put_user(egid, egidp);
783 if (!retval)
784 retval = put_user(sgid, sgidp);
785 }
786
787 return retval;
788}
789
790
791/*
792 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
793 * is used for "access()" and for the NFS daemon (letting nfsd stay at
794 * whatever uid it wants to). It normally shadows "euid", except when
795 * explicitly set by setfsuid() or for access..
796 */
797long __sys_setfsuid(uid_t uid)
798{
799 const struct cred *old;
800 struct cred *new;
801 uid_t old_fsuid;
802 kuid_t kuid;
803
804 old = current_cred();
805 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
806
807 kuid = make_kuid(old->user_ns, uid);
808 if (!uid_valid(kuid))
809 return old_fsuid;
810
811 new = prepare_creds();
812 if (!new)
813 return old_fsuid;
814
815 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
816 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
817 ns_capable_setid(old->user_ns, CAP_SETUID)) {
818 if (!uid_eq(kuid, old->fsuid)) {
819 new->fsuid = kuid;
820 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
821 goto change_okay;
822 }
823 }
824
825 abort_creds(new);
826 return old_fsuid;
827
828change_okay:
829 commit_creds(new);
830 return old_fsuid;
831}
832
833SYSCALL_DEFINE1(setfsuid, uid_t, uid)
834{
835 return __sys_setfsuid(uid);
836}
837
838/*
839 * Samma på svenska..
840 */
841long __sys_setfsgid(gid_t gid)
842{
843 const struct cred *old;
844 struct cred *new;
845 gid_t old_fsgid;
846 kgid_t kgid;
847
848 old = current_cred();
849 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
850
851 kgid = make_kgid(old->user_ns, gid);
852 if (!gid_valid(kgid))
853 return old_fsgid;
854
855 new = prepare_creds();
856 if (!new)
857 return old_fsgid;
858
859 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
860 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
861 ns_capable(old->user_ns, CAP_SETGID)) {
862 if (!gid_eq(kgid, old->fsgid)) {
863 new->fsgid = kgid;
864 goto change_okay;
865 }
866 }
867
868 abort_creds(new);
869 return old_fsgid;
870
871change_okay:
872 commit_creds(new);
873 return old_fsgid;
874}
875
876SYSCALL_DEFINE1(setfsgid, gid_t, gid)
877{
878 return __sys_setfsgid(gid);
879}
880#endif /* CONFIG_MULTIUSER */
881
882/**
883 * sys_getpid - return the thread group id of the current process
884 *
885 * Note, despite the name, this returns the tgid not the pid. The tgid and
886 * the pid are identical unless CLONE_THREAD was specified on clone() in
887 * which case the tgid is the same in all threads of the same group.
888 *
889 * This is SMP safe as current->tgid does not change.
890 */
891SYSCALL_DEFINE0(getpid)
892{
893 return task_tgid_vnr(current);
894}
895
896/* Thread ID - the internal kernel "pid" */
897SYSCALL_DEFINE0(gettid)
898{
899 return task_pid_vnr(current);
900}
901
902/*
903 * Accessing ->real_parent is not SMP-safe, it could
904 * change from under us. However, we can use a stale
905 * value of ->real_parent under rcu_read_lock(), see
906 * release_task()->call_rcu(delayed_put_task_struct).
907 */
908SYSCALL_DEFINE0(getppid)
909{
910 int pid;
911
912 rcu_read_lock();
913 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
914 rcu_read_unlock();
915
916 return pid;
917}
918
919SYSCALL_DEFINE0(getuid)
920{
921 /* Only we change this so SMP safe */
922 return from_kuid_munged(current_user_ns(), current_uid());
923}
924
925SYSCALL_DEFINE0(geteuid)
926{
927 /* Only we change this so SMP safe */
928 return from_kuid_munged(current_user_ns(), current_euid());
929}
930
931SYSCALL_DEFINE0(getgid)
932{
933 /* Only we change this so SMP safe */
934 return from_kgid_munged(current_user_ns(), current_gid());
935}
936
937SYSCALL_DEFINE0(getegid)
938{
939 /* Only we change this so SMP safe */
940 return from_kgid_munged(current_user_ns(), current_egid());
941}
942
943static void do_sys_times(struct tms *tms)
944{
945 u64 tgutime, tgstime, cutime, cstime;
946
947 thread_group_cputime_adjusted(current, &tgutime, &tgstime);
948 cutime = current->signal->cutime;
949 cstime = current->signal->cstime;
950 tms->tms_utime = nsec_to_clock_t(tgutime);
951 tms->tms_stime = nsec_to_clock_t(tgstime);
952 tms->tms_cutime = nsec_to_clock_t(cutime);
953 tms->tms_cstime = nsec_to_clock_t(cstime);
954}
955
956SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
957{
958 if (tbuf) {
959 struct tms tmp;
960
961 do_sys_times(&tmp);
962 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
963 return -EFAULT;
964 }
965 force_successful_syscall_return();
966 return (long) jiffies_64_to_clock_t(get_jiffies_64());
967}
968
969#ifdef CONFIG_COMPAT
970static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
971{
972 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
973}
974
975COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
976{
977 if (tbuf) {
978 struct tms tms;
979 struct compat_tms tmp;
980
981 do_sys_times(&tms);
982 /* Convert our struct tms to the compat version. */
983 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
984 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
985 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
986 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
987 if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
988 return -EFAULT;
989 }
990 force_successful_syscall_return();
991 return compat_jiffies_to_clock_t(jiffies);
992}
993#endif
994
995/*
996 * This needs some heavy checking ...
997 * I just haven't the stomach for it. I also don't fully
998 * understand sessions/pgrp etc. Let somebody who does explain it.
999 *
1000 * OK, I think I have the protection semantics right.... this is really
1001 * only important on a multi-user system anyway, to make sure one user
1002 * can't send a signal to a process owned by another. -TYT, 12/12/91
1003 *
1004 * !PF_FORKNOEXEC check to conform completely to POSIX.
1005 */
1006SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1007{
1008 struct task_struct *p;
1009 struct task_struct *group_leader = current->group_leader;
1010 struct pid *pgrp;
1011 int err;
1012
1013 if (!pid)
1014 pid = task_pid_vnr(group_leader);
1015 if (!pgid)
1016 pgid = pid;
1017 if (pgid < 0)
1018 return -EINVAL;
1019 rcu_read_lock();
1020
1021 /* From this point forward we keep holding onto the tasklist lock
1022 * so that our parent does not change from under us. -DaveM
1023 */
1024 write_lock_irq(&tasklist_lock);
1025
1026 err = -ESRCH;
1027 p = find_task_by_vpid(pid);
1028 if (!p)
1029 goto out;
1030
1031 err = -EINVAL;
1032 if (!thread_group_leader(p))
1033 goto out;
1034
1035 if (same_thread_group(p->real_parent, group_leader)) {
1036 err = -EPERM;
1037 if (task_session(p) != task_session(group_leader))
1038 goto out;
1039 err = -EACCES;
1040 if (!(p->flags & PF_FORKNOEXEC))
1041 goto out;
1042 } else {
1043 err = -ESRCH;
1044 if (p != group_leader)
1045 goto out;
1046 }
1047
1048 err = -EPERM;
1049 if (p->signal->leader)
1050 goto out;
1051
1052 pgrp = task_pid(p);
1053 if (pgid != pid) {
1054 struct task_struct *g;
1055
1056 pgrp = find_vpid(pgid);
1057 g = pid_task(pgrp, PIDTYPE_PGID);
1058 if (!g || task_session(g) != task_session(group_leader))
1059 goto out;
1060 }
1061
1062 err = security_task_setpgid(p, pgid);
1063 if (err)
1064 goto out;
1065
1066 if (task_pgrp(p) != pgrp)
1067 change_pid(p, PIDTYPE_PGID, pgrp);
1068
1069 err = 0;
1070out:
1071 /* All paths lead to here, thus we are safe. -DaveM */
1072 write_unlock_irq(&tasklist_lock);
1073 rcu_read_unlock();
1074 return err;
1075}
1076
1077static int do_getpgid(pid_t pid)
1078{
1079 struct task_struct *p;
1080 struct pid *grp;
1081 int retval;
1082
1083 rcu_read_lock();
1084 if (!pid)
1085 grp = task_pgrp(current);
1086 else {
1087 retval = -ESRCH;
1088 p = find_task_by_vpid(pid);
1089 if (!p)
1090 goto out;
1091 grp = task_pgrp(p);
1092 if (!grp)
1093 goto out;
1094
1095 retval = security_task_getpgid(p);
1096 if (retval)
1097 goto out;
1098 }
1099 retval = pid_vnr(grp);
1100out:
1101 rcu_read_unlock();
1102 return retval;
1103}
1104
1105SYSCALL_DEFINE1(getpgid, pid_t, pid)
1106{
1107 return do_getpgid(pid);
1108}
1109
1110#ifdef __ARCH_WANT_SYS_GETPGRP
1111
1112SYSCALL_DEFINE0(getpgrp)
1113{
1114 return do_getpgid(0);
1115}
1116
1117#endif
1118
1119SYSCALL_DEFINE1(getsid, pid_t, pid)
1120{
1121 struct task_struct *p;
1122 struct pid *sid;
1123 int retval;
1124
1125 rcu_read_lock();
1126 if (!pid)
1127 sid = task_session(current);
1128 else {
1129 retval = -ESRCH;
1130 p = find_task_by_vpid(pid);
1131 if (!p)
1132 goto out;
1133 sid = task_session(p);
1134 if (!sid)
1135 goto out;
1136
1137 retval = security_task_getsid(p);
1138 if (retval)
1139 goto out;
1140 }
1141 retval = pid_vnr(sid);
1142out:
1143 rcu_read_unlock();
1144 return retval;
1145}
1146
1147static void set_special_pids(struct pid *pid)
1148{
1149 struct task_struct *curr = current->group_leader;
1150
1151 if (task_session(curr) != pid)
1152 change_pid(curr, PIDTYPE_SID, pid);
1153
1154 if (task_pgrp(curr) != pid)
1155 change_pid(curr, PIDTYPE_PGID, pid);
1156}
1157
1158int ksys_setsid(void)
1159{
1160 struct task_struct *group_leader = current->group_leader;
1161 struct pid *sid = task_pid(group_leader);
1162 pid_t session = pid_vnr(sid);
1163 int err = -EPERM;
1164
1165 write_lock_irq(&tasklist_lock);
1166 /* Fail if I am already a session leader */
1167 if (group_leader->signal->leader)
1168 goto out;
1169
1170 /* Fail if a process group id already exists that equals the
1171 * proposed session id.
1172 */
1173 if (pid_task(sid, PIDTYPE_PGID))
1174 goto out;
1175
1176 group_leader->signal->leader = 1;
1177 set_special_pids(sid);
1178
1179 proc_clear_tty(group_leader);
1180
1181 err = session;
1182out:
1183 write_unlock_irq(&tasklist_lock);
1184 if (err > 0) {
1185 proc_sid_connector(group_leader);
1186 sched_autogroup_create_attach(group_leader);
1187 }
1188 return err;
1189}
1190
1191SYSCALL_DEFINE0(setsid)
1192{
1193 return ksys_setsid();
1194}
1195
1196DECLARE_RWSEM(uts_sem);
1197
1198#ifdef COMPAT_UTS_MACHINE
1199#define override_architecture(name) \
1200 (personality(current->personality) == PER_LINUX32 && \
1201 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1202 sizeof(COMPAT_UTS_MACHINE)))
1203#else
1204#define override_architecture(name) 0
1205#endif
1206
1207/*
1208 * Work around broken programs that cannot handle "Linux 3.0".
1209 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1210 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1211 * 2.6.60.
1212 */
1213static int override_release(char __user *release, size_t len)
1214{
1215 int ret = 0;
1216
1217 if (current->personality & UNAME26) {
1218 const char *rest = UTS_RELEASE;
1219 char buf[65] = { 0 };
1220 int ndots = 0;
1221 unsigned v;
1222 size_t copy;
1223
1224 while (*rest) {
1225 if (*rest == '.' && ++ndots >= 3)
1226 break;
1227 if (!isdigit(*rest) && *rest != '.')
1228 break;
1229 rest++;
1230 }
1231 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1232 copy = clamp_t(size_t, len, 1, sizeof(buf));
1233 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1234 ret = copy_to_user(release, buf, copy + 1);
1235 }
1236 return ret;
1237}
1238
1239SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1240{
1241 struct new_utsname tmp;
1242
1243 down_read(&uts_sem);
1244 memcpy(&tmp, utsname(), sizeof(tmp));
1245 up_read(&uts_sem);
1246 if (copy_to_user(name, &tmp, sizeof(tmp)))
1247 return -EFAULT;
1248
1249 if (override_release(name->release, sizeof(name->release)))
1250 return -EFAULT;
1251 if (override_architecture(name))
1252 return -EFAULT;
1253 return 0;
1254}
1255
1256#ifdef __ARCH_WANT_SYS_OLD_UNAME
1257/*
1258 * Old cruft
1259 */
1260SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1261{
1262 struct old_utsname tmp;
1263
1264 if (!name)
1265 return -EFAULT;
1266
1267 down_read(&uts_sem);
1268 memcpy(&tmp, utsname(), sizeof(tmp));
1269 up_read(&uts_sem);
1270 if (copy_to_user(name, &tmp, sizeof(tmp)))
1271 return -EFAULT;
1272
1273 if (override_release(name->release, sizeof(name->release)))
1274 return -EFAULT;
1275 if (override_architecture(name))
1276 return -EFAULT;
1277 return 0;
1278}
1279
1280SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1281{
1282 struct oldold_utsname tmp = {};
1283
1284 if (!name)
1285 return -EFAULT;
1286
1287 down_read(&uts_sem);
1288 memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1289 memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1290 memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1291 memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1292 memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1293 up_read(&uts_sem);
1294 if (copy_to_user(name, &tmp, sizeof(tmp)))
1295 return -EFAULT;
1296
1297 if (override_architecture(name))
1298 return -EFAULT;
1299 if (override_release(name->release, sizeof(name->release)))
1300 return -EFAULT;
1301 return 0;
1302}
1303#endif
1304
1305SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1306{
1307 int errno;
1308 char tmp[__NEW_UTS_LEN];
1309
1310 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1311 return -EPERM;
1312
1313 if (len < 0 || len > __NEW_UTS_LEN)
1314 return -EINVAL;
1315 errno = -EFAULT;
1316 if (!copy_from_user(tmp, name, len)) {
1317 struct new_utsname *u;
1318
1319 down_write(&uts_sem);
1320 u = utsname();
1321 memcpy(u->nodename, tmp, len);
1322 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1323 errno = 0;
1324 uts_proc_notify(UTS_PROC_HOSTNAME);
1325 up_write(&uts_sem);
1326 }
1327 return errno;
1328}
1329
1330#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1331
1332SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1333{
1334 int i;
1335 struct new_utsname *u;
1336 char tmp[__NEW_UTS_LEN + 1];
1337
1338 if (len < 0)
1339 return -EINVAL;
1340 down_read(&uts_sem);
1341 u = utsname();
1342 i = 1 + strlen(u->nodename);
1343 if (i > len)
1344 i = len;
1345 memcpy(tmp, u->nodename, i);
1346 up_read(&uts_sem);
1347 if (copy_to_user(name, tmp, i))
1348 return -EFAULT;
1349 return 0;
1350}
1351
1352#endif
1353
1354/*
1355 * Only setdomainname; getdomainname can be implemented by calling
1356 * uname()
1357 */
1358SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1359{
1360 int errno;
1361 char tmp[__NEW_UTS_LEN];
1362
1363 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1364 return -EPERM;
1365 if (len < 0 || len > __NEW_UTS_LEN)
1366 return -EINVAL;
1367
1368 errno = -EFAULT;
1369 if (!copy_from_user(tmp, name, len)) {
1370 struct new_utsname *u;
1371
1372 down_write(&uts_sem);
1373 u = utsname();
1374 memcpy(u->domainname, tmp, len);
1375 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1376 errno = 0;
1377 uts_proc_notify(UTS_PROC_DOMAINNAME);
1378 up_write(&uts_sem);
1379 }
1380 return errno;
1381}
1382
1383SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1384{
1385 struct rlimit value;
1386 int ret;
1387
1388 ret = do_prlimit(current, resource, NULL, &value);
1389 if (!ret)
1390 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1391
1392 return ret;
1393}
1394
1395#ifdef CONFIG_COMPAT
1396
1397COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1398 struct compat_rlimit __user *, rlim)
1399{
1400 struct rlimit r;
1401 struct compat_rlimit r32;
1402
1403 if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1404 return -EFAULT;
1405
1406 if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1407 r.rlim_cur = RLIM_INFINITY;
1408 else
1409 r.rlim_cur = r32.rlim_cur;
1410 if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1411 r.rlim_max = RLIM_INFINITY;
1412 else
1413 r.rlim_max = r32.rlim_max;
1414 return do_prlimit(current, resource, &r, NULL);
1415}
1416
1417COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1418 struct compat_rlimit __user *, rlim)
1419{
1420 struct rlimit r;
1421 int ret;
1422
1423 ret = do_prlimit(current, resource, NULL, &r);
1424 if (!ret) {
1425 struct compat_rlimit r32;
1426 if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1427 r32.rlim_cur = COMPAT_RLIM_INFINITY;
1428 else
1429 r32.rlim_cur = r.rlim_cur;
1430 if (r.rlim_max > COMPAT_RLIM_INFINITY)
1431 r32.rlim_max = COMPAT_RLIM_INFINITY;
1432 else
1433 r32.rlim_max = r.rlim_max;
1434
1435 if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1436 return -EFAULT;
1437 }
1438 return ret;
1439}
1440
1441#endif
1442
1443#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1444
1445/*
1446 * Back compatibility for getrlimit. Needed for some apps.
1447 */
1448SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1449 struct rlimit __user *, rlim)
1450{
1451 struct rlimit x;
1452 if (resource >= RLIM_NLIMITS)
1453 return -EINVAL;
1454
1455 resource = array_index_nospec(resource, RLIM_NLIMITS);
1456 task_lock(current->group_leader);
1457 x = current->signal->rlim[resource];
1458 task_unlock(current->group_leader);
1459 if (x.rlim_cur > 0x7FFFFFFF)
1460 x.rlim_cur = 0x7FFFFFFF;
1461 if (x.rlim_max > 0x7FFFFFFF)
1462 x.rlim_max = 0x7FFFFFFF;
1463 return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1464}
1465
1466#ifdef CONFIG_COMPAT
1467COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1468 struct compat_rlimit __user *, rlim)
1469{
1470 struct rlimit r;
1471
1472 if (resource >= RLIM_NLIMITS)
1473 return -EINVAL;
1474
1475 resource = array_index_nospec(resource, RLIM_NLIMITS);
1476 task_lock(current->group_leader);
1477 r = current->signal->rlim[resource];
1478 task_unlock(current->group_leader);
1479 if (r.rlim_cur > 0x7FFFFFFF)
1480 r.rlim_cur = 0x7FFFFFFF;
1481 if (r.rlim_max > 0x7FFFFFFF)
1482 r.rlim_max = 0x7FFFFFFF;
1483
1484 if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1485 put_user(r.rlim_max, &rlim->rlim_max))
1486 return -EFAULT;
1487 return 0;
1488}
1489#endif
1490
1491#endif
1492
1493static inline bool rlim64_is_infinity(__u64 rlim64)
1494{
1495#if BITS_PER_LONG < 64
1496 return rlim64 >= ULONG_MAX;
1497#else
1498 return rlim64 == RLIM64_INFINITY;
1499#endif
1500}
1501
1502static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1503{
1504 if (rlim->rlim_cur == RLIM_INFINITY)
1505 rlim64->rlim_cur = RLIM64_INFINITY;
1506 else
1507 rlim64->rlim_cur = rlim->rlim_cur;
1508 if (rlim->rlim_max == RLIM_INFINITY)
1509 rlim64->rlim_max = RLIM64_INFINITY;
1510 else
1511 rlim64->rlim_max = rlim->rlim_max;
1512}
1513
1514static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1515{
1516 if (rlim64_is_infinity(rlim64->rlim_cur))
1517 rlim->rlim_cur = RLIM_INFINITY;
1518 else
1519 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1520 if (rlim64_is_infinity(rlim64->rlim_max))
1521 rlim->rlim_max = RLIM_INFINITY;
1522 else
1523 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1524}
1525
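/*
 * Worked example for the helpers above on a 32-bit kernel
 * (BITS_PER_LONG == 32, so ULONG_MAX == 0xffffffff), shown as a
 * comment-only sketch with assumed input values:
 *
 *	rlim64->rlim_cur == 0xffffffff           ->  RLIM_INFINITY
 *	rlim64->rlim_cur == 0x100000000 (2^32)   ->  RLIM_INFINITY
 *	rlim64->rlim_cur == 0x10000              ->  0x10000
 *
 * Any 64-bit value that cannot be represented in an unsigned long is
 * treated as infinity rather than silently truncated.
 */
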
1526/* make sure you are allowed to change @tsk limits before calling this */
1527int do_prlimit(struct task_struct *tsk, unsigned int resource,
1528 struct rlimit *new_rlim, struct rlimit *old_rlim)
1529{
1530 struct rlimit *rlim;
1531 int retval = 0;
1532
1533 if (resource >= RLIM_NLIMITS)
1534 return -EINVAL;
1535 if (new_rlim) {
1536 if (new_rlim->rlim_cur > new_rlim->rlim_max)
1537 return -EINVAL;
1538 if (resource == RLIMIT_NOFILE &&
1539 new_rlim->rlim_max > sysctl_nr_open)
1540 return -EPERM;
1541 }
1542
1543 /* protect tsk->signal and tsk->sighand from disappearing */
1544 read_lock(&tasklist_lock);
1545 if (!tsk->sighand) {
1546 retval = -ESRCH;
1547 goto out;
1548 }
1549
1550 rlim = tsk->signal->rlim + resource;
1551 task_lock(tsk->group_leader);
1552 if (new_rlim) {
1553 /* Keep the capable check against init_user_ns until
1554 cgroups can contain all limits */
1555 if (new_rlim->rlim_max > rlim->rlim_max &&
1556 !capable(CAP_SYS_RESOURCE))
1557 retval = -EPERM;
1558 if (!retval)
1559 retval = security_task_setrlimit(tsk, resource, new_rlim);
1560 }
1561 if (!retval) {
1562 if (old_rlim)
1563 *old_rlim = *rlim;
1564 if (new_rlim)
1565 *rlim = *new_rlim;
1566 }
1567 task_unlock(tsk->group_leader);
1568
1569 /*
1570 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
1571	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
1572 * ignores the rlimit.
1573 */
1574 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1575 new_rlim->rlim_cur != RLIM_INFINITY &&
1576 IS_ENABLED(CONFIG_POSIX_TIMERS))
1577 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1578out:
1579 read_unlock(&tasklist_lock);
1580 return retval;
1581}
1582
1583/* rcu lock must be held */
1584static int check_prlimit_permission(struct task_struct *task,
1585 unsigned int flags)
1586{
1587 const struct cred *cred = current_cred(), *tcred;
1588 bool id_match;
1589
1590 if (current == task)
1591 return 0;
1592
1593 tcred = __task_cred(task);
1594 id_match = (uid_eq(cred->uid, tcred->euid) &&
1595 uid_eq(cred->uid, tcred->suid) &&
1596 uid_eq(cred->uid, tcred->uid) &&
1597 gid_eq(cred->gid, tcred->egid) &&
1598 gid_eq(cred->gid, tcred->sgid) &&
1599 gid_eq(cred->gid, tcred->gid));
1600 if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1601 return -EPERM;
1602
1603 return security_task_prlimit(cred, tcred, flags);
1604}
1605
1606SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1607 const struct rlimit64 __user *, new_rlim,
1608 struct rlimit64 __user *, old_rlim)
1609{
1610 struct rlimit64 old64, new64;
1611 struct rlimit old, new;
1612 struct task_struct *tsk;
1613 unsigned int checkflags = 0;
1614 int ret;
1615
1616 if (old_rlim)
1617 checkflags |= LSM_PRLIMIT_READ;
1618
1619 if (new_rlim) {
1620 if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1621 return -EFAULT;
1622 rlim64_to_rlim(&new64, &new);
1623 checkflags |= LSM_PRLIMIT_WRITE;
1624 }
1625
1626 rcu_read_lock();
1627 tsk = pid ? find_task_by_vpid(pid) : current;
1628 if (!tsk) {
1629 rcu_read_unlock();
1630 return -ESRCH;
1631 }
1632 ret = check_prlimit_permission(tsk, checkflags);
1633 if (ret) {
1634 rcu_read_unlock();
1635 return ret;
1636 }
1637 get_task_struct(tsk);
1638 rcu_read_unlock();
1639
1640 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1641 old_rlim ? &old : NULL);
1642
1643 if (!ret && old_rlim) {
1644 rlim_to_rlim64(&old, &old64);
1645 if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1646 ret = -EFAULT;
1647 }
1648
1649 put_task_struct(tsk);
1650 return ret;
1651}
1652
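/*
 * Illustrative userspace usage (a hedged sketch using the glibc
 * prlimit() wrapper, which is built on top of this syscall;
 * set_cpu_limit() is a hypothetical helper name): atomically set a
 * new RLIMIT_CPU while reading back the old one.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int set_cpu_limit(pid_t pid, rlim_t secs)
 *	{
 *		struct rlimit new_lim = { .rlim_cur = secs, .rlim_max = secs };
 *		struct rlimit old_lim;
 *
 *		if (prlimit(pid, RLIMIT_CPU, &new_lim, &old_lim))
 *			return -1;
 *		printf("old soft=%llu\n", (unsigned long long)old_lim.rlim_cur);
 *		return 0;
 *	}
 */
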
1653SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1654{
1655 struct rlimit new_rlim;
1656
1657 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1658 return -EFAULT;
1659 return do_prlimit(current, resource, &new_rlim, NULL);
1660}
1661
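/*
 * Illustrative userspace usage of setrlimit (a hedged sketch;
 * raise_nofile_soft_limit() is a hypothetical helper name): raise the
 * soft limit up to the hard limit, the common unprivileged pattern
 * since raising rlim_max requires CAP_SYS_RESOURCE.
 *
 *	#include <sys/resource.h>
 *
 *	int raise_nofile_soft_limit(void)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_NOFILE, &rl))
 *			return -1;
 *		rl.rlim_cur = rl.rlim_max;
 *		return setrlimit(RLIMIT_NOFILE, &rl);
 *	}
 */
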
1662/*
1663 * It would make sense to put struct rusage in the task_struct,
1664 * except that would make the task_struct be *really big*. After
1665 * task_struct gets moved into malloc'ed memory, it would
1666 * make sense to do this. It will make moving the rest of the information
1667 * a lot simpler! (We are not doing this right now because we are
1668 * not measuring them yet).
1669 *
1670 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1671 * races with threads incrementing their own counters. But since word
1672 * reads are atomic, we either get new values or old values and we don't
1673 * care which for the sums. We always take the siglock to protect reading
1674 * the c* fields from p->signal from races with exit.c updating those
1675 * fields when reaping, so a sample either gets all the additions of a
1676 * given child after it's reaped, or none so this sample is before reaping.
1677 *
1678 * Locking:
1679 * We need to take the siglock for CHILDREN, SELF and BOTH
1680 * for the cases of current multithreaded, non-current single threaded and
1681 * non-current multithreaded. Thread traversal is now safe with
1682 * the siglock held.
1683 * Strictly speaking, we do not need to take the siglock if we are current and
1684 * single threaded, as no one else can take our signal_struct away, no one
1685 * else can reap the children to update signal->c* counters, and no one else
1686 * can race with the signal-> fields. If we do not take any lock, the
1687 * signal-> fields could be read out of order while another thread was just
1688 * exiting. So we should place a read memory barrier when we avoid the lock.
1689 * On the writer side, write memory barrier is implied in __exit_signal
1690 * as __exit_signal releases the siglock spinlock after updating the signal->
1691 * fields. But we don't do this yet to keep things simple.
1692 *
1693 */
1694
1695static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1696{
1697 r->ru_nvcsw += t->nvcsw;
1698 r->ru_nivcsw += t->nivcsw;
1699 r->ru_minflt += t->min_flt;
1700 r->ru_majflt += t->maj_flt;
1701 r->ru_inblock += task_io_get_inblock(t);
1702 r->ru_oublock += task_io_get_oublock(t);
1703}
1704
1705void getrusage(struct task_struct *p, int who, struct rusage *r)
1706{
1707 struct task_struct *t;
1708 unsigned long flags;
1709 u64 tgutime, tgstime, utime, stime;
1710 unsigned long maxrss = 0;
1711
1712	memset(r, 0, sizeof(*r));
1713 utime = stime = 0;
1714
1715 if (who == RUSAGE_THREAD) {
1716 task_cputime_adjusted(current, &utime, &stime);
1717 accumulate_thread_rusage(p, r);
1718 maxrss = p->signal->maxrss;
1719 goto out;
1720 }
1721
1722 if (!lock_task_sighand(p, &flags))
1723 return;
1724
1725 switch (who) {
1726 case RUSAGE_BOTH:
1727 case RUSAGE_CHILDREN:
1728 utime = p->signal->cutime;
1729 stime = p->signal->cstime;
1730 r->ru_nvcsw = p->signal->cnvcsw;
1731 r->ru_nivcsw = p->signal->cnivcsw;
1732 r->ru_minflt = p->signal->cmin_flt;
1733 r->ru_majflt = p->signal->cmaj_flt;
1734 r->ru_inblock = p->signal->cinblock;
1735 r->ru_oublock = p->signal->coublock;
1736 maxrss = p->signal->cmaxrss;
1737
1738 if (who == RUSAGE_CHILDREN)
1739 break;
1740 /* fall through */
1741
1742 case RUSAGE_SELF:
1743 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1744 utime += tgutime;
1745 stime += tgstime;
1746 r->ru_nvcsw += p->signal->nvcsw;
1747 r->ru_nivcsw += p->signal->nivcsw;
1748 r->ru_minflt += p->signal->min_flt;
1749 r->ru_majflt += p->signal->maj_flt;
1750 r->ru_inblock += p->signal->inblock;
1751 r->ru_oublock += p->signal->oublock;
1752 if (maxrss < p->signal->maxrss)
1753 maxrss = p->signal->maxrss;
1754 t = p;
1755 do {
1756 accumulate_thread_rusage(t, r);
1757 } while_each_thread(p, t);
1758 break;
1759
1760 default:
1761 BUG();
1762 }
1763 unlock_task_sighand(p, &flags);
1764
1765out:
1766 r->ru_utime = ns_to_timeval(utime);
1767 r->ru_stime = ns_to_timeval(stime);
1768
1769 if (who != RUSAGE_CHILDREN) {
1770 struct mm_struct *mm = get_task_mm(p);
1771
1772 if (mm) {
1773 setmax_mm_hiwater_rss(&maxrss, mm);
1774 mmput(mm);
1775 }
1776 }
1777 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1778}
1779
1780SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1781{
1782 struct rusage r;
1783
1784 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1785 who != RUSAGE_THREAD)
1786 return -EINVAL;
1787
1788 getrusage(current, who, &r);
1789 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1790}
1791
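/*
 * Illustrative userspace usage (a hedged sketch; RUSAGE_THREAD is a
 * Linux extension that needs _GNU_SOURCE): sample the calling
 * thread's own counters.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rusage ru;
 *
 *		if (getrusage(RUSAGE_THREAD, &ru))
 *			return 1;
 *		printf("minflt=%ld maxrss=%ld kB\n",
 *		       ru.ru_minflt, ru.ru_maxrss);
 *		return 0;
 *	}
 */
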
1792#ifdef CONFIG_COMPAT
1793COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1794{
1795 struct rusage r;
1796
1797 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1798 who != RUSAGE_THREAD)
1799 return -EINVAL;
1800
1801 getrusage(current, who, &r);
1802 return put_compat_rusage(&r, ru);
1803}
1804#endif
1805
1806SYSCALL_DEFINE1(umask, int, mask)
1807{
1808	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1809 return mask;
1810}
1811
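/*
 * Illustrative userspace usage (a hedged sketch; the 022 value is
 * arbitrary and with_strict_umask() is a hypothetical helper name):
 * umask() cannot fail and always returns the previous mask, mirroring
 * the xchg() above, so a caller can set a temporary mask and restore
 * the old one afterwards.
 *
 *	#include <sys/stat.h>
 *
 *	void with_strict_umask(void)
 *	{
 *		mode_t old_mask = umask(022);
 *
 *		umask(old_mask);
 *	}
 */
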
1812static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1813{
1814 struct fd exe;
1815 struct file *old_exe, *exe_file;
1816 struct inode *inode;
1817 int err;
1818
1819 exe = fdget(fd);
1820 if (!exe.file)
1821 return -EBADF;
1822
1823 inode = file_inode(exe.file);
1824
1825 /*
1826	 * Because the original mm->exe_file points to an executable file,
1827	 * make sure that this one is executable as well so the overall
1828	 * picture remains consistent.
1829 */
1830 err = -EACCES;
1831 if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1832 goto exit;
1833
1834 err = inode_permission(inode, MAY_EXEC);
1835 if (err)
1836 goto exit;
1837
1838 /*
1839 * Forbid mm->exe_file change if old file still mapped.
1840 */
1841 exe_file = get_mm_exe_file(mm);
1842 err = -EBUSY;
1843 if (exe_file) {
1844 struct vm_area_struct *vma;
1845
1846 down_read(&mm->mmap_sem);
1847 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1848 if (!vma->vm_file)
1849 continue;
1850 if (path_equal(&vma->vm_file->f_path,
1851 &exe_file->f_path))
1852 goto exit_err;
1853 }
1854
1855 up_read(&mm->mmap_sem);
1856 fput(exe_file);
1857 }
1858
1859 err = 0;
1860 /* set the new file, lockless */
1861 get_file(exe.file);
1862 old_exe = xchg(&mm->exe_file, exe.file);
1863 if (old_exe)
1864 fput(old_exe);
1865exit:
1866 fdput(exe);
1867 return err;
1868exit_err:
1869 up_read(&mm->mmap_sem);
1870 fput(exe_file);
1871 goto exit;
1872}
1873
1874/*
1875 * Check arithmetic relations of passed addresses.
1876 *
1877 * WARNING: we don't require any capability here so be very careful
1878 * in what is allowed for modification from userspace.
1879 */
1880static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1881{
1882 unsigned long mmap_max_addr = TASK_SIZE;
1883 int error = -EINVAL, i;
1884
1885 static const unsigned char offsets[] = {
1886 offsetof(struct prctl_mm_map, start_code),
1887 offsetof(struct prctl_mm_map, end_code),
1888 offsetof(struct prctl_mm_map, start_data),
1889 offsetof(struct prctl_mm_map, end_data),
1890 offsetof(struct prctl_mm_map, start_brk),
1891 offsetof(struct prctl_mm_map, brk),
1892 offsetof(struct prctl_mm_map, start_stack),
1893 offsetof(struct prctl_mm_map, arg_start),
1894 offsetof(struct prctl_mm_map, arg_end),
1895 offsetof(struct prctl_mm_map, env_start),
1896 offsetof(struct prctl_mm_map, env_end),
1897 };
1898
1899 /*
1900 * Make sure the members are not somewhere outside
1901 * of allowed address space.
1902 */
1903 for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1904 u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1905
1906 if ((unsigned long)val >= mmap_max_addr ||
1907 (unsigned long)val < mmap_min_addr)
1908 goto out;
1909 }
1910
1911 /*
1912 * Make sure the pairs are ordered.
1913 */
1914#define __prctl_check_order(__m1, __op, __m2) \
1915 ((unsigned long)prctl_map->__m1 __op \
1916 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1917 error = __prctl_check_order(start_code, <, end_code);
1918	error |= __prctl_check_order(start_data, <=, end_data);
1919 error |= __prctl_check_order(start_brk, <=, brk);
1920 error |= __prctl_check_order(arg_start, <=, arg_end);
1921 error |= __prctl_check_order(env_start, <=, env_end);
1922 if (error)
1923 goto out;
1924#undef __prctl_check_order
1925
1926 error = -EINVAL;
1927
1928 /*
1929 * @brk should be after @end_data in traditional maps.
1930 */
1931 if (prctl_map->start_brk <= prctl_map->end_data ||
1932 prctl_map->brk <= prctl_map->end_data)
1933 goto out;
1934
1935 /*
1936	 * Nor should we allow overriding the limits if they are set.
1937 */
1938 if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1939 prctl_map->start_brk, prctl_map->end_data,
1940 prctl_map->start_data))
1941 goto out;
1942
1943 error = 0;
1944out:
1945 return error;
1946}
1947
1948#ifdef CONFIG_CHECKPOINT_RESTORE
1949static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1950{
1951 struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1952 unsigned long user_auxv[AT_VECTOR_SIZE];
1953 struct mm_struct *mm = current->mm;
1954 int error;
1955
1956 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1957 BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1958
1959 if (opt == PR_SET_MM_MAP_SIZE)
1960 return put_user((unsigned int)sizeof(prctl_map),
1961 (unsigned int __user *)addr);
1962
1963 if (data_size != sizeof(prctl_map))
1964 return -EINVAL;
1965
1966 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1967 return -EFAULT;
1968
1969 error = validate_prctl_map_addr(&prctl_map);
1970 if (error)
1971 return error;
1972
1973 if (prctl_map.auxv_size) {
1974 /*
1975 * Someone is trying to cheat the auxv vector.
1976 */
1977 if (!prctl_map.auxv ||
1978 prctl_map.auxv_size > sizeof(mm->saved_auxv))
1979 return -EINVAL;
1980
1981 memset(user_auxv, 0, sizeof(user_auxv));
1982 if (copy_from_user(user_auxv,
1983 (const void __user *)prctl_map.auxv,
1984 prctl_map.auxv_size))
1985 return -EFAULT;
1986
1987		/* Last entry must be AT_NULL, as the specification requires */
1988 user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1989 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
1990 }
1991
1992 if (prctl_map.exe_fd != (u32)-1) {
1993 /*
1994		 * Make sure the caller has the right to
1995		 * change the /proc/pid/exe link: only a local sys admin
1996		 * should be allowed to do so.
1997 */
1998 if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
1999 return -EINVAL;
2000
2001 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2002 if (error)
2003 return error;
2004 }
2005
2006 /*
2007	 * arg_lock protects concurrent updates, but we still need mmap_sem for
2008 * read to exclude races with sys_brk.
2009 */
2010 down_read(&mm->mmap_sem);
2011
2012 /*
2013	 * We don't validate that these members point to real,
2014	 * present VMAs because the application may have already
2015	 * unmapped the corresponding VMAs, and the kernel mostly uses
2016	 * these members for statistics output in procfs, except for
2017	 *
2018	 * - @start_brk/@brk, which are used in do_brk but for which the
2019	 *   kernel looks up VMAs when updating them, so a bogus value
2020	 *   written here makes the kernel complain about the userspace
2021	 *   program but won't cause any problem in the kernel itself.
2022 */
2023
2024 spin_lock(&mm->arg_lock);
2025 mm->start_code = prctl_map.start_code;
2026 mm->end_code = prctl_map.end_code;
2027 mm->start_data = prctl_map.start_data;
2028 mm->end_data = prctl_map.end_data;
2029 mm->start_brk = prctl_map.start_brk;
2030 mm->brk = prctl_map.brk;
2031 mm->start_stack = prctl_map.start_stack;
2032 mm->arg_start = prctl_map.arg_start;
2033 mm->arg_end = prctl_map.arg_end;
2034 mm->env_start = prctl_map.env_start;
2035 mm->env_end = prctl_map.env_end;
2036 spin_unlock(&mm->arg_lock);
2037
2038 /*
2039	 * Note that this update of @saved_auxv is lockless, so
2040	 * someone reading this member in procfs while we're
2041	 * updating it may see partially updated results. This is a
2042	 * known and acceptable trade-off: we leave it as is rather
2043	 * than introduce additional locking here and make the
2044	 * kernel more complex.
2045 */
2046 if (prctl_map.auxv_size)
2047 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2048
2049 up_read(&mm->mmap_sem);
2050 return 0;
2051}
2052#endif /* CONFIG_CHECKPOINT_RESTORE */
2053
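/*
 * Illustrative userspace usage of PR_SET_MM_MAP (a hedged sketch for
 * checkpoint/restore style tools; the saved map is assumed to satisfy
 * validate_prctl_map_addr() above, and restore_mm_map() is a
 * hypothetical helper name):
 *
 *	#include <string.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int restore_mm_map(const struct prctl_mm_map *saved)
 *	{
 *		struct prctl_mm_map map;
 *
 *		memcpy(&map, saved, sizeof(map));
 *		map.exe_fd = (unsigned int)-1;
 *		return prctl(PR_SET_MM, PR_SET_MM_MAP,
 *			     (unsigned long)&map, sizeof(map), 0);
 *	}
 *
 * Leaving exe_fd at -1 keeps the current exe file, matching the
 * (u32)-1 check in prctl_set_mm_map() above.
 */
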
2054static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2055 unsigned long len)
2056{
2057 /*
2058 * This doesn't move the auxiliary vector itself since it's pinned to
2059 * mm_struct, but it permits filling the vector with new values. It's
2060 * up to the caller to provide sane values here, otherwise userspace
2061 * tools which use this vector might be unhappy.
2062 */
2063 unsigned long user_auxv[AT_VECTOR_SIZE];
2064
2065 if (len > sizeof(user_auxv))
2066 return -EINVAL;
2067
2068 if (copy_from_user(user_auxv, (const void __user *)addr, len))
2069 return -EFAULT;
2070
2071 /* Make sure the last entry is always AT_NULL */
2072 user_auxv[AT_VECTOR_SIZE - 2] = 0;
2073 user_auxv[AT_VECTOR_SIZE - 1] = 0;
2074
2075 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2076
2077 task_lock(current);
2078 memcpy(mm->saved_auxv, user_auxv, len);
2079 task_unlock(current);
2080
2081 return 0;
2082}
2083
2084static int prctl_set_mm(int opt, unsigned long addr,
2085 unsigned long arg4, unsigned long arg5)
2086{
2087 struct mm_struct *mm = current->mm;
2088 struct prctl_mm_map prctl_map = {
2089 .auxv = NULL,
2090 .auxv_size = 0,
2091 .exe_fd = -1,
2092 };
2093 struct vm_area_struct *vma;
2094 int error;
2095
2096 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2097 opt != PR_SET_MM_MAP &&
2098 opt != PR_SET_MM_MAP_SIZE)))
2099 return -EINVAL;
2100
2101#ifdef CONFIG_CHECKPOINT_RESTORE
2102 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2103 return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2104#endif
2105
2106 if (!capable(CAP_SYS_RESOURCE))
2107 return -EPERM;
2108
2109 if (opt == PR_SET_MM_EXE_FILE)
2110 return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2111
2112 if (opt == PR_SET_MM_AUXV)
2113 return prctl_set_auxv(mm, addr, arg4);
2114
2115 if (addr >= TASK_SIZE || addr < mmap_min_addr)
2116 return -EINVAL;
2117
2118 error = -EINVAL;
2119
2120 /*
2121	 * arg_lock protects concurrent updates of the arg boundaries; we need
2122	 * mmap_sem for a) concurrent sys_brk and b) finding the VMA for addr
2123 * validation.
2124 */
2125 down_read(&mm->mmap_sem);
2126 vma = find_vma(mm, addr);
2127
2128 spin_lock(&mm->arg_lock);
2129 prctl_map.start_code = mm->start_code;
2130 prctl_map.end_code = mm->end_code;
2131 prctl_map.start_data = mm->start_data;
2132 prctl_map.end_data = mm->end_data;
2133 prctl_map.start_brk = mm->start_brk;
2134 prctl_map.brk = mm->brk;
2135 prctl_map.start_stack = mm->start_stack;
2136 prctl_map.arg_start = mm->arg_start;
2137 prctl_map.arg_end = mm->arg_end;
2138 prctl_map.env_start = mm->env_start;
2139 prctl_map.env_end = mm->env_end;
2140
2141 switch (opt) {
2142 case PR_SET_MM_START_CODE:
2143 prctl_map.start_code = addr;
2144 break;
2145 case PR_SET_MM_END_CODE:
2146 prctl_map.end_code = addr;
2147 break;
2148 case PR_SET_MM_START_DATA:
2149 prctl_map.start_data = addr;
2150 break;
2151 case PR_SET_MM_END_DATA:
2152 prctl_map.end_data = addr;
2153 break;
2154 case PR_SET_MM_START_STACK:
2155 prctl_map.start_stack = addr;
2156 break;
2157 case PR_SET_MM_START_BRK:
2158 prctl_map.start_brk = addr;
2159 break;
2160 case PR_SET_MM_BRK:
2161 prctl_map.brk = addr;
2162 break;
2163 case PR_SET_MM_ARG_START:
2164 prctl_map.arg_start = addr;
2165 break;
2166 case PR_SET_MM_ARG_END:
2167 prctl_map.arg_end = addr;
2168 break;
2169 case PR_SET_MM_ENV_START:
2170 prctl_map.env_start = addr;
2171 break;
2172 case PR_SET_MM_ENV_END:
2173 prctl_map.env_end = addr;
2174 break;
2175 default:
2176 goto out;
2177 }
2178
2179 error = validate_prctl_map_addr(&prctl_map);
2180 if (error)
2181 goto out;
2182
2183 switch (opt) {
2184 /*
2185	 * If the command line arguments and environment
2186	 * are placed somewhere else on the stack, we can
2187	 * set them up here: ARG_START/END to set up the
2188	 * command line arguments and ENV_START/END for
2189	 * the environment.
2190 */
2191 case PR_SET_MM_START_STACK:
2192 case PR_SET_MM_ARG_START:
2193 case PR_SET_MM_ARG_END:
2194 case PR_SET_MM_ENV_START:
2195 case PR_SET_MM_ENV_END:
2196 if (!vma) {
2197 error = -EFAULT;
2198 goto out;
2199 }
2200 }
2201
2202 mm->start_code = prctl_map.start_code;
2203 mm->end_code = prctl_map.end_code;
2204 mm->start_data = prctl_map.start_data;
2205 mm->end_data = prctl_map.end_data;
2206 mm->start_brk = prctl_map.start_brk;
2207 mm->brk = prctl_map.brk;
2208 mm->start_stack = prctl_map.start_stack;
2209 mm->arg_start = prctl_map.arg_start;
2210 mm->arg_end = prctl_map.arg_end;
2211 mm->env_start = prctl_map.env_start;
2212 mm->env_end = prctl_map.env_end;
2213
2214 error = 0;
2215out:
2216 spin_unlock(&mm->arg_lock);
2217 up_read(&mm->mmap_sem);
2218 return error;
2219}
2220
2221#ifdef CONFIG_CHECKPOINT_RESTORE
2222static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2223{
2224 return put_user(me->clear_child_tid, tid_addr);
2225}
2226#else
2227static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2228{
2229 return -EINVAL;
2230}
2231#endif
2232
2233static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2234{
2235 /*
2236	 * If the task already has has_child_subreaper set, all its
2237	 * descendants have the flag too and new descendants will
2238	 * inherit it on fork, so skip them.
2239	 *
2240	 * If we've found the child_reaper, skip descendants in
2241	 * its subtree as they will never leave that pidns.
2242 */
2243 if (p->signal->has_child_subreaper ||
2244 is_child_reaper(task_pid(p)))
2245 return 0;
2246
2247 p->signal->has_child_subreaper = 1;
2248 return 1;
2249}
2250
2251int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2252{
2253 return -EINVAL;
2254}
2255
2256int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2257 unsigned long ctrl)
2258{
2259 return -EINVAL;
2260}
2261
2262SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2263 unsigned long, arg4, unsigned long, arg5)
2264{
2265 struct task_struct *me = current;
2266 unsigned char comm[sizeof(me->comm)];
2267 long error;
2268
2269 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2270 if (error != -ENOSYS)
2271 return error;
2272
2273 error = 0;
2274 switch (option) {
2275 case PR_SET_PDEATHSIG:
2276 if (!valid_signal(arg2)) {
2277 error = -EINVAL;
2278 break;
2279 }
2280 me->pdeath_signal = arg2;
2281 break;
2282 case PR_GET_PDEATHSIG:
2283 error = put_user(me->pdeath_signal, (int __user *)arg2);
2284 break;
2285 case PR_GET_DUMPABLE:
2286 error = get_dumpable(me->mm);
2287 break;
2288 case PR_SET_DUMPABLE:
2289 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2290 error = -EINVAL;
2291 break;
2292 }
2293 set_dumpable(me->mm, arg2);
2294 break;
2295
2296 case PR_SET_UNALIGN:
2297 error = SET_UNALIGN_CTL(me, arg2);
2298 break;
2299 case PR_GET_UNALIGN:
2300 error = GET_UNALIGN_CTL(me, arg2);
2301 break;
2302 case PR_SET_FPEMU:
2303 error = SET_FPEMU_CTL(me, arg2);
2304 break;
2305 case PR_GET_FPEMU:
2306 error = GET_FPEMU_CTL(me, arg2);
2307 break;
2308 case PR_SET_FPEXC:
2309 error = SET_FPEXC_CTL(me, arg2);
2310 break;
2311 case PR_GET_FPEXC:
2312 error = GET_FPEXC_CTL(me, arg2);
2313 break;
2314 case PR_GET_TIMING:
2315 error = PR_TIMING_STATISTICAL;
2316 break;
2317 case PR_SET_TIMING:
2318 if (arg2 != PR_TIMING_STATISTICAL)
2319 error = -EINVAL;
2320 break;
2321 case PR_SET_NAME:
2322 comm[sizeof(me->comm) - 1] = 0;
2323 if (strncpy_from_user(comm, (char __user *)arg2,
2324 sizeof(me->comm) - 1) < 0)
2325 return -EFAULT;
2326 set_task_comm(me, comm);
2327 proc_comm_connector(me);
2328 break;
2329 case PR_GET_NAME:
2330 get_task_comm(comm, me);
2331 if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2332 return -EFAULT;
2333 break;
2334 case PR_GET_ENDIAN:
2335 error = GET_ENDIAN(me, arg2);
2336 break;
2337 case PR_SET_ENDIAN:
2338 error = SET_ENDIAN(me, arg2);
2339 break;
2340 case PR_GET_SECCOMP:
2341 error = prctl_get_seccomp();
2342 break;
2343 case PR_SET_SECCOMP:
2344 error = prctl_set_seccomp(arg2, (char __user *)arg3);
2345 break;
2346 case PR_GET_TSC:
2347 error = GET_TSC_CTL(arg2);
2348 break;
2349 case PR_SET_TSC:
2350 error = SET_TSC_CTL(arg2);
2351 break;
2352 case PR_TASK_PERF_EVENTS_DISABLE:
2353 error = perf_event_task_disable();
2354 break;
2355 case PR_TASK_PERF_EVENTS_ENABLE:
2356 error = perf_event_task_enable();
2357 break;
2358 case PR_GET_TIMERSLACK:
2359 if (current->timer_slack_ns > ULONG_MAX)
2360 error = ULONG_MAX;
2361 else
2362 error = current->timer_slack_ns;
2363 break;
2364 case PR_SET_TIMERSLACK:
2365 if (arg2 <= 0)
2366 current->timer_slack_ns =
2367 current->default_timer_slack_ns;
2368 else
2369 current->timer_slack_ns = arg2;
2370 break;
2371 case PR_MCE_KILL:
2372 if (arg4 | arg5)
2373 return -EINVAL;
2374 switch (arg2) {
2375 case PR_MCE_KILL_CLEAR:
2376 if (arg3 != 0)
2377 return -EINVAL;
2378 current->flags &= ~PF_MCE_PROCESS;
2379 break;
2380 case PR_MCE_KILL_SET:
2381 current->flags |= PF_MCE_PROCESS;
2382 if (arg3 == PR_MCE_KILL_EARLY)
2383 current->flags |= PF_MCE_EARLY;
2384 else if (arg3 == PR_MCE_KILL_LATE)
2385 current->flags &= ~PF_MCE_EARLY;
2386 else if (arg3 == PR_MCE_KILL_DEFAULT)
2387 current->flags &=
2388 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
2389 else
2390 return -EINVAL;
2391 break;
2392 default:
2393 return -EINVAL;
2394 }
2395 break;
2396 case PR_MCE_KILL_GET:
2397 if (arg2 | arg3 | arg4 | arg5)
2398 return -EINVAL;
2399 if (current->flags & PF_MCE_PROCESS)
2400 error = (current->flags & PF_MCE_EARLY) ?
2401 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2402 else
2403 error = PR_MCE_KILL_DEFAULT;
2404 break;
2405 case PR_SET_MM:
2406 error = prctl_set_mm(arg2, arg3, arg4, arg5);
2407 break;
2408 case PR_GET_TID_ADDRESS:
2409 error = prctl_get_tid_address(me, (int __user **)arg2);
2410 break;
2411 case PR_SET_CHILD_SUBREAPER:
2412 me->signal->is_child_subreaper = !!arg2;
2413 if (!arg2)
2414 break;
2415
2416 walk_process_tree(me, propagate_has_child_subreaper, NULL);
2417 break;
2418 case PR_GET_CHILD_SUBREAPER:
2419 error = put_user(me->signal->is_child_subreaper,
2420 (int __user *)arg2);
2421 break;
2422 case PR_SET_NO_NEW_PRIVS:
2423 if (arg2 != 1 || arg3 || arg4 || arg5)
2424 return -EINVAL;
2425
2426 task_set_no_new_privs(current);
2427 break;
2428 case PR_GET_NO_NEW_PRIVS:
2429 if (arg2 || arg3 || arg4 || arg5)
2430 return -EINVAL;
2431 return task_no_new_privs(current) ? 1 : 0;
2432 case PR_GET_THP_DISABLE:
2433 if (arg2 || arg3 || arg4 || arg5)
2434 return -EINVAL;
2435 error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2436 break;
2437 case PR_SET_THP_DISABLE:
2438 if (arg3 || arg4 || arg5)
2439 return -EINVAL;
2440 if (down_write_killable(&me->mm->mmap_sem))
2441 return -EINTR;
2442 if (arg2)
2443 set_bit(MMF_DISABLE_THP, &me->mm->flags);
2444 else
2445 clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2446 up_write(&me->mm->mmap_sem);
2447 break;
2448 case PR_MPX_ENABLE_MANAGEMENT:
2449 case PR_MPX_DISABLE_MANAGEMENT:
2450 /* No longer implemented: */
2451 return -EINVAL;
2452 case PR_SET_FP_MODE:
2453 error = SET_FP_MODE(me, arg2);
2454 break;
2455 case PR_GET_FP_MODE:
2456 error = GET_FP_MODE(me);
2457 break;
2458 case PR_SVE_SET_VL:
2459 error = SVE_SET_VL(arg2);
2460 break;
2461 case PR_SVE_GET_VL:
2462 error = SVE_GET_VL();
2463 break;
2464 case PR_GET_SPECULATION_CTRL:
2465 if (arg3 || arg4 || arg5)
2466 return -EINVAL;
2467 error = arch_prctl_spec_ctrl_get(me, arg2);
2468 break;
2469 case PR_SET_SPECULATION_CTRL:
2470 if (arg4 || arg5)
2471 return -EINVAL;
2472 error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2473 break;
2474 case PR_PAC_RESET_KEYS:
2475 if (arg3 || arg4 || arg5)
2476 return -EINVAL;
2477 error = PAC_RESET_KEYS(me, arg2);
2478 break;
2479 case PR_SET_TAGGED_ADDR_CTRL:
2480 if (arg3 || arg4 || arg5)
2481 return -EINVAL;
2482 error = SET_TAGGED_ADDR_CTRL(arg2);
2483 break;
2484 case PR_GET_TAGGED_ADDR_CTRL:
2485 if (arg2 || arg3 || arg4 || arg5)
2486 return -EINVAL;
2487 error = GET_TAGGED_ADDR_CTRL();
2488 break;
2489 default:
2490 error = -EINVAL;
2491 break;
2492 }
2493 return error;
2494}
2495
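/*
 * Illustrative userspace usage of the PR_SET_NAME/PR_GET_NAME pair
 * handled above (a hedged sketch; the task name is limited to 16
 * bytes including the NUL terminator):
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		char comm[16];
 *
 *		prctl(PR_SET_NAME, (unsigned long)"worker-0");
 *		prctl(PR_GET_NAME, (unsigned long)comm);
 *		printf("comm=%s\n", comm);
 *		return 0;
 *	}
 */
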
2496SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2497 struct getcpu_cache __user *, unused)
2498{
2499 int err = 0;
2500 int cpu = raw_smp_processor_id();
2501
2502 if (cpup)
2503 err |= put_user(cpu, cpup);
2504 if (nodep)
2505 err |= put_user(cpu_to_node(cpu), nodep);
2506 return err ? -EFAULT : 0;
2507}
2508
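/*
 * Illustrative userspace usage (a hedged sketch; glibc >= 2.29
 * provides a getcpu() wrapper for this syscall, and sched_getcpu()
 * is an older alternative):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int cpu, node;
 *
 *		if (getcpu(&cpu, &node))
 *			return 1;
 *		printf("cpu=%u node=%u\n", cpu, node);
 *		return 0;
 *	}
 */
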
2509/**
2510 * do_sysinfo - fill in sysinfo struct
2511 * @info: pointer to buffer to fill
2512 */
2513static int do_sysinfo(struct sysinfo *info)
2514{
2515 unsigned long mem_total, sav_total;
2516 unsigned int mem_unit, bitcount;
2517 struct timespec64 tp;
2518
2519 memset(info, 0, sizeof(struct sysinfo));
2520
2521 ktime_get_boottime_ts64(&tp);
2522 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2523
2524 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2525
2526 info->procs = nr_threads;
2527
2528 si_meminfo(info);
2529 si_swapinfo(info);
2530
2531 /*
2532 * If the sum of all the available memory (i.e. ram + swap)
2533 * is less than can be stored in a 32 bit unsigned long then
2534 * we can be binary compatible with 2.2.x kernels. If not,
2535 * well, in that case 2.2.x was broken anyways...
2536 *
2537 * -Erik Andersen <andersee@debian.org>
2538 */
2539
2540 mem_total = info->totalram + info->totalswap;
2541 if (mem_total < info->totalram || mem_total < info->totalswap)
2542 goto out;
2543 bitcount = 0;
2544 mem_unit = info->mem_unit;
2545 while (mem_unit > 1) {
2546 bitcount++;
2547 mem_unit >>= 1;
2548 sav_total = mem_total;
2549 mem_total <<= 1;
2550 if (mem_total < sav_total)
2551 goto out;
2552 }
2553
2554 /*
2555 * If mem_total did not overflow, multiply all memory values by
2556 * info->mem_unit and set it to 1. This leaves things compatible
2557 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2558 * kernels...
2559 */
2560
2561 info->mem_unit = 1;
2562 info->totalram <<= bitcount;
2563 info->freeram <<= bitcount;
2564 info->sharedram <<= bitcount;
2565 info->bufferram <<= bitcount;
2566 info->totalswap <<= bitcount;
2567 info->freeswap <<= bitcount;
2568 info->totalhigh <<= bitcount;
2569 info->freehigh <<= bitcount;
2570
2571out:
2572 return 0;
2573}
2574
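/*
 * Worked example of the mem_unit folding above (a comment-only sketch
 * with assumed numbers): with 4 KiB pages, si_meminfo() reports
 * mem_unit == 4096, so bitcount ends up as 12, mem_unit is set to 1
 * and every value is shifted left by 12; e.g. a totalram of 0x80000
 * pages becomes 0x80000000 bytes. If doubling mem_total would wrap an
 * unsigned long (more than 4 GiB of RAM plus swap on a 32-bit
 * kernel), the loop bails out and mem_unit stays at the page size
 * instead.
 */
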
2575SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2576{
2577 struct sysinfo val;
2578
2579 do_sysinfo(&val);
2580
2581 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2582 return -EFAULT;
2583
2584 return 0;
2585}
2586
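/*
 * Illustrative userspace usage (a hedged sketch): total RAM in bytes
 * is totalram scaled by mem_unit, which do_sysinfo() folds down to 1
 * whenever that product fits in an unsigned long.
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	int main(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si))
 *			return 1;
 *		printf("uptime=%lds totalram=%lu x %u bytes\n",
 *		       si.uptime, si.totalram, si.mem_unit);
 *		return 0;
 *	}
 */
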
2587#ifdef CONFIG_COMPAT
2588struct compat_sysinfo {
2589 s32 uptime;
2590 u32 loads[3];
2591 u32 totalram;
2592 u32 freeram;
2593 u32 sharedram;
2594 u32 bufferram;
2595 u32 totalswap;
2596 u32 freeswap;
2597 u16 procs;
2598 u16 pad;
2599 u32 totalhigh;
2600 u32 freehigh;
2601 u32 mem_unit;
2602 char _f[20-2*sizeof(u32)-sizeof(int)];
2603};
2604
2605COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2606{
2607 struct sysinfo s;
2608
2609 do_sysinfo(&s);
2610
2611 /* Check to see if any memory value is too large for 32-bit and scale
2612 * down if needed
2613 */
2614 if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2615 int bitcount = 0;
2616
2617 while (s.mem_unit < PAGE_SIZE) {
2618 s.mem_unit <<= 1;
2619 bitcount++;
2620 }
2621
2622 s.totalram >>= bitcount;
2623 s.freeram >>= bitcount;
2624 s.sharedram >>= bitcount;
2625 s.bufferram >>= bitcount;
2626 s.totalswap >>= bitcount;
2627 s.freeswap >>= bitcount;
2628 s.totalhigh >>= bitcount;
2629 s.freehigh >>= bitcount;
2630 }
2631
2632 if (!access_ok(info, sizeof(struct compat_sysinfo)) ||
2633 __put_user(s.uptime, &info->uptime) ||
2634 __put_user(s.loads[0], &info->loads[0]) ||
2635 __put_user(s.loads[1], &info->loads[1]) ||
2636 __put_user(s.loads[2], &info->loads[2]) ||
2637 __put_user(s.totalram, &info->totalram) ||
2638 __put_user(s.freeram, &info->freeram) ||
2639 __put_user(s.sharedram, &info->sharedram) ||
2640 __put_user(s.bufferram, &info->bufferram) ||
2641 __put_user(s.totalswap, &info->totalswap) ||
2642 __put_user(s.freeswap, &info->freeswap) ||
2643 __put_user(s.procs, &info->procs) ||
2644 __put_user(s.totalhigh, &info->totalhigh) ||
2645 __put_user(s.freehigh, &info->freehigh) ||
2646 __put_user(s.mem_unit, &info->mem_unit))
2647 return -EFAULT;
2648
2649 return 0;
2650}
2651#endif /* CONFIG_COMPAT */