1/*
2 * linux/kernel/sys.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7#include <linux/export.h>
8#include <linux/mm.h>
9#include <linux/utsname.h>
10#include <linux/mman.h>
11#include <linux/reboot.h>
12#include <linux/prctl.h>
13#include <linux/highuid.h>
14#include <linux/fs.h>
15#include <linux/kmod.h>
16#include <linux/perf_event.h>
17#include <linux/resource.h>
18#include <linux/kernel.h>
19#include <linux/workqueue.h>
20#include <linux/capability.h>
21#include <linux/device.h>
22#include <linux/key.h>
23#include <linux/times.h>
24#include <linux/posix-timers.h>
25#include <linux/security.h>
26#include <linux/dcookies.h>
27#include <linux/suspend.h>
28#include <linux/tty.h>
29#include <linux/signal.h>
30#include <linux/cn_proc.h>
31#include <linux/getcpu.h>
32#include <linux/task_io_accounting_ops.h>
33#include <linux/seccomp.h>
34#include <linux/cpu.h>
35#include <linux/personality.h>
36#include <linux/ptrace.h>
37#include <linux/fs_struct.h>
38#include <linux/file.h>
39#include <linux/mount.h>
40#include <linux/gfp.h>
41#include <linux/syscore_ops.h>
42#include <linux/version.h>
43#include <linux/ctype.h>
44
45#include <linux/compat.h>
46#include <linux/syscalls.h>
47#include <linux/kprobes.h>
48#include <linux/user_namespace.h>
49#include <linux/binfmts.h>
50
51#include <linux/sched.h>
52#include <linux/rcupdate.h>
53#include <linux/uidgid.h>
54#include <linux/cred.h>
55
56#include <linux/kmsg_dump.h>
57/* Move somewhere else to avoid recompiling? */
58#include <generated/utsrelease.h>
59
60#include <asm/uaccess.h>
61#include <asm/io.h>
62#include <asm/unistd.h>
63
64#ifndef SET_UNALIGN_CTL
65# define SET_UNALIGN_CTL(a, b) (-EINVAL)
66#endif
67#ifndef GET_UNALIGN_CTL
68# define GET_UNALIGN_CTL(a, b) (-EINVAL)
69#endif
70#ifndef SET_FPEMU_CTL
71# define SET_FPEMU_CTL(a, b) (-EINVAL)
72#endif
73#ifndef GET_FPEMU_CTL
74# define GET_FPEMU_CTL(a, b) (-EINVAL)
75#endif
76#ifndef SET_FPEXC_CTL
77# define SET_FPEXC_CTL(a, b) (-EINVAL)
78#endif
79#ifndef GET_FPEXC_CTL
80# define GET_FPEXC_CTL(a, b) (-EINVAL)
81#endif
82#ifndef GET_ENDIAN
83# define GET_ENDIAN(a, b) (-EINVAL)
84#endif
85#ifndef SET_ENDIAN
86# define SET_ENDIAN(a, b) (-EINVAL)
87#endif
88#ifndef GET_TSC_CTL
89# define GET_TSC_CTL(a) (-EINVAL)
90#endif
91#ifndef SET_TSC_CTL
92# define SET_TSC_CTL(a) (-EINVAL)
93#endif
94#ifndef MPX_ENABLE_MANAGEMENT
95# define MPX_ENABLE_MANAGEMENT() (-EINVAL)
96#endif
97#ifndef MPX_DISABLE_MANAGEMENT
98# define MPX_DISABLE_MANAGEMENT() (-EINVAL)
99#endif
100#ifndef GET_FP_MODE
101# define GET_FP_MODE(a) (-EINVAL)
102#endif
103#ifndef SET_FP_MODE
104# define SET_FP_MODE(a,b) (-EINVAL)
105#endif
106
107/*
108 * this is where the system-wide overflow UID and GID are defined, for
109 * architectures that now have 32-bit UID/GID but didn't in the past
110 */
111
112int overflowuid = DEFAULT_OVERFLOWUID;
113int overflowgid = DEFAULT_OVERFLOWGID;
114
115EXPORT_SYMBOL(overflowuid);
116EXPORT_SYMBOL(overflowgid);
117
118/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures
121 */
122
123int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
125
126EXPORT_SYMBOL(fs_overflowuid);
127EXPORT_SYMBOL(fs_overflowgid);
128
129/*
130 * Returns true if current's euid is same as p's uid or euid,
131 * or has CAP_SYS_NICE to p's user_ns.
132 *
133 * Called with rcu_read_lock, creds are safe
134 */
135static bool set_one_prio_perm(struct task_struct *p)
136{
137 const struct cred *cred = current_cred(), *pcred = __task_cred(p);
138
139 if (uid_eq(pcred->uid, cred->euid) ||
140 uid_eq(pcred->euid, cred->euid))
141 return true;
142 if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
143 return true;
144 return false;
145}
146
147/*
148 * set the priority of a task
149 * - the caller must hold the RCU read lock
150 */
151static int set_one_prio(struct task_struct *p, int niceval, int error)
152{
153 int no_nice;
154
155 if (!set_one_prio_perm(p)) {
156 error = -EPERM;
157 goto out;
158 }
159 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
160 error = -EACCES;
161 goto out;
162 }
163 no_nice = security_task_setnice(p, niceval);
164 if (no_nice) {
165 error = no_nice;
166 goto out;
167 }
168 if (error == -ESRCH)
169 error = 0;
170 set_user_nice(p, niceval);
171out:
172 return error;
173}
174
175SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
176{
177 struct task_struct *g, *p;
178 struct user_struct *user;
179 const struct cred *cred = current_cred();
180 int error = -EINVAL;
181 struct pid *pgrp;
182 kuid_t uid;
183
184 if (which > PRIO_USER || which < PRIO_PROCESS)
185 goto out;
186
187 /* normalize: avoid signed division (rounding problems) */
188 error = -ESRCH;
189 if (niceval < MIN_NICE)
190 niceval = MIN_NICE;
191 if (niceval > MAX_NICE)
192 niceval = MAX_NICE;
193
194 rcu_read_lock();
195 read_lock(&tasklist_lock);
196 switch (which) {
197 case PRIO_PROCESS:
198 if (who)
199 p = find_task_by_vpid(who);
200 else
201 p = current;
202 if (p)
203 error = set_one_prio(p, niceval, error);
204 break;
205 case PRIO_PGRP:
206 if (who)
207 pgrp = find_vpid(who);
208 else
209 pgrp = task_pgrp(current);
210 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
211 error = set_one_prio(p, niceval, error);
212 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
213 break;
214 case PRIO_USER:
215 uid = make_kuid(cred->user_ns, who);
216 user = cred->user;
217 if (!who)
218 uid = cred->uid;
219 else if (!uid_eq(uid, cred->uid)) {
220 user = find_user(uid);
221 if (!user)
222 goto out_unlock; /* No processes for this user */
223 }
224 do_each_thread(g, p) {
225 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
226 error = set_one_prio(p, niceval, error);
227 } while_each_thread(g, p);
228 if (!uid_eq(uid, cred->uid))
229 free_uid(user); /* For find_user() */
230 break;
231 }
232out_unlock:
233 read_unlock(&tasklist_lock);
234 rcu_read_unlock();
235out:
236 return error;
237}
238
239/*
240 * Ugh. To avoid negative return values, "getpriority()" will
241 * not return the normal nice-value, but a negated value that
242 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
243 * to stay compatible.
244 */
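/*
 * Purely illustrative, not part of the original source: because of the
 * offset above, the raw syscall reports a task at nice -5 as 25 and a task
 * at nice 19 as 1, so a hypothetical caller using syscall(2) directly has
 * to undo the offset itself:
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (ret != -1)
 *		nice_value = 20 - ret;
 *
 * (The glibc getpriority() wrapper already performs this conversion.)
 */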
245SYSCALL_DEFINE2(getpriority, int, which, int, who)
246{
247 struct task_struct *g, *p;
248 struct user_struct *user;
249 const struct cred *cred = current_cred();
250 long niceval, retval = -ESRCH;
251 struct pid *pgrp;
252 kuid_t uid;
253
254 if (which > PRIO_USER || which < PRIO_PROCESS)
255 return -EINVAL;
256
257 rcu_read_lock();
258 read_lock(&tasklist_lock);
259 switch (which) {
260 case PRIO_PROCESS:
261 if (who)
262 p = find_task_by_vpid(who);
263 else
264 p = current;
265 if (p) {
266 niceval = nice_to_rlimit(task_nice(p));
267 if (niceval > retval)
268 retval = niceval;
269 }
270 break;
271 case PRIO_PGRP:
272 if (who)
273 pgrp = find_vpid(who);
274 else
275 pgrp = task_pgrp(current);
276 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
277 niceval = nice_to_rlimit(task_nice(p));
278 if (niceval > retval)
279 retval = niceval;
280 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
281 break;
282 case PRIO_USER:
283 uid = make_kuid(cred->user_ns, who);
284 user = cred->user;
285 if (!who)
286 uid = cred->uid;
287 else if (!uid_eq(uid, cred->uid)) {
288 user = find_user(uid);
289 if (!user)
290 goto out_unlock; /* No processes for this user */
291 }
292 do_each_thread(g, p) {
293 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
294 niceval = nice_to_rlimit(task_nice(p));
295 if (niceval > retval)
296 retval = niceval;
297 }
298 } while_each_thread(g, p);
299 if (!uid_eq(uid, cred->uid))
300 free_uid(user); /* for find_user() */
301 break;
302 }
303out_unlock:
304 read_unlock(&tasklist_lock);
305 rcu_read_unlock();
306
307 return retval;
308}
309
310/*
311 * Unprivileged users may change the real gid to the effective gid
312 * or vice versa. (BSD-style)
313 *
314 * If you set the real gid at all, or set the effective gid to a value not
315 * equal to the real gid, then the saved gid is set to the new effective gid.
316 *
317 * This makes it possible for a setgid program to completely drop its
318 * privileges, which is often a useful assertion to make when you are doing
319 * a security audit over a program.
320 *
321 * The general idea is that a program which uses just setregid() will be
322 * 100% compatible with BSD. A program which uses just setgid() will be
323 * 100% compatible with POSIX with saved IDs.
324 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 * operations (as far as semantic preservation is concerned).
327 */
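/*
 * Minimal userspace sketch of the permanent drop described above (purely
 * illustrative, not part of this file): a setgid program gives up its
 * elevated group by writing the real gid into both fields, which per the
 * rule above also rewrites the saved gid, so the privilege cannot be
 * regained later:
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) == -1)
 *		exit(1);
 */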
328#ifdef CONFIG_MULTIUSER
329SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
330{
331 struct user_namespace *ns = current_user_ns();
332 const struct cred *old;
333 struct cred *new;
334 int retval;
335 kgid_t krgid, kegid;
336
337 krgid = make_kgid(ns, rgid);
338 kegid = make_kgid(ns, egid);
339
340 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
341 return -EINVAL;
342 if ((egid != (gid_t) -1) && !gid_valid(kegid))
343 return -EINVAL;
344
345 new = prepare_creds();
346 if (!new)
347 return -ENOMEM;
348 old = current_cred();
349
350 retval = -EPERM;
351 if (rgid != (gid_t) -1) {
352 if (gid_eq(old->gid, krgid) ||
353 gid_eq(old->egid, krgid) ||
354 ns_capable(old->user_ns, CAP_SETGID))
355 new->gid = krgid;
356 else
357 goto error;
358 }
359 if (egid != (gid_t) -1) {
360 if (gid_eq(old->gid, kegid) ||
361 gid_eq(old->egid, kegid) ||
362 gid_eq(old->sgid, kegid) ||
363 ns_capable(old->user_ns, CAP_SETGID))
364 new->egid = kegid;
365 else
366 goto error;
367 }
368
369 if (rgid != (gid_t) -1 ||
370 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
371 new->sgid = new->egid;
372 new->fsgid = new->egid;
373
374 return commit_creds(new);
375
376error:
377 abort_creds(new);
378 return retval;
379}
380
381/*
382 * setgid() is implemented like SysV w/ SAVED_IDS
383 *
384 * SMP: Same implicit races as above.
385 */
386SYSCALL_DEFINE1(setgid, gid_t, gid)
387{
388 struct user_namespace *ns = current_user_ns();
389 const struct cred *old;
390 struct cred *new;
391 int retval;
392 kgid_t kgid;
393
394 kgid = make_kgid(ns, gid);
395 if (!gid_valid(kgid))
396 return -EINVAL;
397
398 new = prepare_creds();
399 if (!new)
400 return -ENOMEM;
401 old = current_cred();
402
403 retval = -EPERM;
404 if (ns_capable(old->user_ns, CAP_SETGID))
405 new->gid = new->egid = new->sgid = new->fsgid = kgid;
406 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
407 new->egid = new->fsgid = kgid;
408 else
409 goto error;
410
411 return commit_creds(new);
412
413error:
414 abort_creds(new);
415 return retval;
416}
417
418/*
419 * change the user struct in a credentials set to match the new UID
420 */
421static int set_user(struct cred *new)
422{
423 struct user_struct *new_user;
424
425 new_user = alloc_uid(new->uid);
426 if (!new_user)
427 return -EAGAIN;
428
429 /*
430 * We don't fail in case of NPROC limit excess here because too many
431 * poorly written programs don't check set*uid() return code, assuming
432 * it never fails if called by root. We may still enforce NPROC limit
433 * for programs doing set*uid()+execve() by harmlessly deferring the
434 * failure to the execve() stage.
435 */
436 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
437 new_user != INIT_USER)
438 current->flags |= PF_NPROC_EXCEEDED;
439 else
440 current->flags &= ~PF_NPROC_EXCEEDED;
441
442 free_uid(new->user);
443 new->user = new_user;
444 return 0;
445}
446
447/*
448 * Unprivileged users may change the real uid to the effective uid
449 * or vice versa. (BSD-style)
450 *
451 * If you set the real uid at all, or set the effective uid to a value not
452 * equal to the real uid, then the saved uid is set to the new effective uid.
453 *
454 * This makes it possible for a setuid program to completely drop its
455 * privileges, which is often a useful assertion to make when you are doing
456 * a security audit over a program.
457 *
458 * The general idea is that a program which uses just setreuid() will be
459 * 100% compatible with BSD. A program which uses just setuid() will be
460 * 100% compatible with POSIX with saved IDs.
461 */
462SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
463{
464 struct user_namespace *ns = current_user_ns();
465 const struct cred *old;
466 struct cred *new;
467 int retval;
468 kuid_t kruid, keuid;
469
470 kruid = make_kuid(ns, ruid);
471 keuid = make_kuid(ns, euid);
472
473 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
474 return -EINVAL;
475 if ((euid != (uid_t) -1) && !uid_valid(keuid))
476 return -EINVAL;
477
478 new = prepare_creds();
479 if (!new)
480 return -ENOMEM;
481 old = current_cred();
482
483 retval = -EPERM;
484 if (ruid != (uid_t) -1) {
485 new->uid = kruid;
486 if (!uid_eq(old->uid, kruid) &&
487 !uid_eq(old->euid, kruid) &&
488 !ns_capable(old->user_ns, CAP_SETUID))
489 goto error;
490 }
491
492 if (euid != (uid_t) -1) {
493 new->euid = keuid;
494 if (!uid_eq(old->uid, keuid) &&
495 !uid_eq(old->euid, keuid) &&
496 !uid_eq(old->suid, keuid) &&
497 !ns_capable(old->user_ns, CAP_SETUID))
498 goto error;
499 }
500
501 if (!uid_eq(new->uid, old->uid)) {
502 retval = set_user(new);
503 if (retval < 0)
504 goto error;
505 }
506 if (ruid != (uid_t) -1 ||
507 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
508 new->suid = new->euid;
509 new->fsuid = new->euid;
510
511 retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
512 if (retval < 0)
513 goto error;
514
515 return commit_creds(new);
516
517error:
518 abort_creds(new);
519 return retval;
520}
521
522/*
523 * setuid() is implemented like SysV with SAVED_IDS
524 *
525 * Note that SAVED_ID's is deficient in that a setuid root program
526 * like sendmail, for example, cannot set its uid to be a normal
527 * user and then switch back, because if you're root, setuid() sets
528 * the saved uid too. If you don't like this, blame the bright people
529 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
530 * will allow a root program to temporarily drop privileges and be able to
531 * regain them by swapping the real and effective uid.
532 */
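/*
 * Illustration of the difference (userspace view, not part of this file):
 * a root process that only wants to drop privileges temporarily must avoid
 * setuid(), precisely because of the saved-uid behaviour described above:
 *
 *	seteuid(1000);	(euid becomes 1000, the saved uid stays 0)
 *	... do unprivileged work ...
 *	seteuid(0);	(allowed again, because the saved uid is still 0)
 *
 * Calling setuid(1000) as root would have set the saved uid to 1000 as
 * well, making the switch back impossible.
 */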
533SYSCALL_DEFINE1(setuid, uid_t, uid)
534{
535 struct user_namespace *ns = current_user_ns();
536 const struct cred *old;
537 struct cred *new;
538 int retval;
539 kuid_t kuid;
540
541 kuid = make_kuid(ns, uid);
542 if (!uid_valid(kuid))
543 return -EINVAL;
544
545 new = prepare_creds();
546 if (!new)
547 return -ENOMEM;
548 old = current_cred();
549
550 retval = -EPERM;
551 if (ns_capable(old->user_ns, CAP_SETUID)) {
552 new->suid = new->uid = kuid;
553 if (!uid_eq(kuid, old->uid)) {
554 retval = set_user(new);
555 if (retval < 0)
556 goto error;
557 }
558 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
559 goto error;
560 }
561
562 new->fsuid = new->euid = kuid;
563
564 retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
565 if (retval < 0)
566 goto error;
567
568 return commit_creds(new);
569
570error:
571 abort_creds(new);
572 return retval;
573}
574
575
576/*
577 * This function implements a generic ability to update ruid, euid,
578 * and suid. This allows you to implement the 4.4 compatible seteuid().
579 */
580SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
581{
582 struct user_namespace *ns = current_user_ns();
583 const struct cred *old;
584 struct cred *new;
585 int retval;
586 kuid_t kruid, keuid, ksuid;
587
588 kruid = make_kuid(ns, ruid);
589 keuid = make_kuid(ns, euid);
590 ksuid = make_kuid(ns, suid);
591
592 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
593 return -EINVAL;
594
595 if ((euid != (uid_t) -1) && !uid_valid(keuid))
596 return -EINVAL;
597
598 if ((suid != (uid_t) -1) && !uid_valid(ksuid))
599 return -EINVAL;
600
601 new = prepare_creds();
602 if (!new)
603 return -ENOMEM;
604
605 old = current_cred();
606
607 retval = -EPERM;
608 if (!ns_capable(old->user_ns, CAP_SETUID)) {
609 if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
610 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
611 goto error;
612 if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
613 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
614 goto error;
615 if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
616 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
617 goto error;
618 }
619
620 if (ruid != (uid_t) -1) {
621 new->uid = kruid;
622 if (!uid_eq(kruid, old->uid)) {
623 retval = set_user(new);
624 if (retval < 0)
625 goto error;
626 }
627 }
628 if (euid != (uid_t) -1)
629 new->euid = keuid;
630 if (suid != (uid_t) -1)
631 new->suid = ksuid;
632 new->fsuid = new->euid;
633
634 retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
635 if (retval < 0)
636 goto error;
637
638 return commit_creds(new);
639
640error:
641 abort_creds(new);
642 return retval;
643}
644
645SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
646{
647 const struct cred *cred = current_cred();
648 int retval;
649 uid_t ruid, euid, suid;
650
651 ruid = from_kuid_munged(cred->user_ns, cred->uid);
652 euid = from_kuid_munged(cred->user_ns, cred->euid);
653 suid = from_kuid_munged(cred->user_ns, cred->suid);
654
655 retval = put_user(ruid, ruidp);
656 if (!retval) {
657 retval = put_user(euid, euidp);
658 if (!retval)
659 return put_user(suid, suidp);
660 }
661 return retval;
662}
663
664/*
665 * Same as above, but for rgid, egid, sgid.
666 */
667SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
668{
669 struct user_namespace *ns = current_user_ns();
670 const struct cred *old;
671 struct cred *new;
672 int retval;
673 kgid_t krgid, kegid, ksgid;
674
675 krgid = make_kgid(ns, rgid);
676 kegid = make_kgid(ns, egid);
677 ksgid = make_kgid(ns, sgid);
678
679 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
680 return -EINVAL;
681 if ((egid != (gid_t) -1) && !gid_valid(kegid))
682 return -EINVAL;
683 if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
684 return -EINVAL;
685
686 new = prepare_creds();
687 if (!new)
688 return -ENOMEM;
689 old = current_cred();
690
691 retval = -EPERM;
692 if (!ns_capable(old->user_ns, CAP_SETGID)) {
693 if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
694 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
695 goto error;
696 if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
697 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
698 goto error;
699 if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
700 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
701 goto error;
702 }
703
704 if (rgid != (gid_t) -1)
705 new->gid = krgid;
706 if (egid != (gid_t) -1)
707 new->egid = kegid;
708 if (sgid != (gid_t) -1)
709 new->sgid = ksgid;
710 new->fsgid = new->egid;
711
712 return commit_creds(new);
713
714error:
715 abort_creds(new);
716 return retval;
717}
718
719SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
720{
721 const struct cred *cred = current_cred();
722 int retval;
723 gid_t rgid, egid, sgid;
724
725 rgid = from_kgid_munged(cred->user_ns, cred->gid);
726 egid = from_kgid_munged(cred->user_ns, cred->egid);
727 sgid = from_kgid_munged(cred->user_ns, cred->sgid);
728
729 retval = put_user(rgid, rgidp);
730 if (!retval) {
731 retval = put_user(egid, egidp);
732 if (!retval)
733 retval = put_user(sgid, sgidp);
734 }
735
736 return retval;
737}
738
739
740/*
741 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
742 * is used for "access()" and for the NFS daemon (letting nfsd stay at
743 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access.
745 */
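/*
 * Hypothetical userspace usage (illustrative, not part of this file): a
 * file server handling a request on behalf of uid 1000 can switch only the
 * uid used for filesystem permission checks,
 *
 *	setfsuid(1000);
 *	... open()/read()/write() on the client's behalf ...
 *	setfsuid(0);
 *
 * while signals, ptrace and other euid-based checks keep using the
 * daemon's own credentials.
 */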
746SYSCALL_DEFINE1(setfsuid, uid_t, uid)
747{
748 const struct cred *old;
749 struct cred *new;
750 uid_t old_fsuid;
751 kuid_t kuid;
752
753 old = current_cred();
754 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
755
756 kuid = make_kuid(old->user_ns, uid);
757 if (!uid_valid(kuid))
758 return old_fsuid;
759
760 new = prepare_creds();
761 if (!new)
762 return old_fsuid;
763
764 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
765 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
766 ns_capable(old->user_ns, CAP_SETUID)) {
767 if (!uid_eq(kuid, old->fsuid)) {
768 new->fsuid = kuid;
769 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
770 goto change_okay;
771 }
772 }
773
774 abort_creds(new);
775 return old_fsuid;
776
777change_okay:
778 commit_creds(new);
779 return old_fsuid;
780}
781
782/*
 * Same as setfsuid(), but for the filesystem gid.
784 */
785SYSCALL_DEFINE1(setfsgid, gid_t, gid)
786{
787 const struct cred *old;
788 struct cred *new;
789 gid_t old_fsgid;
790 kgid_t kgid;
791
792 old = current_cred();
793 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
794
795 kgid = make_kgid(old->user_ns, gid);
796 if (!gid_valid(kgid))
797 return old_fsgid;
798
799 new = prepare_creds();
800 if (!new)
801 return old_fsgid;
802
803 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
804 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
805 ns_capable(old->user_ns, CAP_SETGID)) {
806 if (!gid_eq(kgid, old->fsgid)) {
807 new->fsgid = kgid;
808 goto change_okay;
809 }
810 }
811
812 abort_creds(new);
813 return old_fsgid;
814
815change_okay:
816 commit_creds(new);
817 return old_fsgid;
818}
819#endif /* CONFIG_MULTIUSER */
820
821/**
822 * sys_getpid - return the thread group id of the current process
823 *
824 * Note, despite the name, this returns the tgid not the pid. The tgid and
825 * the pid are identical unless CLONE_THREAD was specified on clone() in
826 * which case the tgid is the same in all threads of the same group.
827 *
828 * This is SMP safe as current->tgid does not change.
829 */
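/*
 * Userspace view (illustration only): in a multi-threaded process every
 * thread sees the same getpid() but its own gettid(); the two are equal
 * only for the thread group leader, e.g.
 *
 *	printf("pid=%ld tid=%ld\n",
 *	       (long)syscall(SYS_getpid), (long)syscall(SYS_gettid));
 */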
830SYSCALL_DEFINE0(getpid)
831{
832 return task_tgid_vnr(current);
833}
834
835/* Thread ID - the internal kernel "pid" */
836SYSCALL_DEFINE0(gettid)
837{
838 return task_pid_vnr(current);
839}
840
841/*
842 * Accessing ->real_parent is not SMP-safe, it could
843 * change from under us. However, we can use a stale
844 * value of ->real_parent under rcu_read_lock(), see
845 * release_task()->call_rcu(delayed_put_task_struct).
846 */
847SYSCALL_DEFINE0(getppid)
848{
849 int pid;
850
851 rcu_read_lock();
852 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
853 rcu_read_unlock();
854
855 return pid;
856}
857
858SYSCALL_DEFINE0(getuid)
859{
860 /* Only we change this so SMP safe */
861 return from_kuid_munged(current_user_ns(), current_uid());
862}
863
864SYSCALL_DEFINE0(geteuid)
865{
866 /* Only we change this so SMP safe */
867 return from_kuid_munged(current_user_ns(), current_euid());
868}
869
870SYSCALL_DEFINE0(getgid)
871{
872 /* Only we change this so SMP safe */
873 return from_kgid_munged(current_user_ns(), current_gid());
874}
875
876SYSCALL_DEFINE0(getegid)
877{
878 /* Only we change this so SMP safe */
879 return from_kgid_munged(current_user_ns(), current_egid());
880}
881
882void do_sys_times(struct tms *tms)
883{
884 cputime_t tgutime, tgstime, cutime, cstime;
885
886 thread_group_cputime_adjusted(current, &tgutime, &tgstime);
887 cutime = current->signal->cutime;
888 cstime = current->signal->cstime;
889 tms->tms_utime = cputime_to_clock_t(tgutime);
890 tms->tms_stime = cputime_to_clock_t(tgstime);
891 tms->tms_cutime = cputime_to_clock_t(cutime);
892 tms->tms_cstime = cputime_to_clock_t(cstime);
893}
894
895SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
896{
897 if (tbuf) {
898 struct tms tmp;
899
900 do_sys_times(&tmp);
901 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
902 return -EFAULT;
903 }
904 force_successful_syscall_return();
905 return (long) jiffies_64_to_clock_t(get_jiffies_64());
906}
907
908/*
909 * This needs some heavy checking ...
910 * I just haven't the stomach for it. I also don't fully
911 * understand sessions/pgrp etc. Let somebody who does explain it.
912 *
913 * OK, I think I have the protection semantics right.... this is really
914 * only important on a multi-user system anyway, to make sure one user
915 * can't send a signal to a process owned by another. -TYT, 12/12/91
916 *
917 * !PF_FORKNOEXEC check to conform completely to POSIX.
918 */
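/*
 * Typical userspace pattern (illustration only): a job-control shell moves
 * a freshly forked child into its own process group before exec,
 *
 *	setpgid(child, child);	(called in both parent and child to avoid a race)
 *
 * and the PF_FORKNOEXEC check below is what limits the parent's half of
 * that to the window before the child has called execve().
 */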
919SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
920{
921 struct task_struct *p;
922 struct task_struct *group_leader = current->group_leader;
923 struct pid *pgrp;
924 int err;
925
926 if (!pid)
927 pid = task_pid_vnr(group_leader);
928 if (!pgid)
929 pgid = pid;
930 if (pgid < 0)
931 return -EINVAL;
932 rcu_read_lock();
933
934 /* From this point forward we keep holding onto the tasklist lock
935 * so that our parent does not change from under us. -DaveM
936 */
937 write_lock_irq(&tasklist_lock);
938
939 err = -ESRCH;
940 p = find_task_by_vpid(pid);
941 if (!p)
942 goto out;
943
944 err = -EINVAL;
945 if (!thread_group_leader(p))
946 goto out;
947
948 if (same_thread_group(p->real_parent, group_leader)) {
949 err = -EPERM;
950 if (task_session(p) != task_session(group_leader))
951 goto out;
952 err = -EACCES;
953 if (!(p->flags & PF_FORKNOEXEC))
954 goto out;
955 } else {
956 err = -ESRCH;
957 if (p != group_leader)
958 goto out;
959 }
960
961 err = -EPERM;
962 if (p->signal->leader)
963 goto out;
964
965 pgrp = task_pid(p);
966 if (pgid != pid) {
967 struct task_struct *g;
968
969 pgrp = find_vpid(pgid);
970 g = pid_task(pgrp, PIDTYPE_PGID);
971 if (!g || task_session(g) != task_session(group_leader))
972 goto out;
973 }
974
975 err = security_task_setpgid(p, pgid);
976 if (err)
977 goto out;
978
979 if (task_pgrp(p) != pgrp)
980 change_pid(p, PIDTYPE_PGID, pgrp);
981
982 err = 0;
983out:
984 /* All paths lead to here, thus we are safe. -DaveM */
985 write_unlock_irq(&tasklist_lock);
986 rcu_read_unlock();
987 return err;
988}
989
990SYSCALL_DEFINE1(getpgid, pid_t, pid)
991{
992 struct task_struct *p;
993 struct pid *grp;
994 int retval;
995
996 rcu_read_lock();
997 if (!pid)
998 grp = task_pgrp(current);
999 else {
1000 retval = -ESRCH;
1001 p = find_task_by_vpid(pid);
1002 if (!p)
1003 goto out;
1004 grp = task_pgrp(p);
1005 if (!grp)
1006 goto out;
1007
1008 retval = security_task_getpgid(p);
1009 if (retval)
1010 goto out;
1011 }
1012 retval = pid_vnr(grp);
1013out:
1014 rcu_read_unlock();
1015 return retval;
1016}
1017
1018#ifdef __ARCH_WANT_SYS_GETPGRP
1019
1020SYSCALL_DEFINE0(getpgrp)
1021{
1022 return sys_getpgid(0);
1023}
1024
1025#endif
1026
1027SYSCALL_DEFINE1(getsid, pid_t, pid)
1028{
1029 struct task_struct *p;
1030 struct pid *sid;
1031 int retval;
1032
1033 rcu_read_lock();
1034 if (!pid)
1035 sid = task_session(current);
1036 else {
1037 retval = -ESRCH;
1038 p = find_task_by_vpid(pid);
1039 if (!p)
1040 goto out;
1041 sid = task_session(p);
1042 if (!sid)
1043 goto out;
1044
1045 retval = security_task_getsid(p);
1046 if (retval)
1047 goto out;
1048 }
1049 retval = pid_vnr(sid);
1050out:
1051 rcu_read_unlock();
1052 return retval;
1053}
1054
1055static void set_special_pids(struct pid *pid)
1056{
1057 struct task_struct *curr = current->group_leader;
1058
1059 if (task_session(curr) != pid)
1060 change_pid(curr, PIDTYPE_SID, pid);
1061
1062 if (task_pgrp(curr) != pid)
1063 change_pid(curr, PIDTYPE_PGID, pid);
1064}
1065
1066SYSCALL_DEFINE0(setsid)
1067{
1068 struct task_struct *group_leader = current->group_leader;
1069 struct pid *sid = task_pid(group_leader);
1070 pid_t session = pid_vnr(sid);
1071 int err = -EPERM;
1072
1073 write_lock_irq(&tasklist_lock);
1074 /* Fail if I am already a session leader */
1075 if (group_leader->signal->leader)
1076 goto out;
1077
1078 /* Fail if a process group id already exists that equals the
1079 * proposed session id.
1080 */
1081 if (pid_task(sid, PIDTYPE_PGID))
1082 goto out;
1083
1084 group_leader->signal->leader = 1;
1085 set_special_pids(sid);
1086
1087 proc_clear_tty(group_leader);
1088
1089 err = session;
1090out:
1091 write_unlock_irq(&tasklist_lock);
1092 if (err > 0) {
1093 proc_sid_connector(group_leader);
1094 sched_autogroup_create_attach(group_leader);
1095 }
1096 return err;
1097}
1098
1099DECLARE_RWSEM(uts_sem);
1100
1101#ifdef COMPAT_UTS_MACHINE
1102#define override_architecture(name) \
1103 (personality(current->personality) == PER_LINUX32 && \
1104 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1105 sizeof(COMPAT_UTS_MACHINE)))
1106#else
1107#define override_architecture(name) 0
1108#endif
1109
1110/*
1111 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40,
 * and we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
1114 */
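/*
 * Worked example (hypothetical release string): for a 4.2 kernel with
 * UTS_RELEASE "4.2.0-foo", v below becomes 2 + 60 = 62, the digit/dot scan
 * consumes "4.2.0" and leaves rest pointing at "-foo", so a task that
 * enabled the quirk with
 *
 *	personality(PER_LINUX | UNAME26);
 *
 * sees "2.6.62-foo" from uname(2).
 */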
1115static int override_release(char __user *release, size_t len)
1116{
1117 int ret = 0;
1118
1119 if (current->personality & UNAME26) {
1120 const char *rest = UTS_RELEASE;
1121 char buf[65] = { 0 };
1122 int ndots = 0;
1123 unsigned v;
1124 size_t copy;
1125
1126 while (*rest) {
1127 if (*rest == '.' && ++ndots >= 3)
1128 break;
1129 if (!isdigit(*rest) && *rest != '.')
1130 break;
1131 rest++;
1132 }
1133 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1134 copy = clamp_t(size_t, len, 1, sizeof(buf));
1135 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1136 ret = copy_to_user(release, buf, copy + 1);
1137 }
1138 return ret;
1139}
1140
1141SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1142{
1143 int errno = 0;
1144
1145 down_read(&uts_sem);
1146 if (copy_to_user(name, utsname(), sizeof *name))
1147 errno = -EFAULT;
1148 up_read(&uts_sem);
1149
1150 if (!errno && override_release(name->release, sizeof(name->release)))
1151 errno = -EFAULT;
1152 if (!errno && override_architecture(name))
1153 errno = -EFAULT;
1154 return errno;
1155}
1156
1157#ifdef __ARCH_WANT_SYS_OLD_UNAME
1158/*
1159 * Old cruft
1160 */
1161SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1162{
1163 int error = 0;
1164
1165 if (!name)
1166 return -EFAULT;
1167
1168 down_read(&uts_sem);
1169 if (copy_to_user(name, utsname(), sizeof(*name)))
1170 error = -EFAULT;
1171 up_read(&uts_sem);
1172
1173 if (!error && override_release(name->release, sizeof(name->release)))
1174 error = -EFAULT;
1175 if (!error && override_architecture(name))
1176 error = -EFAULT;
1177 return error;
1178}
1179
1180SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1181{
1182 int error;
1183
1184 if (!name)
1185 return -EFAULT;
1186 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1187 return -EFAULT;
1188
1189 down_read(&uts_sem);
1190 error = __copy_to_user(&name->sysname, &utsname()->sysname,
1191 __OLD_UTS_LEN);
1192 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1193 error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1194 __OLD_UTS_LEN);
1195 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1196 error |= __copy_to_user(&name->release, &utsname()->release,
1197 __OLD_UTS_LEN);
1198 error |= __put_user(0, name->release + __OLD_UTS_LEN);
1199 error |= __copy_to_user(&name->version, &utsname()->version,
1200 __OLD_UTS_LEN);
1201 error |= __put_user(0, name->version + __OLD_UTS_LEN);
1202 error |= __copy_to_user(&name->machine, &utsname()->machine,
1203 __OLD_UTS_LEN);
1204 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1205 up_read(&uts_sem);
1206
1207 if (!error && override_architecture(name))
1208 error = -EFAULT;
1209 if (!error && override_release(name->release, sizeof(name->release)))
1210 error = -EFAULT;
1211 return error ? -EFAULT : 0;
1212}
1213#endif
1214
1215SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1216{
1217 int errno;
1218 char tmp[__NEW_UTS_LEN];
1219
1220 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1221 return -EPERM;
1222
1223 if (len < 0 || len > __NEW_UTS_LEN)
1224 return -EINVAL;
1225 down_write(&uts_sem);
1226 errno = -EFAULT;
1227 if (!copy_from_user(tmp, name, len)) {
1228 struct new_utsname *u = utsname();
1229
1230 memcpy(u->nodename, tmp, len);
1231 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1232 errno = 0;
1233 uts_proc_notify(UTS_PROC_HOSTNAME);
1234 }
1235 up_write(&uts_sem);
1236 return errno;
1237}
1238
1239#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1240
1241SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1242{
1243 int i, errno;
1244 struct new_utsname *u;
1245
1246 if (len < 0)
1247 return -EINVAL;
1248 down_read(&uts_sem);
1249 u = utsname();
1250 i = 1 + strlen(u->nodename);
1251 if (i > len)
1252 i = len;
1253 errno = 0;
1254 if (copy_to_user(name, u->nodename, i))
1255 errno = -EFAULT;
1256 up_read(&uts_sem);
1257 return errno;
1258}
1259
1260#endif
1261
1262/*
1263 * Only setdomainname; getdomainname can be implemented by calling
1264 * uname()
1265 */
1266SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1267{
1268 int errno;
1269 char tmp[__NEW_UTS_LEN];
1270
1271 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1272 return -EPERM;
1273 if (len < 0 || len > __NEW_UTS_LEN)
1274 return -EINVAL;
1275
1276 down_write(&uts_sem);
1277 errno = -EFAULT;
1278 if (!copy_from_user(tmp, name, len)) {
1279 struct new_utsname *u = utsname();
1280
1281 memcpy(u->domainname, tmp, len);
1282 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1283 errno = 0;
1284 uts_proc_notify(UTS_PROC_DOMAINNAME);
1285 }
1286 up_write(&uts_sem);
1287 return errno;
1288}
1289
1290SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1291{
1292 struct rlimit value;
1293 int ret;
1294
1295 ret = do_prlimit(current, resource, NULL, &value);
1296 if (!ret)
1297 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1298
1299 return ret;
1300}
1301
1302#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1303
1304/*
1305 * Back compatibility for getrlimit. Needed for some apps.
1306 */
1307SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1308 struct rlimit __user *, rlim)
1309{
1310 struct rlimit x;
1311 if (resource >= RLIM_NLIMITS)
1312 return -EINVAL;
1313
1314 task_lock(current->group_leader);
1315 x = current->signal->rlim[resource];
1316 task_unlock(current->group_leader);
1317 if (x.rlim_cur > 0x7FFFFFFF)
1318 x.rlim_cur = 0x7FFFFFFF;
1319 if (x.rlim_max > 0x7FFFFFFF)
1320 x.rlim_max = 0x7FFFFFFF;
1321 return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1322}
1323
1324#endif
1325
1326static inline bool rlim64_is_infinity(__u64 rlim64)
1327{
1328#if BITS_PER_LONG < 64
1329 return rlim64 >= ULONG_MAX;
1330#else
1331 return rlim64 == RLIM64_INFINITY;
1332#endif
1333}
1334
1335static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1336{
1337 if (rlim->rlim_cur == RLIM_INFINITY)
1338 rlim64->rlim_cur = RLIM64_INFINITY;
1339 else
1340 rlim64->rlim_cur = rlim->rlim_cur;
1341 if (rlim->rlim_max == RLIM_INFINITY)
1342 rlim64->rlim_max = RLIM64_INFINITY;
1343 else
1344 rlim64->rlim_max = rlim->rlim_max;
1345}
1346
1347static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1348{
1349 if (rlim64_is_infinity(rlim64->rlim_cur))
1350 rlim->rlim_cur = RLIM_INFINITY;
1351 else
1352 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1353 if (rlim64_is_infinity(rlim64->rlim_max))
1354 rlim->rlim_max = RLIM_INFINITY;
1355 else
1356 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1357}
1358
1359/* make sure you are allowed to change @tsk limits before calling this */
1360int do_prlimit(struct task_struct *tsk, unsigned int resource,
1361 struct rlimit *new_rlim, struct rlimit *old_rlim)
1362{
1363 struct rlimit *rlim;
1364 int retval = 0;
1365
1366 if (resource >= RLIM_NLIMITS)
1367 return -EINVAL;
1368 if (new_rlim) {
1369 if (new_rlim->rlim_cur > new_rlim->rlim_max)
1370 return -EINVAL;
1371 if (resource == RLIMIT_NOFILE &&
1372 new_rlim->rlim_max > sysctl_nr_open)
1373 return -EPERM;
1374 }
1375
1376 /* protect tsk->signal and tsk->sighand from disappearing */
1377 read_lock(&tasklist_lock);
1378 if (!tsk->sighand) {
1379 retval = -ESRCH;
1380 goto out;
1381 }
1382
1383 rlim = tsk->signal->rlim + resource;
1384 task_lock(tsk->group_leader);
1385 if (new_rlim) {
1386 /* Keep the capable check against init_user_ns until
1387 cgroups can contain all limits */
1388 if (new_rlim->rlim_max > rlim->rlim_max &&
1389 !capable(CAP_SYS_RESOURCE))
1390 retval = -EPERM;
1391 if (!retval)
1392 retval = security_task_setrlimit(tsk->group_leader,
1393 resource, new_rlim);
1394 if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
1395 /*
1396 * The caller is asking for an immediate RLIMIT_CPU
1397 * expiry. But we use the zero value to mean "it was
1398 * never set". So let's cheat and make it one second
1399 * instead
1400 */
1401 new_rlim->rlim_cur = 1;
1402 }
1403 }
1404 if (!retval) {
1405 if (old_rlim)
1406 *old_rlim = *rlim;
1407 if (new_rlim)
1408 *rlim = *new_rlim;
1409 }
1410 task_unlock(tsk->group_leader);
1411
1412 /*
1413 * RLIMIT_CPU handling. Note that the kernel fails to return an error
1414 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
1415 * very long-standing error, and fixing it now risks breakage of
1416 * applications, so we live with it
1417 */
1418 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1419 new_rlim->rlim_cur != RLIM_INFINITY)
1420 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1421out:
1422 read_unlock(&tasklist_lock);
1423 return retval;
1424}
1425
1426/* rcu lock must be held */
1427static int check_prlimit_permission(struct task_struct *task)
1428{
1429 const struct cred *cred = current_cred(), *tcred;
1430
1431 if (current == task)
1432 return 0;
1433
1434 tcred = __task_cred(task);
1435 if (uid_eq(cred->uid, tcred->euid) &&
1436 uid_eq(cred->uid, tcred->suid) &&
1437 uid_eq(cred->uid, tcred->uid) &&
1438 gid_eq(cred->gid, tcred->egid) &&
1439 gid_eq(cred->gid, tcred->sgid) &&
1440 gid_eq(cred->gid, tcred->gid))
1441 return 0;
1442 if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1443 return 0;
1444
1445 return -EPERM;
1446}
1447
1448SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1449 const struct rlimit64 __user *, new_rlim,
1450 struct rlimit64 __user *, old_rlim)
1451{
1452 struct rlimit64 old64, new64;
1453 struct rlimit old, new;
1454 struct task_struct *tsk;
1455 int ret;
1456
1457 if (new_rlim) {
1458 if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1459 return -EFAULT;
1460 rlim64_to_rlim(&new64, &new);
1461 }
1462
1463 rcu_read_lock();
1464 tsk = pid ? find_task_by_vpid(pid) : current;
1465 if (!tsk) {
1466 rcu_read_unlock();
1467 return -ESRCH;
1468 }
1469 ret = check_prlimit_permission(tsk);
1470 if (ret) {
1471 rcu_read_unlock();
1472 return ret;
1473 }
1474 get_task_struct(tsk);
1475 rcu_read_unlock();
1476
1477 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1478 old_rlim ? &old : NULL);
1479
1480 if (!ret && old_rlim) {
1481 rlim_to_rlim64(&old, &old64);
1482 if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1483 ret = -EFAULT;
1484 }
1485
1486 put_task_struct(tsk);
1487 return ret;
1488}
1489
1490SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1491{
1492 struct rlimit new_rlim;
1493
1494 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1495 return -EFAULT;
1496 return do_prlimit(current, resource, &new_rlim, NULL);
1497}
1498
1499/*
1500 * It would make sense to put struct rusage in the task_struct,
1501 * except that would make the task_struct be *really big*. After
1502 * task_struct gets moved into malloc'ed memory, it would
1503 * make sense to do this. It will make moving the rest of the information
1504 * a lot simpler! (Which we're not doing right now because we're not
1505 * measuring them yet).
1506 *
1507 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1508 * races with threads incrementing their own counters. But since word
1509 * reads are atomic, we either get new values or old values and we don't
1510 * care which for the sums. We always take the siglock to protect reading
1511 * the c* fields from p->signal from races with exit.c updating those
1512 * fields when reaping, so a sample either gets all the additions of a
1513 * given child after it's reaped, or none so this sample is before reaping.
1514 *
1515 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases of current multithreaded, non-current single-threaded and
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
1521 * single threaded, as no one else can take our signal_struct away, no one
1522 * else can reap the children to update signal->c* counters, and no one else
1523 * can race with the signal-> fields. If we do not take any lock, the
1524 * signal-> fields could be read out of order while another thread was just
1525 * exiting. So we should place a read memory barrier when we avoid the lock.
1526 * On the writer side, write memory barrier is implied in __exit_signal
1527 * as __exit_signal releases the siglock spinlock after updating the signal->
1528 * fields. But we don't do this yet to keep things simple.
1529 *
1530 */
1531
1532static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1533{
1534 r->ru_nvcsw += t->nvcsw;
1535 r->ru_nivcsw += t->nivcsw;
1536 r->ru_minflt += t->min_flt;
1537 r->ru_majflt += t->maj_flt;
1538 r->ru_inblock += task_io_get_inblock(t);
1539 r->ru_oublock += task_io_get_oublock(t);
1540}
1541
1542static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1543{
1544 struct task_struct *t;
1545 unsigned long flags;
1546 cputime_t tgutime, tgstime, utime, stime;
1547 unsigned long maxrss = 0;
1548
1549 memset((char *)r, 0, sizeof (*r));
1550 utime = stime = 0;
1551
1552 if (who == RUSAGE_THREAD) {
1553 task_cputime_adjusted(current, &utime, &stime);
1554 accumulate_thread_rusage(p, r);
1555 maxrss = p->signal->maxrss;
1556 goto out;
1557 }
1558
1559 if (!lock_task_sighand(p, &flags))
1560 return;
1561
1562 switch (who) {
1563 case RUSAGE_BOTH:
1564 case RUSAGE_CHILDREN:
1565 utime = p->signal->cutime;
1566 stime = p->signal->cstime;
1567 r->ru_nvcsw = p->signal->cnvcsw;
1568 r->ru_nivcsw = p->signal->cnivcsw;
1569 r->ru_minflt = p->signal->cmin_flt;
1570 r->ru_majflt = p->signal->cmaj_flt;
1571 r->ru_inblock = p->signal->cinblock;
1572 r->ru_oublock = p->signal->coublock;
1573 maxrss = p->signal->cmaxrss;
1574
1575 if (who == RUSAGE_CHILDREN)
1576 break;
1577
1578 case RUSAGE_SELF:
1579 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1580 utime += tgutime;
1581 stime += tgstime;
1582 r->ru_nvcsw += p->signal->nvcsw;
1583 r->ru_nivcsw += p->signal->nivcsw;
1584 r->ru_minflt += p->signal->min_flt;
1585 r->ru_majflt += p->signal->maj_flt;
1586 r->ru_inblock += p->signal->inblock;
1587 r->ru_oublock += p->signal->oublock;
1588 if (maxrss < p->signal->maxrss)
1589 maxrss = p->signal->maxrss;
1590 t = p;
1591 do {
1592 accumulate_thread_rusage(t, r);
1593 } while_each_thread(p, t);
1594 break;
1595
1596 default:
1597 BUG();
1598 }
1599 unlock_task_sighand(p, &flags);
1600
1601out:
1602 cputime_to_timeval(utime, &r->ru_utime);
1603 cputime_to_timeval(stime, &r->ru_stime);
1604
1605 if (who != RUSAGE_CHILDREN) {
1606 struct mm_struct *mm = get_task_mm(p);
1607
1608 if (mm) {
1609 setmax_mm_hiwater_rss(&maxrss, mm);
1610 mmput(mm);
1611 }
1612 }
1613 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1614}
1615
1616int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1617{
1618 struct rusage r;
1619
1620 k_getrusage(p, who, &r);
1621 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1622}
1623
1624SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1625{
1626 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1627 who != RUSAGE_THREAD)
1628 return -EINVAL;
1629 return getrusage(current, who, ru);
1630}
1631
1632#ifdef CONFIG_COMPAT
1633COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1634{
1635 struct rusage r;
1636
1637 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1638 who != RUSAGE_THREAD)
1639 return -EINVAL;
1640
1641 k_getrusage(current, who, &r);
1642 return put_compat_rusage(&r, ru);
1643}
1644#endif
1645
1646SYSCALL_DEFINE1(umask, int, mask)
1647{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1649 return mask;
1650}
1651
1652static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1653{
1654 struct fd exe;
1655 struct file *old_exe, *exe_file;
1656 struct inode *inode;
1657 int err;
1658
1659 exe = fdget(fd);
1660 if (!exe.file)
1661 return -EBADF;
1662
1663 inode = file_inode(exe.file);
1664
1665 /*
	 * Because the original mm->exe_file points to an executable file, make
	 * sure that this one is executable as well, to avoid breaking the
	 * overall picture.
1669 */
1670 err = -EACCES;
1671 if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1672 goto exit;
1673
1674 err = inode_permission(inode, MAY_EXEC);
1675 if (err)
1676 goto exit;
1677
1678 /*
1679 * Forbid mm->exe_file change if old file still mapped.
1680 */
1681 exe_file = get_mm_exe_file(mm);
1682 err = -EBUSY;
1683 if (exe_file) {
1684 struct vm_area_struct *vma;
1685
1686 down_read(&mm->mmap_sem);
1687 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1688 if (!vma->vm_file)
1689 continue;
1690 if (path_equal(&vma->vm_file->f_path,
1691 &exe_file->f_path))
1692 goto exit_err;
1693 }
1694
1695 up_read(&mm->mmap_sem);
1696 fput(exe_file);
1697 }
1698
1699 /*
	 * The symlink can be changed only once, just to disallow arbitrary
	 * transitions malicious software might bring in. This means one
	 * could take a snapshot of all running processes and monitor
	 * /proc/pid/exe changes to notice unusual activity if needed.
1704 */
1705 err = -EPERM;
1706 if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
1707 goto exit;
1708
1709 err = 0;
1710 /* set the new file, lockless */
1711 get_file(exe.file);
1712 old_exe = xchg(&mm->exe_file, exe.file);
1713 if (old_exe)
1714 fput(old_exe);
1715exit:
1716 fdput(exe);
1717 return err;
1718exit_err:
1719 up_read(&mm->mmap_sem);
1720 fput(exe_file);
1721 goto exit;
1722}
1723
1724/*
1725 * WARNING: we don't require any capability here so be very careful
1726 * in what is allowed for modification from userspace.
1727 */
1728static int validate_prctl_map(struct prctl_mm_map *prctl_map)
1729{
1730 unsigned long mmap_max_addr = TASK_SIZE;
1731 struct mm_struct *mm = current->mm;
1732 int error = -EINVAL, i;
1733
1734 static const unsigned char offsets[] = {
1735 offsetof(struct prctl_mm_map, start_code),
1736 offsetof(struct prctl_mm_map, end_code),
1737 offsetof(struct prctl_mm_map, start_data),
1738 offsetof(struct prctl_mm_map, end_data),
1739 offsetof(struct prctl_mm_map, start_brk),
1740 offsetof(struct prctl_mm_map, brk),
1741 offsetof(struct prctl_mm_map, start_stack),
1742 offsetof(struct prctl_mm_map, arg_start),
1743 offsetof(struct prctl_mm_map, arg_end),
1744 offsetof(struct prctl_mm_map, env_start),
1745 offsetof(struct prctl_mm_map, env_end),
1746 };
1747
1748 /*
	 * Make sure the members are not somewhere outside
	 * of the allowed address space.
1751 */
1752 for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1753 u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1754
1755 if ((unsigned long)val >= mmap_max_addr ||
1756 (unsigned long)val < mmap_min_addr)
1757 goto out;
1758 }
1759
1760 /*
1761 * Make sure the pairs are ordered.
1762 */
1763#define __prctl_check_order(__m1, __op, __m2) \
1764 ((unsigned long)prctl_map->__m1 __op \
1765 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1766 error = __prctl_check_order(start_code, <, end_code);
1767 error |= __prctl_check_order(start_data, <, end_data);
1768 error |= __prctl_check_order(start_brk, <=, brk);
1769 error |= __prctl_check_order(arg_start, <=, arg_end);
1770 error |= __prctl_check_order(env_start, <=, env_end);
1771 if (error)
1772 goto out;
1773#undef __prctl_check_order
1774
1775 error = -EINVAL;
1776
1777 /*
1778 * @brk should be after @end_data in traditional maps.
1779 */
1780 if (prctl_map->start_brk <= prctl_map->end_data ||
1781 prctl_map->brk <= prctl_map->end_data)
1782 goto out;
1783
1784 /*
	 * Nor should we allow the limits to be overridden if they are set.
1786 */
1787 if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1788 prctl_map->start_brk, prctl_map->end_data,
1789 prctl_map->start_data))
1790 goto out;
1791
1792 /*
1793 * Someone is trying to cheat the auxv vector.
1794 */
1795 if (prctl_map->auxv_size) {
1796 if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
1797 goto out;
1798 }
1799
1800 /*
1801 * Finally, make sure the caller has the rights to
1802 * change /proc/pid/exe link: only local root should
1803 * be allowed to.
1804 */
1805 if (prctl_map->exe_fd != (u32)-1) {
1806 struct user_namespace *ns = current_user_ns();
1807 const struct cred *cred = current_cred();
1808
1809 if (!uid_eq(cred->uid, make_kuid(ns, 0)) ||
1810 !gid_eq(cred->gid, make_kgid(ns, 0)))
1811 goto out;
1812 }
1813
1814 error = 0;
1815out:
1816 return error;
1817}
1818
1819#ifdef CONFIG_CHECKPOINT_RESTORE
1820static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1821{
1822 struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1823 unsigned long user_auxv[AT_VECTOR_SIZE];
1824 struct mm_struct *mm = current->mm;
1825 int error;
1826
1827 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1828 BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1829
1830 if (opt == PR_SET_MM_MAP_SIZE)
1831 return put_user((unsigned int)sizeof(prctl_map),
1832 (unsigned int __user *)addr);
1833
1834 if (data_size != sizeof(prctl_map))
1835 return -EINVAL;
1836
1837 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1838 return -EFAULT;
1839
1840 error = validate_prctl_map(&prctl_map);
1841 if (error)
1842 return error;
1843
1844 if (prctl_map.auxv_size) {
1845 memset(user_auxv, 0, sizeof(user_auxv));
1846 if (copy_from_user(user_auxv,
1847 (const void __user *)prctl_map.auxv,
1848 prctl_map.auxv_size))
1849 return -EFAULT;
1850
1851 /* Last entry must be AT_NULL as specification requires */
1852 user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1853 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
1854 }
1855
1856 if (prctl_map.exe_fd != (u32)-1) {
1857 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
1858 if (error)
1859 return error;
1860 }
1861
1862 down_write(&mm->mmap_sem);
1863
1864 /*
	 * We don't validate that these members point to real, present VMAs,
	 * because the application may already have the corresponding VMAs
	 * unmapped and the kernel uses these members mostly for statistics
	 * output in procfs, except for
	 *
	 * - @start_brk/@brk, which are used in do_brk but the kernel looks up
	 *   VMAs when updating these members, so anything wrong written here
	 *   causes the kernel to swear at the userspace program but won't lead
	 *   to any problem in the kernel itself
1874 */
1875
1876 mm->start_code = prctl_map.start_code;
1877 mm->end_code = prctl_map.end_code;
1878 mm->start_data = prctl_map.start_data;
1879 mm->end_data = prctl_map.end_data;
1880 mm->start_brk = prctl_map.start_brk;
1881 mm->brk = prctl_map.brk;
1882 mm->start_stack = prctl_map.start_stack;
1883 mm->arg_start = prctl_map.arg_start;
1884 mm->arg_end = prctl_map.arg_end;
1885 mm->env_start = prctl_map.env_start;
1886 mm->env_end = prctl_map.env_end;
1887
1888 /*
	 * Note this update of @saved_auxv is lockless, thus
	 * if someone reads this member in procfs while we're
	 * updating -- it may see partly updated results. It's
	 * a known and acceptable trade-off: we leave it as is to
	 * avoid introducing additional locks here, which would make
	 * the kernel more complex.
1895 */
1896 if (prctl_map.auxv_size)
1897 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
1898
1899 up_write(&mm->mmap_sem);
1900 return 0;
1901}
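
/*
 * Sketch of the intended userspace handshake (illustrative only; the exact
 * field values are up to the checkpoint/restore tool): the caller first
 * queries the expected structure size and then submits the whole map in a
 * single call,
 *
 *	unsigned int size;
 *	struct prctl_mm_map map = { .exe_fd = -1, ... };
 *
 *	prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (unsigned long)&size, 0, 0);
 *	prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)&map, size, 0);
 *
 * so that validate_prctl_map() can check all the addresses together rather
 * than one PR_SET_MM_* field at a time.
 */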
1902#endif /* CONFIG_CHECKPOINT_RESTORE */
1903
1904static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
1905 unsigned long len)
1906{
1907 /*
1908 * This doesn't move the auxiliary vector itself since it's pinned to
1909 * mm_struct, but it permits filling the vector with new values. It's
1910 * up to the caller to provide sane values here, otherwise userspace
1911 * tools which use this vector might be unhappy.
1912 */
1913 unsigned long user_auxv[AT_VECTOR_SIZE];
1914
1915 if (len > sizeof(user_auxv))
1916 return -EINVAL;
1917
1918 if (copy_from_user(user_auxv, (const void __user *)addr, len))
1919 return -EFAULT;
1920
1921 /* Make sure the last entry is always AT_NULL */
1922 user_auxv[AT_VECTOR_SIZE - 2] = 0;
1923 user_auxv[AT_VECTOR_SIZE - 1] = 0;
1924
1925 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1926
1927 task_lock(current);
1928 memcpy(mm->saved_auxv, user_auxv, len);
1929 task_unlock(current);
1930
1931 return 0;
1932}
1933
1934static int prctl_set_mm(int opt, unsigned long addr,
1935 unsigned long arg4, unsigned long arg5)
1936{
1937 struct mm_struct *mm = current->mm;
1938 struct prctl_mm_map prctl_map;
1939 struct vm_area_struct *vma;
1940 int error;
1941
1942 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
1943 opt != PR_SET_MM_MAP &&
1944 opt != PR_SET_MM_MAP_SIZE)))
1945 return -EINVAL;
1946
1947#ifdef CONFIG_CHECKPOINT_RESTORE
1948 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
1949 return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
1950#endif
1951
1952 if (!capable(CAP_SYS_RESOURCE))
1953 return -EPERM;
1954
1955 if (opt == PR_SET_MM_EXE_FILE)
1956 return prctl_set_mm_exe_file(mm, (unsigned int)addr);
1957
1958 if (opt == PR_SET_MM_AUXV)
1959 return prctl_set_auxv(mm, addr, arg4);
1960
1961 if (addr >= TASK_SIZE || addr < mmap_min_addr)
1962 return -EINVAL;
1963
1964 error = -EINVAL;
1965
1966 down_write(&mm->mmap_sem);
1967 vma = find_vma(mm, addr);
1968
1969 prctl_map.start_code = mm->start_code;
1970 prctl_map.end_code = mm->end_code;
1971 prctl_map.start_data = mm->start_data;
1972 prctl_map.end_data = mm->end_data;
1973 prctl_map.start_brk = mm->start_brk;
1974 prctl_map.brk = mm->brk;
1975 prctl_map.start_stack = mm->start_stack;
1976 prctl_map.arg_start = mm->arg_start;
1977 prctl_map.arg_end = mm->arg_end;
1978 prctl_map.env_start = mm->env_start;
1979 prctl_map.env_end = mm->env_end;
1980 prctl_map.auxv = NULL;
1981 prctl_map.auxv_size = 0;
1982 prctl_map.exe_fd = -1;
1983
1984 switch (opt) {
1985 case PR_SET_MM_START_CODE:
1986 prctl_map.start_code = addr;
1987 break;
1988 case PR_SET_MM_END_CODE:
1989 prctl_map.end_code = addr;
1990 break;
1991 case PR_SET_MM_START_DATA:
1992 prctl_map.start_data = addr;
1993 break;
1994 case PR_SET_MM_END_DATA:
1995 prctl_map.end_data = addr;
1996 break;
1997 case PR_SET_MM_START_STACK:
1998 prctl_map.start_stack = addr;
1999 break;
2000 case PR_SET_MM_START_BRK:
2001 prctl_map.start_brk = addr;
2002 break;
2003 case PR_SET_MM_BRK:
2004 prctl_map.brk = addr;
2005 break;
2006 case PR_SET_MM_ARG_START:
2007 prctl_map.arg_start = addr;
2008 break;
2009 case PR_SET_MM_ARG_END:
2010 prctl_map.arg_end = addr;
2011 break;
2012 case PR_SET_MM_ENV_START:
2013 prctl_map.env_start = addr;
2014 break;
2015 case PR_SET_MM_ENV_END:
2016 prctl_map.env_end = addr;
2017 break;
2018 default:
2019 goto out;
2020 }
2021
2022 error = validate_prctl_map(&prctl_map);
2023 if (error)
2024 goto out;
2025
2026 switch (opt) {
2027 /*
	 * If command line arguments and the environment
	 * are placed somewhere else on the stack, we can
	 * set them up here: ARG_START/END to set up the
	 * command line arguments and ENV_START/END
	 * for the environment.
2033 */
2034 case PR_SET_MM_START_STACK:
2035 case PR_SET_MM_ARG_START:
2036 case PR_SET_MM_ARG_END:
2037 case PR_SET_MM_ENV_START:
2038 case PR_SET_MM_ENV_END:
2039 if (!vma) {
2040 error = -EFAULT;
2041 goto out;
2042 }
2043 }
2044
2045 mm->start_code = prctl_map.start_code;
2046 mm->end_code = prctl_map.end_code;
2047 mm->start_data = prctl_map.start_data;
2048 mm->end_data = prctl_map.end_data;
2049 mm->start_brk = prctl_map.start_brk;
2050 mm->brk = prctl_map.brk;
2051 mm->start_stack = prctl_map.start_stack;
2052 mm->arg_start = prctl_map.arg_start;
2053 mm->arg_end = prctl_map.arg_end;
2054 mm->env_start = prctl_map.env_start;
2055 mm->env_end = prctl_map.env_end;
2056
2057 error = 0;
2058out:
2059 up_write(&mm->mmap_sem);
2060 return error;
2061}
2062
2063#ifdef CONFIG_CHECKPOINT_RESTORE
2064static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2065{
2066 return put_user(me->clear_child_tid, tid_addr);
2067}
2068#else
2069static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2070{
2071 return -EINVAL;
2072}
2073#endif
2074
2075SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2076 unsigned long, arg4, unsigned long, arg5)
2077{
2078 struct task_struct *me = current;
2079 unsigned char comm[sizeof(me->comm)];
2080 long error;
2081
2082 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2083 if (error != -ENOSYS)
2084 return error;
2085
2086 error = 0;
2087 switch (option) {
2088 case PR_SET_PDEATHSIG:
2089 if (!valid_signal(arg2)) {
2090 error = -EINVAL;
2091 break;
2092 }
2093 me->pdeath_signal = arg2;
2094 break;
2095 case PR_GET_PDEATHSIG:
2096 error = put_user(me->pdeath_signal, (int __user *)arg2);
2097 break;
2098 case PR_GET_DUMPABLE:
2099 error = get_dumpable(me->mm);
2100 break;
2101 case PR_SET_DUMPABLE:
2102 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2103 error = -EINVAL;
2104 break;
2105 }
2106 set_dumpable(me->mm, arg2);
2107 break;
2108
2109 case PR_SET_UNALIGN:
2110 error = SET_UNALIGN_CTL(me, arg2);
2111 break;
2112 case PR_GET_UNALIGN:
2113 error = GET_UNALIGN_CTL(me, arg2);
2114 break;
2115 case PR_SET_FPEMU:
2116 error = SET_FPEMU_CTL(me, arg2);
2117 break;
2118 case PR_GET_FPEMU:
2119 error = GET_FPEMU_CTL(me, arg2);
2120 break;
2121 case PR_SET_FPEXC:
2122 error = SET_FPEXC_CTL(me, arg2);
2123 break;
2124 case PR_GET_FPEXC:
2125 error = GET_FPEXC_CTL(me, arg2);
2126 break;
2127 case PR_GET_TIMING:
2128 error = PR_TIMING_STATISTICAL;
2129 break;
2130 case PR_SET_TIMING:
2131 if (arg2 != PR_TIMING_STATISTICAL)
2132 error = -EINVAL;
2133 break;
2134 case PR_SET_NAME:
2135 comm[sizeof(me->comm) - 1] = 0;
2136 if (strncpy_from_user(comm, (char __user *)arg2,
2137 sizeof(me->comm) - 1) < 0)
2138 return -EFAULT;
2139 set_task_comm(me, comm);
2140 proc_comm_connector(me);
2141 break;
2142 case PR_GET_NAME:
2143 get_task_comm(comm, me);
2144 if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2145 return -EFAULT;
2146 break;
2147 case PR_GET_ENDIAN:
2148 error = GET_ENDIAN(me, arg2);
2149 break;
2150 case PR_SET_ENDIAN:
2151 error = SET_ENDIAN(me, arg2);
2152 break;
2153 case PR_GET_SECCOMP:
2154 error = prctl_get_seccomp();
2155 break;
2156 case PR_SET_SECCOMP:
2157 error = prctl_set_seccomp(arg2, (char __user *)arg3);
2158 break;
2159 case PR_GET_TSC:
2160 error = GET_TSC_CTL(arg2);
2161 break;
2162 case PR_SET_TSC:
2163 error = SET_TSC_CTL(arg2);
2164 break;
2165 case PR_TASK_PERF_EVENTS_DISABLE:
2166 error = perf_event_task_disable();
2167 break;
2168 case PR_TASK_PERF_EVENTS_ENABLE:
2169 error = perf_event_task_enable();
2170 break;
2171 case PR_GET_TIMERSLACK:
2172 if (current->timer_slack_ns > ULONG_MAX)
2173 error = ULONG_MAX;
2174 else
2175 error = current->timer_slack_ns;
2176 break;
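	/*
	 * For PR_SET_TIMERSLACK a value of 0 restores the task's default
	 * timer slack (default_timer_slack_ns, normally inherited from
	 * the parent); any other value is used as-is, in nanoseconds.
	 */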
2177 case PR_SET_TIMERSLACK:
2178 if (arg2 <= 0)
2179 current->timer_slack_ns =
2180 current->default_timer_slack_ns;
2181 else
2182 current->timer_slack_ns = arg2;
2183 break;
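	/*
	 * PR_MCE_KILL selects the per-task memory-failure policy: "early"
	 * kill sends SIGBUS as soon as a corrupted page is detected,
	 * "late" kill only when the page is actually accessed, and
	 * "default" defers to the system-wide sysctl setting.
	 */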
2184 case PR_MCE_KILL:
2185 if (arg4 | arg5)
2186 return -EINVAL;
2187 switch (arg2) {
2188 case PR_MCE_KILL_CLEAR:
2189 if (arg3 != 0)
2190 return -EINVAL;
2191 current->flags &= ~PF_MCE_PROCESS;
2192 break;
2193 case PR_MCE_KILL_SET:
2194 current->flags |= PF_MCE_PROCESS;
2195 if (arg3 == PR_MCE_KILL_EARLY)
2196 current->flags |= PF_MCE_EARLY;
2197 else if (arg3 == PR_MCE_KILL_LATE)
2198 current->flags &= ~PF_MCE_EARLY;
2199 else if (arg3 == PR_MCE_KILL_DEFAULT)
2200 current->flags &=
2201 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
2202 else
2203 return -EINVAL;
2204 break;
2205 default:
2206 return -EINVAL;
2207 }
2208 break;
2209 case PR_MCE_KILL_GET:
2210 if (arg2 | arg3 | arg4 | arg5)
2211 return -EINVAL;
2212 if (current->flags & PF_MCE_PROCESS)
2213 error = (current->flags & PF_MCE_EARLY) ?
2214 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2215 else
2216 error = PR_MCE_KILL_DEFAULT;
2217 break;
2218 case PR_SET_MM:
2219 error = prctl_set_mm(arg2, arg3, arg4, arg5);
2220 break;
2221 case PR_GET_TID_ADDRESS:
2222 error = prctl_get_tid_address(me, (int __user **)arg2);
2223 break;
2224 case PR_SET_CHILD_SUBREAPER:
2225 me->signal->is_child_subreaper = !!arg2;
2226 break;
2227 case PR_GET_CHILD_SUBREAPER:
2228 error = put_user(me->signal->is_child_subreaper,
2229 (int __user *)arg2);
2230 break;
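	/*
	 * no_new_privs is a one-way latch: once set it can never be
	 * cleared and it survives fork() and execve(), so execve() can
	 * no longer grant privileges (e.g. through set-uid binaries).
	 */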
2231 case PR_SET_NO_NEW_PRIVS:
2232 if (arg2 != 1 || arg3 || arg4 || arg5)
2233 return -EINVAL;
2234
2235 task_set_no_new_privs(current);
2236 break;
2237 case PR_GET_NO_NEW_PRIVS:
2238 if (arg2 || arg3 || arg4 || arg5)
2239 return -EINVAL;
2240 return task_no_new_privs(current) ? 1 : 0;
2241 case PR_GET_THP_DISABLE:
2242 if (arg2 || arg3 || arg4 || arg5)
2243 return -EINVAL;
2244 error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
2245 break;
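	/*
	 * THP disabling is recorded in mm->def_flags, so it takes effect
	 * for mappings created after this call; already existing VMAs
	 * keep the flags they were created with.
	 */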
2246 case PR_SET_THP_DISABLE:
2247 if (arg3 || arg4 || arg5)
2248 return -EINVAL;
2249 down_write(&me->mm->mmap_sem);
2250 if (arg2)
2251 me->mm->def_flags |= VM_NOHUGEPAGE;
2252 else
2253 me->mm->def_flags &= ~VM_NOHUGEPAGE;
2254 up_write(&me->mm->mmap_sem);
2255 break;
2256 case PR_MPX_ENABLE_MANAGEMENT:
2257 if (arg2 || arg3 || arg4 || arg5)
2258 return -EINVAL;
2259 error = MPX_ENABLE_MANAGEMENT();
2260 break;
2261 case PR_MPX_DISABLE_MANAGEMENT:
2262 if (arg2 || arg3 || arg4 || arg5)
2263 return -EINVAL;
2264 error = MPX_DISABLE_MANAGEMENT();
2265 break;
2266 case PR_SET_FP_MODE:
2267 error = SET_FP_MODE(me, arg2);
2268 break;
2269 case PR_GET_FP_MODE:
2270 error = GET_FP_MODE(me);
2271 break;
2272 default:
2273 error = -EINVAL;
2274 break;
2275 }
2276 return error;
2277}
2278
2279SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2280 struct getcpu_cache __user *, unused)
2281{
2282 int err = 0;
2283 int cpu = raw_smp_processor_id();
2284
2285 if (cpup)
2286 err |= put_user(cpu, cpup);
2287 if (nodep)
2288 err |= put_user(cpu_to_node(cpu), nodep);
2289 return err ? -EFAULT : 0;
2290}
2291
2292/**
2293 * do_sysinfo - fill in sysinfo struct
2294 * @info: pointer to buffer to fill
2295 */
2296static int do_sysinfo(struct sysinfo *info)
2297{
2298 unsigned long mem_total, sav_total;
2299 unsigned int mem_unit, bitcount;
2300 struct timespec tp;
2301
2302 memset(info, 0, sizeof(struct sysinfo));
2303
2304 get_monotonic_boottime(&tp);
2305 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2306
2307 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2308
2309 info->procs = nr_threads;
2310
2311 si_meminfo(info);
2312 si_swapinfo(info);
2313
2314 /*
2315 * If the sum of all the available memory (i.e. ram + swap)
2316 * is less than can be stored in a 32 bit unsigned long then
2317 * we can be binary compatible with 2.2.x kernels. If not,
2318 * well, in that case 2.2.x was broken anyways...
2319 *
2320 * -Erik Andersen <andersee@debian.org>
2321 */
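	/*
	 * Illustrative example (numbers not from the original source):
	 * with mem_unit == 4096 and 3 GiB of RAM+swap, mem_total starts
	 * at 786432 units; the loop below shifts it left once per bit of
	 * mem_unit (12 times) without overflowing, so the code further
	 * down can report everything in bytes with mem_unit == 1.  If the
	 * byte total would not fit in an unsigned long we bail out and
	 * keep reporting in mem_unit-sized chunks instead.
	 */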
2322
2323 mem_total = info->totalram + info->totalswap;
2324 if (mem_total < info->totalram || mem_total < info->totalswap)
2325 goto out;
2326 bitcount = 0;
2327 mem_unit = info->mem_unit;
2328 while (mem_unit > 1) {
2329 bitcount++;
2330 mem_unit >>= 1;
2331 sav_total = mem_total;
2332 mem_total <<= 1;
2333 if (mem_total < sav_total)
2334 goto out;
2335 }
2336
2337 /*
2338 * If mem_total did not overflow, multiply all memory values by
2339 * info->mem_unit and set it to 1. This leaves things compatible
2340 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2341 * kernels...
2342 */
2343
2344 info->mem_unit = 1;
2345 info->totalram <<= bitcount;
2346 info->freeram <<= bitcount;
2347 info->sharedram <<= bitcount;
2348 info->bufferram <<= bitcount;
2349 info->totalswap <<= bitcount;
2350 info->freeswap <<= bitcount;
2351 info->totalhigh <<= bitcount;
2352 info->freehigh <<= bitcount;
2353
2354out:
2355 return 0;
2356}
2357
2358SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2359{
2360 struct sysinfo val;
2361
2362 do_sysinfo(&val);
2363
2364 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2365 return -EFAULT;
2366
2367 return 0;
2368}
2369
2370#ifdef CONFIG_COMPAT
2371struct compat_sysinfo {
2372 s32 uptime;
2373 u32 loads[3];
2374 u32 totalram;
2375 u32 freeram;
2376 u32 sharedram;
2377 u32 bufferram;
2378 u32 totalswap;
2379 u32 freeswap;
2380 u16 procs;
2381 u16 pad;
2382 u32 totalhigh;
2383 u32 freehigh;
2384 u32 mem_unit;
2385 char _f[20-2*sizeof(u32)-sizeof(int)];
2386};
2387
2388COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2389{
2390 struct sysinfo s;
2391
2392 do_sysinfo(&s);
2393
2394	/* Check whether any memory value is too large for 32 bits and
2395	 * scale down if needed.
2396	 */
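	/*
	 * Illustrative example (assuming PAGE_SIZE == 4096): a 64-bit
	 * kernel with 8 GiB of RAM and mem_unit == 1 cannot fit totalram
	 * into 32 bits, so mem_unit is raised to 4096 and every value is
	 * shifted right by the matching 12 bits to compensate.
	 */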
2397 if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2398 int bitcount = 0;
2399
2400 while (s.mem_unit < PAGE_SIZE) {
2401 s.mem_unit <<= 1;
2402 bitcount++;
2403 }
2404
2405 s.totalram >>= bitcount;
2406 s.freeram >>= bitcount;
2407 s.sharedram >>= bitcount;
2408 s.bufferram >>= bitcount;
2409 s.totalswap >>= bitcount;
2410 s.freeswap >>= bitcount;
2411 s.totalhigh >>= bitcount;
2412 s.freehigh >>= bitcount;
2413 }
2414
2415 if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
2416 __put_user(s.uptime, &info->uptime) ||
2417 __put_user(s.loads[0], &info->loads[0]) ||
2418 __put_user(s.loads[1], &info->loads[1]) ||
2419 __put_user(s.loads[2], &info->loads[2]) ||
2420 __put_user(s.totalram, &info->totalram) ||
2421 __put_user(s.freeram, &info->freeram) ||
2422 __put_user(s.sharedram, &info->sharedram) ||
2423 __put_user(s.bufferram, &info->bufferram) ||
2424 __put_user(s.totalswap, &info->totalswap) ||
2425 __put_user(s.freeswap, &info->freeswap) ||
2426 __put_user(s.procs, &info->procs) ||
2427 __put_user(s.totalhigh, &info->totalhigh) ||
2428 __put_user(s.freehigh, &info->freehigh) ||
2429 __put_user(s.mem_unit, &info->mem_unit))
2430 return -EFAULT;
2431
2432 return 0;
2433}
2434#endif /* CONFIG_COMPAT */
1/*
2 * linux/kernel/sys.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7#include <linux/export.h>
8#include <linux/mm.h>
9#include <linux/utsname.h>
10#include <linux/mman.h>
11#include <linux/reboot.h>
12#include <linux/prctl.h>
13#include <linux/highuid.h>
14#include <linux/fs.h>
15#include <linux/kmod.h>
16#include <linux/perf_event.h>
17#include <linux/resource.h>
18#include <linux/kernel.h>
19#include <linux/workqueue.h>
20#include <linux/capability.h>
21#include <linux/device.h>
22#include <linux/key.h>
23#include <linux/times.h>
24#include <linux/posix-timers.h>
25#include <linux/security.h>
26#include <linux/dcookies.h>
27#include <linux/suspend.h>
28#include <linux/tty.h>
29#include <linux/signal.h>
30#include <linux/cn_proc.h>
31#include <linux/getcpu.h>
32#include <linux/task_io_accounting_ops.h>
33#include <linux/seccomp.h>
34#include <linux/cpu.h>
35#include <linux/personality.h>
36#include <linux/ptrace.h>
37#include <linux/fs_struct.h>
38#include <linux/file.h>
39#include <linux/mount.h>
40#include <linux/gfp.h>
41#include <linux/syscore_ops.h>
42#include <linux/version.h>
43#include <linux/ctype.h>
44
45#include <linux/compat.h>
46#include <linux/syscalls.h>
47#include <linux/kprobes.h>
48#include <linux/user_namespace.h>
49#include <linux/binfmts.h>
50
51#include <linux/sched.h>
52#include <linux/rcupdate.h>
53#include <linux/uidgid.h>
54#include <linux/cred.h>
55
56#include <linux/kmsg_dump.h>
57/* Move somewhere else to avoid recompiling? */
58#include <generated/utsrelease.h>
59
60#include <asm/uaccess.h>
61#include <asm/io.h>
62#include <asm/unistd.h>
63
64#ifndef SET_UNALIGN_CTL
65# define SET_UNALIGN_CTL(a,b) (-EINVAL)
66#endif
67#ifndef GET_UNALIGN_CTL
68# define GET_UNALIGN_CTL(a,b) (-EINVAL)
69#endif
70#ifndef SET_FPEMU_CTL
71# define SET_FPEMU_CTL(a,b) (-EINVAL)
72#endif
73#ifndef GET_FPEMU_CTL
74# define GET_FPEMU_CTL(a,b) (-EINVAL)
75#endif
76#ifndef SET_FPEXC_CTL
77# define SET_FPEXC_CTL(a,b) (-EINVAL)
78#endif
79#ifndef GET_FPEXC_CTL
80# define GET_FPEXC_CTL(a,b) (-EINVAL)
81#endif
82#ifndef GET_ENDIAN
83# define GET_ENDIAN(a,b) (-EINVAL)
84#endif
85#ifndef SET_ENDIAN
86# define SET_ENDIAN(a,b) (-EINVAL)
87#endif
88#ifndef GET_TSC_CTL
89# define GET_TSC_CTL(a) (-EINVAL)
90#endif
91#ifndef SET_TSC_CTL
92# define SET_TSC_CTL(a) (-EINVAL)
93#endif
94
95/*
96 * this is where the system-wide overflow UID and GID are defined, for
97 * architectures that now have 32-bit UID/GID but didn't in the past
98 */
99
100int overflowuid = DEFAULT_OVERFLOWUID;
101int overflowgid = DEFAULT_OVERFLOWGID;
102
103EXPORT_SYMBOL(overflowuid);
104EXPORT_SYMBOL(overflowgid);
105
106/*
107 * the same as above, but for filesystems which can only store a 16-bit
108 * UID and GID. as such, this is needed on all architectures
109 */
110
111int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
112int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;
113
114EXPORT_SYMBOL(fs_overflowuid);
115EXPORT_SYMBOL(fs_overflowgid);
116
117/*
118 * Returns true if current's euid is same as p's uid or euid,
119 * or has CAP_SYS_NICE to p's user_ns.
120 *
121 * Called with rcu_read_lock, creds are safe
122 */
123static bool set_one_prio_perm(struct task_struct *p)
124{
125 const struct cred *cred = current_cred(), *pcred = __task_cred(p);
126
127 if (uid_eq(pcred->uid, cred->euid) ||
128 uid_eq(pcred->euid, cred->euid))
129 return true;
130 if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
131 return true;
132 return false;
133}
134
135/*
136 * set the priority of a task
137 * - the caller must hold the RCU read lock
138 */
139static int set_one_prio(struct task_struct *p, int niceval, int error)
140{
141 int no_nice;
142
143 if (!set_one_prio_perm(p)) {
144 error = -EPERM;
145 goto out;
146 }
147 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
148 error = -EACCES;
149 goto out;
150 }
151 no_nice = security_task_setnice(p, niceval);
152 if (no_nice) {
153 error = no_nice;
154 goto out;
155 }
156 if (error == -ESRCH)
157 error = 0;
158 set_user_nice(p, niceval);
159out:
160 return error;
161}
162
163SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
164{
165 struct task_struct *g, *p;
166 struct user_struct *user;
167 const struct cred *cred = current_cred();
168 int error = -EINVAL;
169 struct pid *pgrp;
170 kuid_t uid;
171
172 if (which > PRIO_USER || which < PRIO_PROCESS)
173 goto out;
174
175 /* normalize: avoid signed division (rounding problems) */
176 error = -ESRCH;
177 if (niceval < MIN_NICE)
178 niceval = MIN_NICE;
179 if (niceval > MAX_NICE)
180 niceval = MAX_NICE;
181
182 rcu_read_lock();
183 read_lock(&tasklist_lock);
184 switch (which) {
185 case PRIO_PROCESS:
186 if (who)
187 p = find_task_by_vpid(who);
188 else
189 p = current;
190 if (p)
191 error = set_one_prio(p, niceval, error);
192 break;
193 case PRIO_PGRP:
194 if (who)
195 pgrp = find_vpid(who);
196 else
197 pgrp = task_pgrp(current);
198 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
199 error = set_one_prio(p, niceval, error);
200 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
201 break;
202 case PRIO_USER:
203 uid = make_kuid(cred->user_ns, who);
204 user = cred->user;
205 if (!who)
206 uid = cred->uid;
207 else if (!uid_eq(uid, cred->uid) &&
208 !(user = find_user(uid)))
209 goto out_unlock; /* No processes for this user */
210
211 do_each_thread(g, p) {
212 if (uid_eq(task_uid(p), uid))
213 error = set_one_prio(p, niceval, error);
214 } while_each_thread(g, p);
215 if (!uid_eq(uid, cred->uid))
216 free_uid(user); /* For find_user() */
217 break;
218 }
219out_unlock:
220 read_unlock(&tasklist_lock);
221 rcu_read_unlock();
222out:
223 return error;
224}
225
226/*
227 * Ugh. To avoid negative return values, "getpriority()" will
228 * not return the normal nice-value, but a negated value that
229 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
230 * to stay compatible.
231 */
232SYSCALL_DEFINE2(getpriority, int, which, int, who)
233{
234 struct task_struct *g, *p;
235 struct user_struct *user;
236 const struct cred *cred = current_cred();
237 long niceval, retval = -ESRCH;
238 struct pid *pgrp;
239 kuid_t uid;
240
241 if (which > PRIO_USER || which < PRIO_PROCESS)
242 return -EINVAL;
243
244 rcu_read_lock();
245 read_lock(&tasklist_lock);
246 switch (which) {
247 case PRIO_PROCESS:
248 if (who)
249 p = find_task_by_vpid(who);
250 else
251 p = current;
252 if (p) {
253 niceval = 20 - task_nice(p);
254 if (niceval > retval)
255 retval = niceval;
256 }
257 break;
258 case PRIO_PGRP:
259 if (who)
260 pgrp = find_vpid(who);
261 else
262 pgrp = task_pgrp(current);
263 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
264 niceval = 20 - task_nice(p);
265 if (niceval > retval)
266 retval = niceval;
267 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
268 break;
269 case PRIO_USER:
270 uid = make_kuid(cred->user_ns, who);
271 user = cred->user;
272 if (!who)
273 uid = cred->uid;
274 else if (!uid_eq(uid, cred->uid) &&
275 !(user = find_user(uid)))
276 goto out_unlock; /* No processes for this user */
277
278 do_each_thread(g, p) {
279 if (uid_eq(task_uid(p), uid)) {
280 niceval = 20 - task_nice(p);
281 if (niceval > retval)
282 retval = niceval;
283 }
284 } while_each_thread(g, p);
285 if (!uid_eq(uid, cred->uid))
286 free_uid(user); /* for find_user() */
287 break;
288 }
289out_unlock:
290 read_unlock(&tasklist_lock);
291 rcu_read_unlock();
292
293 return retval;
294}
295
296/*
297 * Unprivileged users may change the real gid to the effective gid
298 * or vice versa. (BSD-style)
299 *
300 * If you set the real gid at all, or set the effective gid to a value not
301 * equal to the real gid, then the saved gid is set to the new effective gid.
302 *
303 * This makes it possible for a setgid program to completely drop its
304 * privileges, which is often a useful assertion to make when you are doing
305 * a security audit over a program.
306 *
307 * The general idea is that a program which uses just setregid() will be
308 * 100% compatible with BSD. A program which uses just setgid() will be
309 * 100% compatible with POSIX with saved IDs.
310 *
311 * SMP: There are not races, the GIDs are checked only by filesystem
312 * operations (as far as semantic preservation is concerned).
313 */
314SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
315{
316 struct user_namespace *ns = current_user_ns();
317 const struct cred *old;
318 struct cred *new;
319 int retval;
320 kgid_t krgid, kegid;
321
322 krgid = make_kgid(ns, rgid);
323 kegid = make_kgid(ns, egid);
324
325 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
326 return -EINVAL;
327 if ((egid != (gid_t) -1) && !gid_valid(kegid))
328 return -EINVAL;
329
330 new = prepare_creds();
331 if (!new)
332 return -ENOMEM;
333 old = current_cred();
334
335 retval = -EPERM;
336 if (rgid != (gid_t) -1) {
337 if (gid_eq(old->gid, krgid) ||
338 gid_eq(old->egid, krgid) ||
339 ns_capable(old->user_ns, CAP_SETGID))
340 new->gid = krgid;
341 else
342 goto error;
343 }
344 if (egid != (gid_t) -1) {
345 if (gid_eq(old->gid, kegid) ||
346 gid_eq(old->egid, kegid) ||
347 gid_eq(old->sgid, kegid) ||
348 ns_capable(old->user_ns, CAP_SETGID))
349 new->egid = kegid;
350 else
351 goto error;
352 }
353
354 if (rgid != (gid_t) -1 ||
355 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
356 new->sgid = new->egid;
357 new->fsgid = new->egid;
358
359 return commit_creds(new);
360
361error:
362 abort_creds(new);
363 return retval;
364}
365
366/*
367 * setgid() is implemented like SysV w/ SAVED_IDS
368 *
369 * SMP: Same implicit races as above.
370 */
371SYSCALL_DEFINE1(setgid, gid_t, gid)
372{
373 struct user_namespace *ns = current_user_ns();
374 const struct cred *old;
375 struct cred *new;
376 int retval;
377 kgid_t kgid;
378
379 kgid = make_kgid(ns, gid);
380 if (!gid_valid(kgid))
381 return -EINVAL;
382
383 new = prepare_creds();
384 if (!new)
385 return -ENOMEM;
386 old = current_cred();
387
388 retval = -EPERM;
389 if (ns_capable(old->user_ns, CAP_SETGID))
390 new->gid = new->egid = new->sgid = new->fsgid = kgid;
391 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
392 new->egid = new->fsgid = kgid;
393 else
394 goto error;
395
396 return commit_creds(new);
397
398error:
399 abort_creds(new);
400 return retval;
401}
402
403/*
404 * change the user struct in a credentials set to match the new UID
405 */
406static int set_user(struct cred *new)
407{
408 struct user_struct *new_user;
409
410 new_user = alloc_uid(new->uid);
411 if (!new_user)
412 return -EAGAIN;
413
414 /*
415 * We don't fail in case of NPROC limit excess here because too many
416 * poorly written programs don't check set*uid() return code, assuming
417 * it never fails if called by root. We may still enforce NPROC limit
418 * for programs doing set*uid()+execve() by harmlessly deferring the
419 * failure to the execve() stage.
420 */
421 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
422 new_user != INIT_USER)
423 current->flags |= PF_NPROC_EXCEEDED;
424 else
425 current->flags &= ~PF_NPROC_EXCEEDED;
426
427 free_uid(new->user);
428 new->user = new_user;
429 return 0;
430}
431
432/*
433 * Unprivileged users may change the real uid to the effective uid
434 * or vice versa. (BSD-style)
435 *
436 * If you set the real uid at all, or set the effective uid to a value not
437 * equal to the real uid, then the saved uid is set to the new effective uid.
438 *
439 * This makes it possible for a setuid program to completely drop its
440 * privileges, which is often a useful assertion to make when you are doing
441 * a security audit over a program.
442 *
443 * The general idea is that a program which uses just setreuid() will be
444 * 100% compatible with BSD. A program which uses just setuid() will be
445 * 100% compatible with POSIX with saved IDs.
446 */
447SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
448{
449 struct user_namespace *ns = current_user_ns();
450 const struct cred *old;
451 struct cred *new;
452 int retval;
453 kuid_t kruid, keuid;
454
455 kruid = make_kuid(ns, ruid);
456 keuid = make_kuid(ns, euid);
457
458 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
459 return -EINVAL;
460 if ((euid != (uid_t) -1) && !uid_valid(keuid))
461 return -EINVAL;
462
463 new = prepare_creds();
464 if (!new)
465 return -ENOMEM;
466 old = current_cred();
467
468 retval = -EPERM;
469 if (ruid != (uid_t) -1) {
470 new->uid = kruid;
471 if (!uid_eq(old->uid, kruid) &&
472 !uid_eq(old->euid, kruid) &&
473 !ns_capable(old->user_ns, CAP_SETUID))
474 goto error;
475 }
476
477 if (euid != (uid_t) -1) {
478 new->euid = keuid;
479 if (!uid_eq(old->uid, keuid) &&
480 !uid_eq(old->euid, keuid) &&
481 !uid_eq(old->suid, keuid) &&
482 !ns_capable(old->user_ns, CAP_SETUID))
483 goto error;
484 }
485
486 if (!uid_eq(new->uid, old->uid)) {
487 retval = set_user(new);
488 if (retval < 0)
489 goto error;
490 }
491 if (ruid != (uid_t) -1 ||
492 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
493 new->suid = new->euid;
494 new->fsuid = new->euid;
495
496 retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
497 if (retval < 0)
498 goto error;
499
500 return commit_creds(new);
501
502error:
503 abort_creds(new);
504 return retval;
505}
506
507/*
508 * setuid() is implemented like SysV with SAVED_IDS
509 *
510 * Note that SAVED_ID's is deficient in that a setuid root program
511 * like sendmail, for example, cannot set its uid to be a normal
512 * user and then switch back, because if you're root, setuid() sets
513 * the saved uid too. If you don't like this, blame the bright people
514 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
515 * will allow a root program to temporarily drop privileges and be able to
516 * regain them by swapping the real and effective uid.
517 */
518SYSCALL_DEFINE1(setuid, uid_t, uid)
519{
520 struct user_namespace *ns = current_user_ns();
521 const struct cred *old;
522 struct cred *new;
523 int retval;
524 kuid_t kuid;
525
526 kuid = make_kuid(ns, uid);
527 if (!uid_valid(kuid))
528 return -EINVAL;
529
530 new = prepare_creds();
531 if (!new)
532 return -ENOMEM;
533 old = current_cred();
534
535 retval = -EPERM;
536 if (ns_capable(old->user_ns, CAP_SETUID)) {
537 new->suid = new->uid = kuid;
538 if (!uid_eq(kuid, old->uid)) {
539 retval = set_user(new);
540 if (retval < 0)
541 goto error;
542 }
543 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
544 goto error;
545 }
546
547 new->fsuid = new->euid = kuid;
548
549 retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
550 if (retval < 0)
551 goto error;
552
553 return commit_creds(new);
554
555error:
556 abort_creds(new);
557 return retval;
558}
559
560
561/*
562 * This function implements a generic ability to update ruid, euid,
563 * and suid. This allows you to implement the 4.4 compatible seteuid().
564 */
565SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
566{
567 struct user_namespace *ns = current_user_ns();
568 const struct cred *old;
569 struct cred *new;
570 int retval;
571 kuid_t kruid, keuid, ksuid;
572
573 kruid = make_kuid(ns, ruid);
574 keuid = make_kuid(ns, euid);
575 ksuid = make_kuid(ns, suid);
576
577 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
578 return -EINVAL;
579
580 if ((euid != (uid_t) -1) && !uid_valid(keuid))
581 return -EINVAL;
582
583 if ((suid != (uid_t) -1) && !uid_valid(ksuid))
584 return -EINVAL;
585
586 new = prepare_creds();
587 if (!new)
588 return -ENOMEM;
589
590 old = current_cred();
591
592 retval = -EPERM;
593 if (!ns_capable(old->user_ns, CAP_SETUID)) {
594 if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
595 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
596 goto error;
597 if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
598 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
599 goto error;
600 if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
601 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
602 goto error;
603 }
604
605 if (ruid != (uid_t) -1) {
606 new->uid = kruid;
607 if (!uid_eq(kruid, old->uid)) {
608 retval = set_user(new);
609 if (retval < 0)
610 goto error;
611 }
612 }
613 if (euid != (uid_t) -1)
614 new->euid = keuid;
615 if (suid != (uid_t) -1)
616 new->suid = ksuid;
617 new->fsuid = new->euid;
618
619 retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
620 if (retval < 0)
621 goto error;
622
623 return commit_creds(new);
624
625error:
626 abort_creds(new);
627 return retval;
628}
629
630SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
631{
632 const struct cred *cred = current_cred();
633 int retval;
634 uid_t ruid, euid, suid;
635
636 ruid = from_kuid_munged(cred->user_ns, cred->uid);
637 euid = from_kuid_munged(cred->user_ns, cred->euid);
638 suid = from_kuid_munged(cred->user_ns, cred->suid);
639
640 if (!(retval = put_user(ruid, ruidp)) &&
641 !(retval = put_user(euid, euidp)))
642 retval = put_user(suid, suidp);
643
644 return retval;
645}
646
647/*
648 * Same as above, but for rgid, egid, sgid.
649 */
650SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
651{
652 struct user_namespace *ns = current_user_ns();
653 const struct cred *old;
654 struct cred *new;
655 int retval;
656 kgid_t krgid, kegid, ksgid;
657
658 krgid = make_kgid(ns, rgid);
659 kegid = make_kgid(ns, egid);
660 ksgid = make_kgid(ns, sgid);
661
662 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
663 return -EINVAL;
664 if ((egid != (gid_t) -1) && !gid_valid(kegid))
665 return -EINVAL;
666 if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
667 return -EINVAL;
668
669 new = prepare_creds();
670 if (!new)
671 return -ENOMEM;
672 old = current_cred();
673
674 retval = -EPERM;
675 if (!ns_capable(old->user_ns, CAP_SETGID)) {
676 if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
677 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
678 goto error;
679 if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
680 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
681 goto error;
682 if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
683 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
684 goto error;
685 }
686
687 if (rgid != (gid_t) -1)
688 new->gid = krgid;
689 if (egid != (gid_t) -1)
690 new->egid = kegid;
691 if (sgid != (gid_t) -1)
692 new->sgid = ksgid;
693 new->fsgid = new->egid;
694
695 return commit_creds(new);
696
697error:
698 abort_creds(new);
699 return retval;
700}
701
702SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
703{
704 const struct cred *cred = current_cred();
705 int retval;
706 gid_t rgid, egid, sgid;
707
708 rgid = from_kgid_munged(cred->user_ns, cred->gid);
709 egid = from_kgid_munged(cred->user_ns, cred->egid);
710 sgid = from_kgid_munged(cred->user_ns, cred->sgid);
711
712 if (!(retval = put_user(rgid, rgidp)) &&
713 !(retval = put_user(egid, egidp)))
714 retval = put_user(sgid, sgidp);
715
716 return retval;
717}
718
719
720/*
721 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
722 * is used for "access()" and for the NFS daemon (letting nfsd stay at
723 * whatever uid it wants to). It normally shadows "euid", except when
724 * explicitly set by setfsuid() or for access..
725 */
726SYSCALL_DEFINE1(setfsuid, uid_t, uid)
727{
728 const struct cred *old;
729 struct cred *new;
730 uid_t old_fsuid;
731 kuid_t kuid;
732
733 old = current_cred();
734 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
735
736 kuid = make_kuid(old->user_ns, uid);
737 if (!uid_valid(kuid))
738 return old_fsuid;
739
740 new = prepare_creds();
741 if (!new)
742 return old_fsuid;
743
744 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
745 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
746 ns_capable(old->user_ns, CAP_SETUID)) {
747 if (!uid_eq(kuid, old->fsuid)) {
748 new->fsuid = kuid;
749 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
750 goto change_okay;
751 }
752 }
753
754 abort_creds(new);
755 return old_fsuid;
756
757change_okay:
758 commit_creds(new);
759 return old_fsuid;
760}
761
762/*
763 * Samma på svenska..
764 */
765SYSCALL_DEFINE1(setfsgid, gid_t, gid)
766{
767 const struct cred *old;
768 struct cred *new;
769 gid_t old_fsgid;
770 kgid_t kgid;
771
772 old = current_cred();
773 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
774
775 kgid = make_kgid(old->user_ns, gid);
776 if (!gid_valid(kgid))
777 return old_fsgid;
778
779 new = prepare_creds();
780 if (!new)
781 return old_fsgid;
782
783 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
784 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
785 ns_capable(old->user_ns, CAP_SETGID)) {
786 if (!gid_eq(kgid, old->fsgid)) {
787 new->fsgid = kgid;
788 goto change_okay;
789 }
790 }
791
792 abort_creds(new);
793 return old_fsgid;
794
795change_okay:
796 commit_creds(new);
797 return old_fsgid;
798}
799
800/**
801 * sys_getpid - return the thread group id of the current process
802 *
803 * Note, despite the name, this returns the tgid not the pid. The tgid and
804 * the pid are identical unless CLONE_THREAD was specified on clone() in
805 * which case the tgid is the same in all threads of the same group.
806 *
807 * This is SMP safe as current->tgid does not change.
808 */
809SYSCALL_DEFINE0(getpid)
810{
811 return task_tgid_vnr(current);
812}
813
814/* Thread ID - the internal kernel "pid" */
815SYSCALL_DEFINE0(gettid)
816{
817 return task_pid_vnr(current);
818}
819
820/*
821 * Accessing ->real_parent is not SMP-safe, it could
822 * change from under us. However, we can use a stale
823 * value of ->real_parent under rcu_read_lock(), see
824 * release_task()->call_rcu(delayed_put_task_struct).
825 */
826SYSCALL_DEFINE0(getppid)
827{
828 int pid;
829
830 rcu_read_lock();
831 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
832 rcu_read_unlock();
833
834 return pid;
835}
836
837SYSCALL_DEFINE0(getuid)
838{
839 /* Only we change this so SMP safe */
840 return from_kuid_munged(current_user_ns(), current_uid());
841}
842
843SYSCALL_DEFINE0(geteuid)
844{
845 /* Only we change this so SMP safe */
846 return from_kuid_munged(current_user_ns(), current_euid());
847}
848
849SYSCALL_DEFINE0(getgid)
850{
851 /* Only we change this so SMP safe */
852 return from_kgid_munged(current_user_ns(), current_gid());
853}
854
855SYSCALL_DEFINE0(getegid)
856{
857 /* Only we change this so SMP safe */
858 return from_kgid_munged(current_user_ns(), current_egid());
859}
860
861void do_sys_times(struct tms *tms)
862{
863 cputime_t tgutime, tgstime, cutime, cstime;
864
865 spin_lock_irq(¤t->sighand->siglock);
866 thread_group_cputime_adjusted(current, &tgutime, &tgstime);
867 cutime = current->signal->cutime;
868 cstime = current->signal->cstime;
869 spin_unlock_irq(¤t->sighand->siglock);
870 tms->tms_utime = cputime_to_clock_t(tgutime);
871 tms->tms_stime = cputime_to_clock_t(tgstime);
872 tms->tms_cutime = cputime_to_clock_t(cutime);
873 tms->tms_cstime = cputime_to_clock_t(cstime);
874}
875
876SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
877{
878 if (tbuf) {
879 struct tms tmp;
880
881 do_sys_times(&tmp);
882 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
883 return -EFAULT;
884 }
885 force_successful_syscall_return();
886 return (long) jiffies_64_to_clock_t(get_jiffies_64());
887}
888
889/*
890 * This needs some heavy checking ...
891 * I just haven't the stomach for it. I also don't fully
892 * understand sessions/pgrp etc. Let somebody who does explain it.
893 *
894 * OK, I think I have the protection semantics right.... this is really
895 * only important on a multi-user system anyway, to make sure one user
896 * can't send a signal to a process owned by another. -TYT, 12/12/91
897 *
898 * !PF_FORKNOEXEC check to conform completely to POSIX.
899 */
900SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
901{
902 struct task_struct *p;
903 struct task_struct *group_leader = current->group_leader;
904 struct pid *pgrp;
905 int err;
906
907 if (!pid)
908 pid = task_pid_vnr(group_leader);
909 if (!pgid)
910 pgid = pid;
911 if (pgid < 0)
912 return -EINVAL;
913 rcu_read_lock();
914
915 /* From this point forward we keep holding onto the tasklist lock
916 * so that our parent does not change from under us. -DaveM
917 */
918 write_lock_irq(&tasklist_lock);
919
920 err = -ESRCH;
921 p = find_task_by_vpid(pid);
922 if (!p)
923 goto out;
924
925 err = -EINVAL;
926 if (!thread_group_leader(p))
927 goto out;
928
929 if (same_thread_group(p->real_parent, group_leader)) {
930 err = -EPERM;
931 if (task_session(p) != task_session(group_leader))
932 goto out;
933 err = -EACCES;
934 if (!(p->flags & PF_FORKNOEXEC))
935 goto out;
936 } else {
937 err = -ESRCH;
938 if (p != group_leader)
939 goto out;
940 }
941
942 err = -EPERM;
943 if (p->signal->leader)
944 goto out;
945
946 pgrp = task_pid(p);
947 if (pgid != pid) {
948 struct task_struct *g;
949
950 pgrp = find_vpid(pgid);
951 g = pid_task(pgrp, PIDTYPE_PGID);
952 if (!g || task_session(g) != task_session(group_leader))
953 goto out;
954 }
955
956 err = security_task_setpgid(p, pgid);
957 if (err)
958 goto out;
959
960 if (task_pgrp(p) != pgrp)
961 change_pid(p, PIDTYPE_PGID, pgrp);
962
963 err = 0;
964out:
965 /* All paths lead to here, thus we are safe. -DaveM */
966 write_unlock_irq(&tasklist_lock);
967 rcu_read_unlock();
968 return err;
969}
970
971SYSCALL_DEFINE1(getpgid, pid_t, pid)
972{
973 struct task_struct *p;
974 struct pid *grp;
975 int retval;
976
977 rcu_read_lock();
978 if (!pid)
979 grp = task_pgrp(current);
980 else {
981 retval = -ESRCH;
982 p = find_task_by_vpid(pid);
983 if (!p)
984 goto out;
985 grp = task_pgrp(p);
986 if (!grp)
987 goto out;
988
989 retval = security_task_getpgid(p);
990 if (retval)
991 goto out;
992 }
993 retval = pid_vnr(grp);
994out:
995 rcu_read_unlock();
996 return retval;
997}
998
999#ifdef __ARCH_WANT_SYS_GETPGRP
1000
1001SYSCALL_DEFINE0(getpgrp)
1002{
1003 return sys_getpgid(0);
1004}
1005
1006#endif
1007
1008SYSCALL_DEFINE1(getsid, pid_t, pid)
1009{
1010 struct task_struct *p;
1011 struct pid *sid;
1012 int retval;
1013
1014 rcu_read_lock();
1015 if (!pid)
1016 sid = task_session(current);
1017 else {
1018 retval = -ESRCH;
1019 p = find_task_by_vpid(pid);
1020 if (!p)
1021 goto out;
1022 sid = task_session(p);
1023 if (!sid)
1024 goto out;
1025
1026 retval = security_task_getsid(p);
1027 if (retval)
1028 goto out;
1029 }
1030 retval = pid_vnr(sid);
1031out:
1032 rcu_read_unlock();
1033 return retval;
1034}
1035
1036static void set_special_pids(struct pid *pid)
1037{
1038 struct task_struct *curr = current->group_leader;
1039
1040 if (task_session(curr) != pid)
1041 change_pid(curr, PIDTYPE_SID, pid);
1042
1043 if (task_pgrp(curr) != pid)
1044 change_pid(curr, PIDTYPE_PGID, pid);
1045}
1046
1047SYSCALL_DEFINE0(setsid)
1048{
1049 struct task_struct *group_leader = current->group_leader;
1050 struct pid *sid = task_pid(group_leader);
1051 pid_t session = pid_vnr(sid);
1052 int err = -EPERM;
1053
1054 write_lock_irq(&tasklist_lock);
1055 /* Fail if I am already a session leader */
1056 if (group_leader->signal->leader)
1057 goto out;
1058
1059 /* Fail if a process group id already exists that equals the
1060 * proposed session id.
1061 */
1062 if (pid_task(sid, PIDTYPE_PGID))
1063 goto out;
1064
1065 group_leader->signal->leader = 1;
1066 set_special_pids(sid);
1067
1068 proc_clear_tty(group_leader);
1069
1070 err = session;
1071out:
1072 write_unlock_irq(&tasklist_lock);
1073 if (err > 0) {
1074 proc_sid_connector(group_leader);
1075 sched_autogroup_create_attach(group_leader);
1076 }
1077 return err;
1078}
1079
1080DECLARE_RWSEM(uts_sem);
1081
1082#ifdef COMPAT_UTS_MACHINE
1083#define override_architecture(name) \
1084 (personality(current->personality) == PER_LINUX32 && \
1085 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1086 sizeof(COMPAT_UTS_MACHINE)))
1087#else
1088#define override_architecture(name) 0
1089#endif
1090
1091/*
1092 * Work around broken programs that cannot handle "Linux 3.0".
1093 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1094 */
1095static int override_release(char __user *release, size_t len)
1096{
1097 int ret = 0;
1098
1099 if (current->personality & UNAME26) {
1100 const char *rest = UTS_RELEASE;
1101 char buf[65] = { 0 };
1102 int ndots = 0;
1103 unsigned v;
1104 size_t copy;
1105
1106 while (*rest) {
1107 if (*rest == '.' && ++ndots >= 3)
1108 break;
1109 if (!isdigit(*rest) && *rest != '.')
1110 break;
1111 rest++;
1112 }
1113 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
1114 copy = clamp_t(size_t, len, 1, sizeof(buf));
1115 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1116 ret = copy_to_user(release, buf, copy + 1);
1117 }
1118 return ret;
1119}
1120
1121SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1122{
1123 int errno = 0;
1124
1125 down_read(&uts_sem);
1126 if (copy_to_user(name, utsname(), sizeof *name))
1127 errno = -EFAULT;
1128 up_read(&uts_sem);
1129
1130 if (!errno && override_release(name->release, sizeof(name->release)))
1131 errno = -EFAULT;
1132 if (!errno && override_architecture(name))
1133 errno = -EFAULT;
1134 return errno;
1135}
1136
1137#ifdef __ARCH_WANT_SYS_OLD_UNAME
1138/*
1139 * Old cruft
1140 */
1141SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1142{
1143 int error = 0;
1144
1145 if (!name)
1146 return -EFAULT;
1147
1148 down_read(&uts_sem);
1149 if (copy_to_user(name, utsname(), sizeof(*name)))
1150 error = -EFAULT;
1151 up_read(&uts_sem);
1152
1153 if (!error && override_release(name->release, sizeof(name->release)))
1154 error = -EFAULT;
1155 if (!error && override_architecture(name))
1156 error = -EFAULT;
1157 return error;
1158}
1159
1160SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1161{
1162 int error;
1163
1164 if (!name)
1165 return -EFAULT;
1166 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1167 return -EFAULT;
1168
1169 down_read(&uts_sem);
1170 error = __copy_to_user(&name->sysname, &utsname()->sysname,
1171 __OLD_UTS_LEN);
1172 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1173 error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1174 __OLD_UTS_LEN);
1175 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1176 error |= __copy_to_user(&name->release, &utsname()->release,
1177 __OLD_UTS_LEN);
1178 error |= __put_user(0, name->release + __OLD_UTS_LEN);
1179 error |= __copy_to_user(&name->version, &utsname()->version,
1180 __OLD_UTS_LEN);
1181 error |= __put_user(0, name->version + __OLD_UTS_LEN);
1182 error |= __copy_to_user(&name->machine, &utsname()->machine,
1183 __OLD_UTS_LEN);
1184 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1185 up_read(&uts_sem);
1186
1187 if (!error && override_architecture(name))
1188 error = -EFAULT;
1189 if (!error && override_release(name->release, sizeof(name->release)))
1190 error = -EFAULT;
1191 return error ? -EFAULT : 0;
1192}
1193#endif
1194
1195SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1196{
1197 int errno;
1198 char tmp[__NEW_UTS_LEN];
1199
1200 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1201 return -EPERM;
1202
1203 if (len < 0 || len > __NEW_UTS_LEN)
1204 return -EINVAL;
1205 down_write(&uts_sem);
1206 errno = -EFAULT;
1207 if (!copy_from_user(tmp, name, len)) {
1208 struct new_utsname *u = utsname();
1209
1210 memcpy(u->nodename, tmp, len);
1211 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1212 errno = 0;
1213 uts_proc_notify(UTS_PROC_HOSTNAME);
1214 }
1215 up_write(&uts_sem);
1216 return errno;
1217}
1218
1219#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1220
1221SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1222{
1223 int i, errno;
1224 struct new_utsname *u;
1225
1226 if (len < 0)
1227 return -EINVAL;
1228 down_read(&uts_sem);
1229 u = utsname();
1230 i = 1 + strlen(u->nodename);
1231 if (i > len)
1232 i = len;
1233 errno = 0;
1234 if (copy_to_user(name, u->nodename, i))
1235 errno = -EFAULT;
1236 up_read(&uts_sem);
1237 return errno;
1238}
1239
1240#endif
1241
1242/*
1243 * Only setdomainname; getdomainname can be implemented by calling
1244 * uname()
1245 */
1246SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1247{
1248 int errno;
1249 char tmp[__NEW_UTS_LEN];
1250
1251 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1252 return -EPERM;
1253 if (len < 0 || len > __NEW_UTS_LEN)
1254 return -EINVAL;
1255
1256 down_write(&uts_sem);
1257 errno = -EFAULT;
1258 if (!copy_from_user(tmp, name, len)) {
1259 struct new_utsname *u = utsname();
1260
1261 memcpy(u->domainname, tmp, len);
1262 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1263 errno = 0;
1264 uts_proc_notify(UTS_PROC_DOMAINNAME);
1265 }
1266 up_write(&uts_sem);
1267 return errno;
1268}
1269
1270SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1271{
1272 struct rlimit value;
1273 int ret;
1274
1275 ret = do_prlimit(current, resource, NULL, &value);
1276 if (!ret)
1277 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1278
1279 return ret;
1280}
1281
1282#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1283
1284/*
1285 * Back compatibility for getrlimit. Needed for some apps.
1286 */
1287
1288SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1289 struct rlimit __user *, rlim)
1290{
1291 struct rlimit x;
1292 if (resource >= RLIM_NLIMITS)
1293 return -EINVAL;
1294
1295 task_lock(current->group_leader);
1296 x = current->signal->rlim[resource];
1297 task_unlock(current->group_leader);
1298 if (x.rlim_cur > 0x7FFFFFFF)
1299 x.rlim_cur = 0x7FFFFFFF;
1300 if (x.rlim_max > 0x7FFFFFFF)
1301 x.rlim_max = 0x7FFFFFFF;
1302 return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
1303}
1304
1305#endif
1306
1307static inline bool rlim64_is_infinity(__u64 rlim64)
1308{
1309#if BITS_PER_LONG < 64
1310 return rlim64 >= ULONG_MAX;
1311#else
1312 return rlim64 == RLIM64_INFINITY;
1313#endif
1314}
1315
1316static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1317{
1318 if (rlim->rlim_cur == RLIM_INFINITY)
1319 rlim64->rlim_cur = RLIM64_INFINITY;
1320 else
1321 rlim64->rlim_cur = rlim->rlim_cur;
1322 if (rlim->rlim_max == RLIM_INFINITY)
1323 rlim64->rlim_max = RLIM64_INFINITY;
1324 else
1325 rlim64->rlim_max = rlim->rlim_max;
1326}
1327
1328static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1329{
1330 if (rlim64_is_infinity(rlim64->rlim_cur))
1331 rlim->rlim_cur = RLIM_INFINITY;
1332 else
1333 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1334 if (rlim64_is_infinity(rlim64->rlim_max))
1335 rlim->rlim_max = RLIM_INFINITY;
1336 else
1337 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1338}
1339
1340/* make sure you are allowed to change @tsk limits before calling this */
1341int do_prlimit(struct task_struct *tsk, unsigned int resource,
1342 struct rlimit *new_rlim, struct rlimit *old_rlim)
1343{
1344 struct rlimit *rlim;
1345 int retval = 0;
1346
1347 if (resource >= RLIM_NLIMITS)
1348 return -EINVAL;
1349 if (new_rlim) {
1350 if (new_rlim->rlim_cur > new_rlim->rlim_max)
1351 return -EINVAL;
1352 if (resource == RLIMIT_NOFILE &&
1353 new_rlim->rlim_max > sysctl_nr_open)
1354 return -EPERM;
1355 }
1356
1357 /* protect tsk->signal and tsk->sighand from disappearing */
1358 read_lock(&tasklist_lock);
1359 if (!tsk->sighand) {
1360 retval = -ESRCH;
1361 goto out;
1362 }
1363
1364 rlim = tsk->signal->rlim + resource;
1365 task_lock(tsk->group_leader);
1366 if (new_rlim) {
1367 /* Keep the capable check against init_user_ns until
1368 cgroups can contain all limits */
1369 if (new_rlim->rlim_max > rlim->rlim_max &&
1370 !capable(CAP_SYS_RESOURCE))
1371 retval = -EPERM;
1372 if (!retval)
1373 retval = security_task_setrlimit(tsk->group_leader,
1374 resource, new_rlim);
1375 if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
1376 /*
1377 * The caller is asking for an immediate RLIMIT_CPU
1378 * expiry. But we use the zero value to mean "it was
1379 * never set". So let's cheat and make it one second
1380 * instead
1381 */
1382 new_rlim->rlim_cur = 1;
1383 }
1384 }
1385 if (!retval) {
1386 if (old_rlim)
1387 *old_rlim = *rlim;
1388 if (new_rlim)
1389 *rlim = *new_rlim;
1390 }
1391 task_unlock(tsk->group_leader);
1392
1393 /*
1394 * RLIMIT_CPU handling. Note that the kernel fails to return an error
1395 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
1396 * very long-standing error, and fixing it now risks breakage of
1397 * applications, so we live with it
1398 */
1399 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1400 new_rlim->rlim_cur != RLIM_INFINITY)
1401 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1402out:
1403 read_unlock(&tasklist_lock);
1404 return retval;
1405}
1406
1407/* rcu lock must be held */
1408static int check_prlimit_permission(struct task_struct *task)
1409{
1410 const struct cred *cred = current_cred(), *tcred;
1411
1412 if (current == task)
1413 return 0;
1414
1415 tcred = __task_cred(task);
1416 if (uid_eq(cred->uid, tcred->euid) &&
1417 uid_eq(cred->uid, tcred->suid) &&
1418 uid_eq(cred->uid, tcred->uid) &&
1419 gid_eq(cred->gid, tcred->egid) &&
1420 gid_eq(cred->gid, tcred->sgid) &&
1421 gid_eq(cred->gid, tcred->gid))
1422 return 0;
1423 if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1424 return 0;
1425
1426 return -EPERM;
1427}
1428
1429SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1430 const struct rlimit64 __user *, new_rlim,
1431 struct rlimit64 __user *, old_rlim)
1432{
1433 struct rlimit64 old64, new64;
1434 struct rlimit old, new;
1435 struct task_struct *tsk;
1436 int ret;
1437
1438 if (new_rlim) {
1439 if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1440 return -EFAULT;
1441 rlim64_to_rlim(&new64, &new);
1442 }
1443
1444 rcu_read_lock();
1445 tsk = pid ? find_task_by_vpid(pid) : current;
1446 if (!tsk) {
1447 rcu_read_unlock();
1448 return -ESRCH;
1449 }
1450 ret = check_prlimit_permission(tsk);
1451 if (ret) {
1452 rcu_read_unlock();
1453 return ret;
1454 }
1455 get_task_struct(tsk);
1456 rcu_read_unlock();
1457
1458 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1459 old_rlim ? &old : NULL);
1460
1461 if (!ret && old_rlim) {
1462 rlim_to_rlim64(&old, &old64);
1463 if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1464 ret = -EFAULT;
1465 }
1466
1467 put_task_struct(tsk);
1468 return ret;
1469}
1470
1471SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1472{
1473 struct rlimit new_rlim;
1474
1475 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1476 return -EFAULT;
1477 return do_prlimit(current, resource, &new_rlim, NULL);
1478}
1479
1480/*
1481 * It would make sense to put struct rusage in the task_struct,
1482 * except that would make the task_struct be *really big*. After
1483 * task_struct gets moved into malloc'ed memory, it would
1484 * make sense to do this. It will make moving the rest of the information
1485 * a lot simpler! (Which we're not doing right now because we're not
1486 * measuring them yet).
1487 *
1488 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1489 * races with threads incrementing their own counters. But since word
1490 * reads are atomic, we either get new values or old values and we don't
1491 * care which for the sums. We always take the siglock to protect reading
1492 * the c* fields from p->signal from races with exit.c updating those
1493 * fields when reaping, so a sample either gets all the additions of a
1494 * given child after it's reaped, or none so this sample is before reaping.
1495 *
1496 * Locking:
1497 * We need to take the siglock for CHILDEREN, SELF and BOTH
1498 * for the cases current multithreaded, non-current single threaded
1499 * non-current multithreaded. Thread traversal is now safe with
1500 * the siglock held.
1501 * Strictly speaking, we donot need to take the siglock if we are current and
1502 * single threaded, as no one else can take our signal_struct away, no one
1503 * else can reap the children to update signal->c* counters, and no one else
1504 * can race with the signal-> fields. If we do not take any lock, the
1505 * signal-> fields could be read out of order while another thread was just
1506 * exiting. So we should place a read memory barrier when we avoid the lock.
1507 * On the writer side, write memory barrier is implied in __exit_signal
1508 * as __exit_signal releases the siglock spinlock after updating the signal->
1509 * fields. But we don't do this yet to keep things simple.
1510 *
1511 */
1512
1513static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1514{
1515 r->ru_nvcsw += t->nvcsw;
1516 r->ru_nivcsw += t->nivcsw;
1517 r->ru_minflt += t->min_flt;
1518 r->ru_majflt += t->maj_flt;
1519 r->ru_inblock += task_io_get_inblock(t);
1520 r->ru_oublock += task_io_get_oublock(t);
1521}
1522
1523static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1524{
1525 struct task_struct *t;
1526 unsigned long flags;
1527 cputime_t tgutime, tgstime, utime, stime;
1528 unsigned long maxrss = 0;
1529
1530 memset((char *) r, 0, sizeof *r);
1531 utime = stime = 0;
1532
1533 if (who == RUSAGE_THREAD) {
1534 task_cputime_adjusted(current, &utime, &stime);
1535 accumulate_thread_rusage(p, r);
1536 maxrss = p->signal->maxrss;
1537 goto out;
1538 }
1539
1540 if (!lock_task_sighand(p, &flags))
1541 return;
1542
1543 switch (who) {
1544 case RUSAGE_BOTH:
1545 case RUSAGE_CHILDREN:
1546 utime = p->signal->cutime;
1547 stime = p->signal->cstime;
1548 r->ru_nvcsw = p->signal->cnvcsw;
1549 r->ru_nivcsw = p->signal->cnivcsw;
1550 r->ru_minflt = p->signal->cmin_flt;
1551 r->ru_majflt = p->signal->cmaj_flt;
1552 r->ru_inblock = p->signal->cinblock;
1553 r->ru_oublock = p->signal->coublock;
1554 maxrss = p->signal->cmaxrss;
1555
1556 if (who == RUSAGE_CHILDREN)
1557 break;
1558
1559 case RUSAGE_SELF:
1560 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1561 utime += tgutime;
1562 stime += tgstime;
1563 r->ru_nvcsw += p->signal->nvcsw;
1564 r->ru_nivcsw += p->signal->nivcsw;
1565 r->ru_minflt += p->signal->min_flt;
1566 r->ru_majflt += p->signal->maj_flt;
1567 r->ru_inblock += p->signal->inblock;
1568 r->ru_oublock += p->signal->oublock;
1569 if (maxrss < p->signal->maxrss)
1570 maxrss = p->signal->maxrss;
1571 t = p;
1572 do {
1573 accumulate_thread_rusage(t, r);
1574 } while_each_thread(p, t);
1575 break;
1576
1577 default:
1578 BUG();
1579 }
1580 unlock_task_sighand(p, &flags);
1581
1582out:
1583 cputime_to_timeval(utime, &r->ru_utime);
1584 cputime_to_timeval(stime, &r->ru_stime);
1585
1586 if (who != RUSAGE_CHILDREN) {
1587 struct mm_struct *mm = get_task_mm(p);
1588 if (mm) {
1589 setmax_mm_hiwater_rss(&maxrss, mm);
1590 mmput(mm);
1591 }
1592 }
1593 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1594}
1595
1596int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1597{
1598 struct rusage r;
1599 k_getrusage(p, who, &r);
1600 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1601}
1602
1603SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1604{
1605 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1606 who != RUSAGE_THREAD)
1607 return -EINVAL;
1608 return getrusage(current, who, ru);
1609}
1610
1611#ifdef CONFIG_COMPAT
1612COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1613{
1614 struct rusage r;
1615
1616 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1617 who != RUSAGE_THREAD)
1618 return -EINVAL;
1619
1620 k_getrusage(current, who, &r);
1621 return put_compat_rusage(&r, ru);
1622}
1623#endif
1624
1625SYSCALL_DEFINE1(umask, int, mask)
1626{
1627 mask = xchg(¤t->fs->umask, mask & S_IRWXUGO);
1628 return mask;
1629}
1630
1631static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1632{
1633 struct fd exe;
1634 struct inode *inode;
1635 int err;
1636
1637 exe = fdget(fd);
1638 if (!exe.file)
1639 return -EBADF;
1640
1641 inode = file_inode(exe.file);
1642
1643 /*
1644 * Because the original mm->exe_file points to executable file, make
1645 * sure that this one is executable as well, to avoid breaking an
1646 * overall picture.
1647 */
1648 err = -EACCES;
1649 if (!S_ISREG(inode->i_mode) ||
1650 exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
1651 goto exit;
1652
1653 err = inode_permission(inode, MAY_EXEC);
1654 if (err)
1655 goto exit;
1656
1657 down_write(&mm->mmap_sem);
1658
1659 /*
1660 * Forbid mm->exe_file change if old file still mapped.
1661 */
1662 err = -EBUSY;
1663 if (mm->exe_file) {
1664 struct vm_area_struct *vma;
1665
1666 for (vma = mm->mmap; vma; vma = vma->vm_next)
1667 if (vma->vm_file &&
1668 path_equal(&vma->vm_file->f_path,
1669 &mm->exe_file->f_path))
1670 goto exit_unlock;
1671 }
1672
1673 /*
1674 * The symlink can be changed only once, just to disallow arbitrary
1675 * transitions malicious software might bring in. This means one
1676 * could make a snapshot over all processes running and monitor
1677 * /proc/pid/exe changes to notice unusual activity if needed.
1678 */
1679 err = -EPERM;
1680 if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
1681 goto exit_unlock;
1682
1683 err = 0;
1684 set_mm_exe_file(mm, exe.file); /* this grabs a reference to exe.file */
1685exit_unlock:
1686 up_write(&mm->mmap_sem);
1687
1688exit:
1689 fdput(exe);
1690 return err;
1691}
1692
1693static int prctl_set_mm(int opt, unsigned long addr,
1694 unsigned long arg4, unsigned long arg5)
1695{
1696 unsigned long rlim = rlimit(RLIMIT_DATA);
1697 struct mm_struct *mm = current->mm;
1698 struct vm_area_struct *vma;
1699 int error;
1700
1701 if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
1702 return -EINVAL;
1703
1704 if (!capable(CAP_SYS_RESOURCE))
1705 return -EPERM;
1706
1707 if (opt == PR_SET_MM_EXE_FILE)
1708 return prctl_set_mm_exe_file(mm, (unsigned int)addr);
1709
1710 if (addr >= TASK_SIZE || addr < mmap_min_addr)
1711 return -EINVAL;
1712
1713 error = -EINVAL;
1714
1715 down_read(&mm->mmap_sem);
1716 vma = find_vma(mm, addr);
1717
1718 switch (opt) {
1719 case PR_SET_MM_START_CODE:
1720 mm->start_code = addr;
1721 break;
1722 case PR_SET_MM_END_CODE:
1723 mm->end_code = addr;
1724 break;
1725 case PR_SET_MM_START_DATA:
1726 mm->start_data = addr;
1727 break;
1728 case PR_SET_MM_END_DATA:
1729 mm->end_data = addr;
1730 break;
1731
1732 case PR_SET_MM_START_BRK:
1733 if (addr <= mm->end_data)
1734 goto out;
1735
1736 if (rlim < RLIM_INFINITY &&
1737 (mm->brk - addr) +
1738 (mm->end_data - mm->start_data) > rlim)
1739 goto out;
1740
1741 mm->start_brk = addr;
1742 break;
1743
1744 case PR_SET_MM_BRK:
1745 if (addr <= mm->end_data)
1746 goto out;
1747
1748 if (rlim < RLIM_INFINITY &&
1749 (addr - mm->start_brk) +
1750 (mm->end_data - mm->start_data) > rlim)
1751 goto out;
1752
1753 mm->brk = addr;
1754 break;
1755
1756 /*
1757 * If command line arguments and environment
1758 * are placed somewhere else on stack, we can
1759 * set them up here, ARG_START/END to setup
1760 * command line argumets and ENV_START/END
1761 * for environment.
1762 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
		if (opt == PR_SET_MM_START_STACK)
			mm->start_stack = addr;
		else if (opt == PR_SET_MM_ARG_START)
			mm->arg_start = addr;
		else if (opt == PR_SET_MM_ARG_END)
			mm->arg_end = addr;
		else if (opt == PR_SET_MM_ENV_START)
			mm->env_start = addr;
		else if (opt == PR_SET_MM_ENV_END)
			mm->env_end = addr;
		break;

	/*
	 * This doesn't move the auxiliary vector itself
	 * since it's pinned to mm_struct, but it allows
	 * the vector to be filled with new values. It's up
	 * to the caller to provide sane values here,
	 * otherwise user space tools which use this
	 * vector might be unhappy (see the sketch after
	 * this function).
	 */
	case PR_SET_MM_AUXV: {
		unsigned long user_auxv[AT_VECTOR_SIZE];

		if (arg4 > sizeof(user_auxv))
			goto out;
		up_read(&mm->mmap_sem);

		if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
			return -EFAULT;

		/* Make sure the last entry is always AT_NULL */
		user_auxv[AT_VECTOR_SIZE - 2] = 0;
		user_auxv[AT_VECTOR_SIZE - 1] = 0;

		BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

		task_lock(current);
		memcpy(mm->saved_auxv, user_auxv, arg4);
		task_unlock(current);

		return 0;
	}
	default:
		goto out;
	}

	error = 0;
out:
	up_read(&mm->mmap_sem);
	return error;
}
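/*
 * Illustrative userspace sketch (not part of the kernel sources): how a
 * restore tool might use the ARG/ENV options handled above once it has
 * rebuilt the argument and environment strings at new addresses.  The
 * helper name and addresses are made up for the example; the calls need
 * CAP_SYS_RESOURCE and every address must lie inside an existing mapping
 * below TASK_SIZE.
 *
 *	#include <sys/prctl.h>
 *
 *	static int relocate_args(unsigned long arg_start, unsigned long arg_end,
 *				 unsigned long env_start, unsigned long env_end)
 *	{
 *		if (prctl(PR_SET_MM, PR_SET_MM_ARG_START, arg_start, 0, 0) ||
 *		    prctl(PR_SET_MM, PR_SET_MM_ARG_END, arg_end, 0, 0) ||
 *		    prctl(PR_SET_MM, PR_SET_MM_ENV_START, env_start, 0, 0) ||
 *		    prctl(PR_SET_MM, PR_SET_MM_ENV_END, env_end, 0, 0))
 *			return -1;
 *		return 0;
 *	}
 */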
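/*
 * Illustrative userspace sketch (not part of the kernel sources) for
 * PR_SET_MM_AUXV handled above: the caller passes a replacement auxiliary
 * vector and its size in arg4.  The helper name is made up for the
 * example; the vector would normally be copied from a saved image so that
 * tools reading /proc/pid/auxv keep seeing consistent data.
 *
 *	#include <sys/prctl.h>
 *
 *	static int restore_auxv(const unsigned long *saved, unsigned long size)
 *	{
 *		return prctl(PR_SET_MM, PR_SET_MM_AUXV,
 *			     (unsigned long)saved, size, 0);
 *	}
 */
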
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
					~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		current->no_new_privs = 1;
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return current->no_new_privs ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		down_write(&me->mm->mmap_sem);
		if (arg2)
			me->mm->def_flags |= VM_NOHUGEPAGE;
		else
			me->mm->def_flags &= ~VM_NOHUGEPAGE;
		up_write(&me->mm->mmap_sem);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

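/*
 * Illustrative userspace sketch (not part of the kernel sources) showing a
 * few of the prctl() options dispatched above; error handling is omitted
 * and the helper name is made up for the example.
 *
 *	#include <signal.h>
 *	#include <sys/prctl.h>
 *
 *	static void setup_worker(void)
 *	{
 *		char name[16];
 *
 *		prctl(PR_SET_NAME, "worker/0", 0, 0, 0);
 *		prctl(PR_GET_NAME, name, 0, 0, 0);
 *
 *		prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0);
 *
 *		prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *
 *		prctl(PR_SET_TIMERSLACK, 50 * 1000, 0, 0, 0);
 *	}
 */
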
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

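/*
 * Illustrative userspace sketch (not part of the kernel sources): querying
 * the current CPU and NUMA node through the syscall above.  Older glibc
 * has no getcpu() wrapper, so syscall(2) is used directly; sched_getcpu(3)
 * is the usual convenience wrapper when only the CPU number is wanted.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static void where_am_i(void)
 *	{
 *		unsigned int cpu, node;
 *
 *		if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *			printf("cpu %u node %u\n", cpu, node);
 *	}
 */
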
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	get_monotonic_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than what can be stored in a 32-bit unsigned long,
	 * then we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyway...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

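/*
 * Illustrative userspace sketch (not part of the kernel sources): reading
 * the structure filled in above and converting the counters to bytes with
 * mem_unit, which undoes the scaling done in do_sysinfo().  Worked example
 * with illustrative numbers: on a 32-bit kernel with 4 GiB of RAM, 4 GiB
 * of swap and 4096-byte pages the shift loop above overflows, so mem_unit
 * stays at 4096 and totalram stays at 1048576 pages; on a 64-bit kernel
 * the loop completes, mem_unit becomes 1 and totalram is already
 * 4294967296 bytes.  Multiplying by mem_unit is correct either way.
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	static void print_mem(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si) == 0)
 *			printf("ram %llu bytes, free swap %llu bytes\n",
 *			       (unsigned long long)si.totalram * si.mem_unit,
 *			       (unsigned long long)si.freeswap * si.mem_unit);
 *	}
 */
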
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/*
	 * Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}
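
	/*
	 * Worked example of the scaling above (illustrative numbers): with
	 * 16 GiB of RAM, 4096-byte pages and mem_unit == 1 coming out of
	 * do_sysinfo(), totalram is 17179869184 and does not fit in 32
	 * bits.  The loop raises mem_unit to PAGE_SIZE (bitcount == 12),
	 * and totalram >> 12 == 4194304, which fits; 32-bit userspace
	 * recovers the byte count as totalram * mem_unit.
	 */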

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */