/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 *        of Berkeley Packet Filters/Linux Socket Filters.
 */
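
/*
 * Illustrative only (not part of this file's build): a minimal userspace
 * sketch of entering mode 2, assuming the uapi definitions from
 * <linux/seccomp.h>, <linux/filter.h> and <sys/prctl.h>.  This filter
 * allows every system call; a real one would inspect seccomp_data.nr.
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */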

#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
        atomic_t usage;
        struct seccomp_filter *prev;
        struct bpf_prog *prog;
};
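
/*
 * Illustrative sketch of the @prev tree described above: if a task with
 * filter A forks, and parent and child each attach one more filter, the
 * two tasks' per-task "lists" share A as a common ancestor node:
 *
 *	task 1: C -> A -> NULL
 *	task 2: B -> A -> NULL
 *
 * A is freed only after both B and C have been released, since
 * put_seccomp_filter() walks toward the root as counts reach zero.
 */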

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
        struct task_struct *task = current;
        struct pt_regs *regs = task_pt_regs(task);
        unsigned long args[6];

        sd->nr = syscall_get_nr(task, regs);
        sd->arch = syscall_get_arch();
        syscall_get_arguments(task, regs, 0, 6, args);
        sd->args[0] = args[0];
        sd->args[1] = args[1];
        sd->args[2] = args[2];
        sd->args[3] = args[3];
        sd->args[4] = args[4];
        sd->args[5] = args[5];
        sd->instruction_pointer = KSTK_EIP(task);
}

/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
        int pc;

        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
                u16 code = ftest->code;
                u32 k = ftest->k;

                switch (code) {
                case BPF_LD | BPF_W | BPF_ABS:
                        ftest->code = BPF_LDX | BPF_W | BPF_ABS;
                        /* 32-bit aligned and not out of bounds. */
                        if (k >= sizeof(struct seccomp_data) || k & 3)
                                return -EINVAL;
                        continue;
                case BPF_LD | BPF_W | BPF_LEN:
                        ftest->code = BPF_LD | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                case BPF_LDX | BPF_W | BPF_LEN:
                        ftest->code = BPF_LDX | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                /* Explicitly include allowed calls. */
                case BPF_RET | BPF_K:
                case BPF_RET | BPF_A:
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_NEG:
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
                case BPF_MISC | BPF_TAX:
                case BPF_MISC | BPF_TXA:
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
                case BPF_STX:
                case BPF_JMP | BPF_JA:
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        continue;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}
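
/*
 * Illustrative only: a conforming filter reads its inputs with 32-bit
 * aligned absolute loads, e.g.
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr))
 *
 * which the checker above rewrites to BPF_LDX | BPF_W | BPF_ABS so that
 * it reads from struct seccomp_data rather than a socket buffer.  An
 * unaligned offset (k & 3) or one at or past sizeof(struct seccomp_data)
 * is rejected with -EINVAL.
 */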

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(const struct seccomp_data *sd)
{
        struct seccomp_data sd_local;
        u32 ret = SECCOMP_RET_ALLOW;
        /* Make sure cross-thread synced filter points somewhere sane. */
        struct seccomp_filter *f =
                        lockless_dereference(current->seccomp.filter);

        /* Ensure unexpected behavior doesn't result in failing open. */
        if (unlikely(WARN_ON(f == NULL)))
                return SECCOMP_RET_KILL;

        if (!sd) {
                populate_seccomp_data(&sd_local);
                sd = &sd_local;
        }

        /*
         * All filters in the list are evaluated and the lowest BPF return
         * value always takes priority (ignoring the DATA).
         */
        for (; f; f = f->prev) {
                u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

                if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
                        ret = cur_ret;
        }
        return ret;
}
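
/*
 * Illustrative only: the action codes are ordered so that lower values are
 * more restrictive (SECCOMP_RET_KILL < SECCOMP_RET_TRAP < SECCOMP_RET_ERRNO
 * < SECCOMP_RET_TRACE < SECCOMP_RET_ALLOW).  So if one filter in the list
 * returns SECCOMP_RET_ERRNO | 5 and another returns SECCOMP_RET_ALLOW, the
 * ERRNO action wins and the DATA bits (5) come from that same return value.
 */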
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
        assert_spin_locked(&current->sighand->siglock);

        if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
                return false;

        return true;
}

static inline void seccomp_assign_mode(struct task_struct *task,
                                       unsigned long seccomp_mode)
{
        assert_spin_locked(&task->sighand->siglock);

        task->seccomp.mode = seccomp_mode;
        /*
         * Make sure TIF_SECCOMP cannot be set before the mode (and
         * filter) is set.
         */
        smp_mb__before_atomic();
        set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
                       struct seccomp_filter *child)
{
        /* NULL is the root ancestor. */
        if (parent == NULL)
                return 1;
        for (; child; child = child->prev)
                if (child == parent)
                        return 1;
        return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread that was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Validate all threads being eligible for synchronization. */
        caller = current;
        for_each_thread(caller, thread) {
                pid_t failed;

                /* Skip current, since it is initiating the sync. */
                if (thread == caller)
                        continue;

                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
                    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
                     is_ancestor(thread->seccomp.filter,
                                 caller->seccomp.filter)))
                        continue;

                /* Return the first thread that cannot be synchronized. */
                failed = task_pid_vnr(thread);
                /* If the pid cannot be resolved, then return -ESRCH. */
                if (unlikely(WARN_ON(failed == 0)))
                        failed = -ESRCH;
                return failed;
        }

        return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Synchronize all threads. */
        caller = current;
        for_each_thread(caller, thread) {
                /* Skip current, since it needs no changes. */
                if (thread == caller)
                        continue;

                /* Get a task reference for the new leaf node. */
                get_seccomp_filter(caller);
                /*
                 * Drop the task reference to the shared ancestor since
                 * current's path will hold a reference.  (This also
                 * allows a put before the assignment.)
                 */
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);

                /*
                 * Don't let an unprivileged task work around
                 * the no_new_privs restriction by creating
                 * a thread that sets it up, enters seccomp,
                 * then dies.
                 */
                if (task_no_new_privs(caller))
                        task_set_no_new_privs(thread);

                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
        }
}

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
        struct seccomp_filter *sfilter;
        int ret;
        const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);

        BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

        /*
         * Installing a seccomp filter requires that the task has
         * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
         * This avoids scenarios where unprivileged tasks can affect the
         * behavior of privileged children.
         */
        if (!task_no_new_privs(current) &&
            security_capable_noaudit(current_cred(), current_user_ns(),
                                     CAP_SYS_ADMIN) != 0)
                return ERR_PTR(-EACCES);

        /* Allocate a new seccomp_filter. */
        sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
        if (!sfilter)
                return ERR_PTR(-ENOMEM);

        ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
                                        seccomp_check_filter, save_orig);
        if (ret < 0) {
                kfree(sfilter);
                return ERR_PTR(ret);
        }

        atomic_set(&sfilter->usage, 1);

        return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
        struct sock_fprog fprog;
        struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
        if (in_compat_syscall()) {
                struct compat_sock_fprog fprog32;
                if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
                        goto out;
                fprog.len = fprog32.len;
                fprog.filter = compat_ptr(fprog32.filter);
        } else /* falls through to the if below. */
#endif
        if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
                goto out;
        filter = seccomp_prepare_filter(&fprog);
out:
        return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags: flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
                                  struct seccomp_filter *filter)
{
        unsigned long total_insns;
        struct seccomp_filter *walker;

        assert_spin_locked(&current->sighand->siglock);

        /* Validate resulting filter length. */
        total_insns = filter->prog->len;
        for (walker = current->seccomp.filter; walker; walker = walker->prev)
                total_insns += walker->prog->len + 4; /* 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return -ENOMEM;

        /* If thread sync has been requested, check that it is possible. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
                int ret;

                ret = seccomp_can_sync_threads();
                if (ret)
                        return ret;
        }

        /*
         * If there is an existing filter, make it the prev and don't drop its
         * task reference.
         */
        filter->prev = current->seccomp.filter;
        current->seccomp.filter = filter;

        /* Now that the new filter is in place, synchronize to all threads. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                seccomp_sync_threads();

        return 0;
}
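
/*
 * Worked example for the limit enforced above: struct sock_filter is 8
 * bytes, so MAX_INSNS_PER_PATH is (1 << 18) / 8 = 32768 instructions.
 * Each filter already on the path also costs a fixed 4-instruction
 * penalty, so e.g. ten stacked 100-instruction filters account for
 * 10 * (100 + 4) = 1040 of that budget.
 */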

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;
        if (!orig)
                return;
        /* Reference count is bounded by the number of total processes. */
        atomic_inc(&orig->usage);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
        if (filter) {
                bpf_prog_destroy(filter->prog);
                kfree(filter);
        }
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;
        /* Clean up single-reference branches iteratively. */
        while (orig && atomic_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
                seccomp_filter_free(freeme);
        }
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
        struct siginfo info;
        memset(&info, 0, sizeof(info));
        info.si_signo = SIGSYS;
        info.si_code = SYS_SECCOMP;
        info.si_call_addr = (void __user *)KSTK_EIP(current);
        info.si_errno = reason;
        info.si_arch = syscall_get_arch();
        info.si_syscall = syscall;
        force_sig_info(SIGSYS, &info, current);
}
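
/*
 * Illustrative only: a userspace process expecting SECCOMP_RET_TRAP can
 * catch the SIGSYS forced above with SA_SIGINFO; assuming a libc that
 * exposes the sigsys siginfo fields, info->si_syscall, info->si_arch and
 * info->si_errno then carry the syscall number, the audit arch and the
 * 16 bits of filter data passed via SECCOMP_RET_DATA.
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		int nr = info->si_syscall;
 *	}
 *
 *	struct sigaction sa = {
 *		.sa_sigaction = sigsys_handler,
 *		.sa_flags = SA_SIGINFO,
 *	};
 *	sigaction(SIGSYS, &sa, NULL);
 */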
#endif /* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
        __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
        0, /* null terminated */
};

static void __secure_computing_strict(int this_syscall)
{
        const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
        if (in_compat_syscall())
                syscall_whitelist = get_compat_mode1_syscalls();
#endif
        do {
                if (*syscall_whitelist == this_syscall)
                        return;
        } while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
        dump_stack();
#endif
        audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
        do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
        int mode = current->seccomp.mode;

        if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
            unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
                return;

        if (mode == SECCOMP_MODE_DISABLED)
                return;
        else if (mode == SECCOMP_MODE_STRICT)
                __secure_computing_strict(this_syscall);
        else
                BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                            const bool recheck_after_trace)
{
        u32 filter_ret, action;
        int data;

        /*
         * Make sure that any changes to mode from another thread have
         * been seen after TIF_SECCOMP was seen.
         */
        rmb();

        filter_ret = seccomp_run_filters(sd);
        data = filter_ret & SECCOMP_RET_DATA;
        action = filter_ret & SECCOMP_RET_ACTION;

        switch (action) {
        case SECCOMP_RET_ERRNO:
                /* Set low-order bits as an errno, capped at MAX_ERRNO. */
                if (data > MAX_ERRNO)
                        data = MAX_ERRNO;
                syscall_set_return_value(current, task_pt_regs(current),
                                         -data, 0);
                goto skip;

        case SECCOMP_RET_TRAP:
                /* Show the handler the original registers. */
                syscall_rollback(current, task_pt_regs(current));
                /* Let the filter pass back 16 bits of data. */
                seccomp_send_sigsys(this_syscall, data);
                goto skip;

        case SECCOMP_RET_TRACE:
                /* We've been put in this state by the ptracer already. */
                if (recheck_after_trace)
                        return 0;

                /* ENOSYS these calls if there is no tracer attached. */
                if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
                        syscall_set_return_value(current,
                                                 task_pt_regs(current),
                                                 -ENOSYS, 0);
                        goto skip;
                }

                /* Allow the BPF to provide the event message. */
                ptrace_event(PTRACE_EVENT_SECCOMP, data);
                /*
                 * The delivery of a fatal signal during event
                 * notification may silently skip tracer notification,
                 * which could leave us with a potentially unmodified
                 * syscall that the tracer would have liked to have
                 * changed.  Since the process is about to die, we just
                 * force the syscall to be skipped and let the signal
                 * kill the process and correctly handle any tracer exit
                 * notifications.
                 */
                if (fatal_signal_pending(current))
                        goto skip;
                /* Check if the tracer forced the syscall to be skipped. */
                this_syscall = syscall_get_nr(current, task_pt_regs(current));
                if (this_syscall < 0)
                        goto skip;

                /*
                 * Recheck the syscall, since it may have changed.  This
                 * intentionally uses a NULL struct seccomp_data to force
                 * a reload of all registers.  This does not goto skip since
                 * a skip would have already been reported.
                 */
                if (__seccomp_filter(this_syscall, NULL, true))
                        return -1;

                return 0;

        case SECCOMP_RET_ALLOW:
                return 0;

        case SECCOMP_RET_KILL:
        default:
                audit_seccomp(this_syscall, SIGSYS, action);
                do_exit(SIGSYS);
        }

        unreachable();

skip:
        audit_seccomp(this_syscall, 0, action);
        return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                            const bool recheck_after_trace)
{
        BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
        int mode = current->seccomp.mode;
        int this_syscall;

        if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
            unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
                return 0;

        this_syscall = sd ? sd->nr :
                syscall_get_nr(current, task_pt_regs(current));

        switch (mode) {
        case SECCOMP_MODE_STRICT:
                __secure_computing_strict(this_syscall); /* may call do_exit */
                return 0;
        case SECCOMP_MODE_FILTER:
                return __seccomp_filter(this_syscall, sd, false);
        default:
                BUG();
        }
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
        return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
        long ret = -EINVAL;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

#ifdef TIF_NOTSC
        disable_TSC();
#endif
        seccomp_assign_mode(current, seccomp_mode);
        ret = 0;

out:
        spin_unlock_irq(&current->sighand->siglock);

        return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags: flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
                                    const char __user *filter)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
        struct seccomp_filter *prepared = NULL;
        long ret = -EINVAL;

        /* Validate flags. */
        if (flags & ~SECCOMP_FILTER_FLAG_MASK)
                return -EINVAL;

        /* Prepare the new filter before holding any locks. */
        prepared = seccomp_prepare_user_filter(filter);
        if (IS_ERR(prepared))
                return PTR_ERR(prepared);

        /*
         * Make sure we cannot change seccomp or nnp state via TSYNC
         * while another thread is in the middle of calling exec.
         */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
            mutex_lock_killable(&current->signal->cred_guard_mutex))
                goto out_free;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

        ret = seccomp_attach_filter(flags, prepared);
        if (ret)
                goto out;
        /* Do not free the successfully attached filter. */
        prepared = NULL;

        seccomp_assign_mode(current, seccomp_mode);
out:
        spin_unlock_irq(&current->sighand->siglock);
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
        seccomp_filter_free(prepared);
        return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
                                           const char __user *filter)
{
        return -EINVAL;
}
#endif

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
                       const char __user *uargs)
{
        switch (op) {
        case SECCOMP_SET_MODE_STRICT:
                if (flags != 0 || uargs != NULL)
                        return -EINVAL;
                return seccomp_set_mode_strict();
        case SECCOMP_SET_MODE_FILTER:
                return seccomp_set_mode_filter(flags, uargs);
        default:
                return -EINVAL;
        }
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
                const char __user *, uargs)
{
        return do_seccomp(op, flags, uargs);
}
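
/*
 * Illustrative only: the raw syscall mirrors do_seccomp() directly and,
 * unlike the prctl path below, can carry flags such as
 * SECCOMP_FILTER_FLAG_TSYNC.  Sketch, reusing a struct sock_fprog prog
 * as in the example near the top of this file:
 *
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_TSYNC, &prog);
 */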

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
        unsigned int op;
        char __user *uargs;

        switch (seccomp_mode) {
        case SECCOMP_MODE_STRICT:
                op = SECCOMP_SET_MODE_STRICT;
                /*
                 * Setting strict mode through prctl has always ignored the
                 * filter argument, so make sure it is always NULL here to
                 * pass the internal check in do_seccomp().
                 */
                uargs = NULL;
                break;
        case SECCOMP_MODE_FILTER:
                op = SECCOMP_SET_MODE_FILTER;
                uargs = filter;
                break;
        default:
                return -EINVAL;
        }

        /* The prctl interface doesn't have flags, so they are always zero. */
        return do_seccomp(op, 0, uargs);
}
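
/*
 * Illustrative only: the legacy prctl entry points map onto do_seccomp()
 * as shown above (with prog as in the sketch near the top of this file):
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */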

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
                        void __user *data)
{
        struct seccomp_filter *filter;
        struct sock_fprog_kern *fprog;
        long ret;
        unsigned long count = 0;

        if (!capable(CAP_SYS_ADMIN) ||
            current->seccomp.mode != SECCOMP_MODE_DISABLED) {
                return -EACCES;
        }

        spin_lock_irq(&task->sighand->siglock);
        if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
                ret = -EINVAL;
                goto out;
        }

        filter = task->seccomp.filter;
        while (filter) {
                filter = filter->prev;
                count++;
        }

        if (filter_off >= count) {
                ret = -ENOENT;
                goto out;
        }
        count -= filter_off;

        filter = task->seccomp.filter;
        while (filter && count > 1) {
                filter = filter->prev;
                count--;
        }

        if (WARN_ON(count != 1 || !filter)) {
                /* The filter tree shouldn't shrink while we're using it. */
                ret = -ENOENT;
                goto out;
        }

        fprog = filter->prog->orig_prog;
        if (!fprog) {
                /* This must be a new non-cBPF filter, since we save
                 * every cBPF filter's orig_prog above when
                 * CONFIG_CHECKPOINT_RESTORE is enabled.
                 */
                ret = -EMEDIUMTYPE;
                goto out;
        }

        ret = fprog->len;
        if (!data)
                goto out;

        get_seccomp_filter(task);
        spin_unlock_irq(&task->sighand->siglock);

        if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
                ret = -EFAULT;

        put_seccomp_filter(task);
        return ret;

out:
        spin_unlock_irq(&task->sighand->siglock);
        return ret;
}
#endif