Loading...
1/*
2 * linux/kernel/seccomp.c
3 *
4 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
5 *
6 * Copyright (C) 2012 Google, Inc.
7 * Will Drewry <wad@chromium.org>
8 *
9 * This defines a simple but solid secure-computing facility.
10 *
11 * Mode 1 uses a fixed list of allowed system calls.
12 * Mode 2 allows user-defined system call filters in the form
13 * of Berkeley Packet Filters/Linux Socket Filters.
14 */
15
16#include <linux/atomic.h>
17#include <linux/audit.h>
18#include <linux/compat.h>
19#include <linux/sched.h>
20#include <linux/seccomp.h>
21#include <linux/slab.h>
22#include <linux/syscalls.h>
23
24#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
25#include <asm/syscall.h>
26#endif
27
28#ifdef CONFIG_SECCOMP_FILTER
29#include <linux/filter.h>
30#include <linux/pid.h>
31#include <linux/ptrace.h>
32#include <linux/security.h>
33#include <linux/tracehook.h>
34#include <linux/uaccess.h>
35
36/**
37 * struct seccomp_filter - container for seccomp BPF programs
38 *
39 * @usage: reference count to manage the object lifetime.
40 * get/put helpers should be used when accessing an instance
41 * outside of a lifetime-guarded section. In general, this
42 * is only needed for handling filters shared across tasks.
43 * @prev: points to a previously installed, or inherited, filter
44 * @len: the number of instructions in the program
45 * @insnsi: the BPF program instructions to evaluate
46 *
47 * seccomp_filter objects are organized in a tree linked via the @prev
48 * pointer. For any task, it appears to be a singly-linked list starting
49 * with current->seccomp.filter, the most recently attached or inherited filter.
50 * However, multiple filters may share a @prev node, by way of fork(), which
51 * results in a unidirectional tree existing in memory. This is similar to
52 * how namespaces work.
53 *
54 * seccomp_filter objects should never be modified after being attached
55 * to a task_struct (other than @usage).
56 */
57struct seccomp_filter {
58 atomic_t usage;
59 struct seccomp_filter *prev;
60 struct bpf_prog *prog;
61};
62
63/* Limit any path through the tree to 256KB worth of instructions. */
64#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
65
66/*
67 * Endianness is explicitly ignored and left for BPF program authors to manage
68 * as per the specific architecture.
69 */
70static void populate_seccomp_data(struct seccomp_data *sd)
71{
72 struct task_struct *task = current;
73 struct pt_regs *regs = task_pt_regs(task);
74 unsigned long args[6];
75
76 sd->nr = syscall_get_nr(task, regs);
77 sd->arch = syscall_get_arch();
78 syscall_get_arguments(task, regs, 0, 6, args);
79 sd->args[0] = args[0];
80 sd->args[1] = args[1];
81 sd->args[2] = args[2];
82 sd->args[3] = args[3];
83 sd->args[4] = args[4];
84 sd->args[5] = args[5];
85 sd->instruction_pointer = KSTK_EIP(task);
86}
87
88/**
89 * seccomp_check_filter - verify seccomp filter code
90 * @filter: filter to verify
91 * @flen: length of filter
92 *
93 * Takes a previously checked filter (by bpf_check_classic) and
94 * redirects all filter code that loads struct sk_buff data
95 * and related data through seccomp_bpf_load. It also
96 * enforces length and alignment checking of those loads.
97 *
98 * Returns 0 if the rule set is legal or -EINVAL if not.
99 */
100static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
101{
102 int pc;
103 for (pc = 0; pc < flen; pc++) {
104 struct sock_filter *ftest = &filter[pc];
105 u16 code = ftest->code;
106 u32 k = ftest->k;
107
108 switch (code) {
109 case BPF_LD | BPF_W | BPF_ABS:
110 ftest->code = BPF_LDX | BPF_W | BPF_ABS;
111 /* 32-bit aligned and not out of bounds. */
112 if (k >= sizeof(struct seccomp_data) || k & 3)
113 return -EINVAL;
114 continue;
115 case BPF_LD | BPF_W | BPF_LEN:
116 ftest->code = BPF_LD | BPF_IMM;
117 ftest->k = sizeof(struct seccomp_data);
118 continue;
119 case BPF_LDX | BPF_W | BPF_LEN:
120 ftest->code = BPF_LDX | BPF_IMM;
121 ftest->k = sizeof(struct seccomp_data);
122 continue;
123 /* Explicitly include allowed calls. */
124 case BPF_RET | BPF_K:
125 case BPF_RET | BPF_A:
126 case BPF_ALU | BPF_ADD | BPF_K:
127 case BPF_ALU | BPF_ADD | BPF_X:
128 case BPF_ALU | BPF_SUB | BPF_K:
129 case BPF_ALU | BPF_SUB | BPF_X:
130 case BPF_ALU | BPF_MUL | BPF_K:
131 case BPF_ALU | BPF_MUL | BPF_X:
132 case BPF_ALU | BPF_DIV | BPF_K:
133 case BPF_ALU | BPF_DIV | BPF_X:
134 case BPF_ALU | BPF_AND | BPF_K:
135 case BPF_ALU | BPF_AND | BPF_X:
136 case BPF_ALU | BPF_OR | BPF_K:
137 case BPF_ALU | BPF_OR | BPF_X:
138 case BPF_ALU | BPF_XOR | BPF_K:
139 case BPF_ALU | BPF_XOR | BPF_X:
140 case BPF_ALU | BPF_LSH | BPF_K:
141 case BPF_ALU | BPF_LSH | BPF_X:
142 case BPF_ALU | BPF_RSH | BPF_K:
143 case BPF_ALU | BPF_RSH | BPF_X:
144 case BPF_ALU | BPF_NEG:
145 case BPF_LD | BPF_IMM:
146 case BPF_LDX | BPF_IMM:
147 case BPF_MISC | BPF_TAX:
148 case BPF_MISC | BPF_TXA:
149 case BPF_LD | BPF_MEM:
150 case BPF_LDX | BPF_MEM:
151 case BPF_ST:
152 case BPF_STX:
153 case BPF_JMP | BPF_JA:
154 case BPF_JMP | BPF_JEQ | BPF_K:
155 case BPF_JMP | BPF_JEQ | BPF_X:
156 case BPF_JMP | BPF_JGE | BPF_K:
157 case BPF_JMP | BPF_JGE | BPF_X:
158 case BPF_JMP | BPF_JGT | BPF_K:
159 case BPF_JMP | BPF_JGT | BPF_X:
160 case BPF_JMP | BPF_JSET | BPF_K:
161 case BPF_JMP | BPF_JSET | BPF_X:
162 continue;
163 default:
164 return -EINVAL;
165 }
166 }
167 return 0;
168}
169
170/**
171 * seccomp_run_filters - evaluates all seccomp filters against @syscall
172 * @syscall: number of the current system call
173 *
174 * Returns valid seccomp BPF response codes.
175 */
176static u32 seccomp_run_filters(struct seccomp_data *sd)
177{
178 struct seccomp_data sd_local;
179 u32 ret = SECCOMP_RET_ALLOW;
180 /* Make sure cross-thread synced filter points somewhere sane. */
181 struct seccomp_filter *f =
182 lockless_dereference(current->seccomp.filter);
183
184 /* Ensure unexpected behavior doesn't result in failing open. */
185 if (unlikely(WARN_ON(f == NULL)))
186 return SECCOMP_RET_KILL;
187
188 if (!sd) {
189 populate_seccomp_data(&sd_local);
190 sd = &sd_local;
191 }
192
193 /*
194 * All filters in the list are evaluated and the lowest BPF return
195 * value always takes priority (ignoring the DATA).
196 */
197 for (; f; f = f->prev) {
198 u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);
199
200 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
201 ret = cur_ret;
202 }
203 return ret;
204}
205#endif /* CONFIG_SECCOMP_FILTER */
206
207static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
208{
209 assert_spin_locked(¤t->sighand->siglock);
210
211 if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
212 return false;
213
214 return true;
215}
216
217static inline void seccomp_assign_mode(struct task_struct *task,
218 unsigned long seccomp_mode)
219{
220 assert_spin_locked(&task->sighand->siglock);
221
222 task->seccomp.mode = seccomp_mode;
223 /*
224 * Make sure TIF_SECCOMP cannot be set before the mode (and
225 * filter) is set.
226 */
227 smp_mb__before_atomic();
228 set_tsk_thread_flag(task, TIF_SECCOMP);
229}
230
231#ifdef CONFIG_SECCOMP_FILTER
232/* Returns 1 if the parent is an ancestor of the child. */
233static int is_ancestor(struct seccomp_filter *parent,
234 struct seccomp_filter *child)
235{
236 /* NULL is the root ancestor. */
237 if (parent == NULL)
238 return 1;
239 for (; child; child = child->prev)
240 if (child == parent)
241 return 1;
242 return 0;
243}
244
245/**
246 * seccomp_can_sync_threads: checks if all threads can be synchronized
247 *
248 * Expects sighand and cred_guard_mutex locks to be held.
249 *
250 * Returns 0 on success, -ve on error, or the pid of a thread which was
251 * either not in the correct seccomp mode or it did not have an ancestral
252 * seccomp filter.
253 */
254static inline pid_t seccomp_can_sync_threads(void)
255{
256 struct task_struct *thread, *caller;
257
258 BUG_ON(!mutex_is_locked(¤t->signal->cred_guard_mutex));
259 assert_spin_locked(¤t->sighand->siglock);
260
261 /* Validate all threads being eligible for synchronization. */
262 caller = current;
263 for_each_thread(caller, thread) {
264 pid_t failed;
265
266 /* Skip current, since it is initiating the sync. */
267 if (thread == caller)
268 continue;
269
270 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
271 (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
272 is_ancestor(thread->seccomp.filter,
273 caller->seccomp.filter)))
274 continue;
275
276 /* Return the first thread that cannot be synchronized. */
277 failed = task_pid_vnr(thread);
278 /* If the pid cannot be resolved, then return -ESRCH */
279 if (unlikely(WARN_ON(failed == 0)))
280 failed = -ESRCH;
281 return failed;
282 }
283
284 return 0;
285}
286
287/**
288 * seccomp_sync_threads: sets all threads to use current's filter
289 *
290 * Expects sighand and cred_guard_mutex locks to be held, and for
291 * seccomp_can_sync_threads() to have returned success already
292 * without dropping the locks.
293 *
294 */
295static inline void seccomp_sync_threads(void)
296{
297 struct task_struct *thread, *caller;
298
299 BUG_ON(!mutex_is_locked(¤t->signal->cred_guard_mutex));
300 assert_spin_locked(¤t->sighand->siglock);
301
302 /* Synchronize all threads. */
303 caller = current;
304 for_each_thread(caller, thread) {
305 /* Skip current, since it needs no changes. */
306 if (thread == caller)
307 continue;
308
309 /* Get a task reference for the new leaf node. */
310 get_seccomp_filter(caller);
311 /*
312 * Drop the task reference to the shared ancestor since
313 * current's path will hold a reference. (This also
314 * allows a put before the assignment.)
315 */
316 put_seccomp_filter(thread);
317 smp_store_release(&thread->seccomp.filter,
318 caller->seccomp.filter);
319
320 /*
321 * Don't let an unprivileged task work around
322 * the no_new_privs restriction by creating
323 * a thread that sets it up, enters seccomp,
324 * then dies.
325 */
326 if (task_no_new_privs(caller))
327 task_set_no_new_privs(thread);
328
329 /*
330 * Opt the other thread into seccomp if needed.
331 * As threads are considered to be trust-realm
332 * equivalent (see ptrace_may_access), it is safe to
333 * allow one thread to transition the other.
334 */
335 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
336 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
337 }
338}
339
340/**
341 * seccomp_prepare_filter: Prepares a seccomp filter for use.
342 * @fprog: BPF program to install
343 *
344 * Returns filter on success or an ERR_PTR on failure.
345 */
346static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
347{
348 struct seccomp_filter *sfilter;
349 int ret;
350 const bool save_orig = config_enabled(CONFIG_CHECKPOINT_RESTORE);
351
352 if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
353 return ERR_PTR(-EINVAL);
354
355 BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
356
357 /*
358 * Installing a seccomp filter requires that the task has
359 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
360 * This avoids scenarios where unprivileged tasks can affect the
361 * behavior of privileged children.
362 */
363 if (!task_no_new_privs(current) &&
364 security_capable_noaudit(current_cred(), current_user_ns(),
365 CAP_SYS_ADMIN) != 0)
366 return ERR_PTR(-EACCES);
367
368 /* Allocate a new seccomp_filter */
369 sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
370 if (!sfilter)
371 return ERR_PTR(-ENOMEM);
372
373 ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
374 seccomp_check_filter, save_orig);
375 if (ret < 0) {
376 kfree(sfilter);
377 return ERR_PTR(ret);
378 }
379
380 atomic_set(&sfilter->usage, 1);
381
382 return sfilter;
383}
384
385/**
386 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
387 * @user_filter: pointer to the user data containing a sock_fprog.
388 *
389 * Returns 0 on success and non-zero otherwise.
390 */
391static struct seccomp_filter *
392seccomp_prepare_user_filter(const char __user *user_filter)
393{
394 struct sock_fprog fprog;
395 struct seccomp_filter *filter = ERR_PTR(-EFAULT);
396
397#ifdef CONFIG_COMPAT
398 if (in_compat_syscall()) {
399 struct compat_sock_fprog fprog32;
400 if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
401 goto out;
402 fprog.len = fprog32.len;
403 fprog.filter = compat_ptr(fprog32.filter);
404 } else /* falls through to the if below. */
405#endif
406 if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
407 goto out;
408 filter = seccomp_prepare_filter(&fprog);
409out:
410 return filter;
411}
412
413/**
414 * seccomp_attach_filter: validate and attach filter
415 * @flags: flags to change filter behavior
416 * @filter: seccomp filter to add to the current process
417 *
418 * Caller must be holding current->sighand->siglock lock.
419 *
420 * Returns 0 on success, -ve on error.
421 */
422static long seccomp_attach_filter(unsigned int flags,
423 struct seccomp_filter *filter)
424{
425 unsigned long total_insns;
426 struct seccomp_filter *walker;
427
428 assert_spin_locked(¤t->sighand->siglock);
429
430 /* Validate resulting filter length. */
431 total_insns = filter->prog->len;
432 for (walker = current->seccomp.filter; walker; walker = walker->prev)
433 total_insns += walker->prog->len + 4; /* 4 instr penalty */
434 if (total_insns > MAX_INSNS_PER_PATH)
435 return -ENOMEM;
436
437 /* If thread sync has been requested, check that it is possible. */
438 if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
439 int ret;
440
441 ret = seccomp_can_sync_threads();
442 if (ret)
443 return ret;
444 }
445
446 /*
447 * If there is an existing filter, make it the prev and don't drop its
448 * task reference.
449 */
450 filter->prev = current->seccomp.filter;
451 current->seccomp.filter = filter;
452
453 /* Now that the new filter is in place, synchronize to all threads. */
454 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
455 seccomp_sync_threads();
456
457 return 0;
458}
459
460/* get_seccomp_filter - increments the reference count of the filter on @tsk */
461void get_seccomp_filter(struct task_struct *tsk)
462{
463 struct seccomp_filter *orig = tsk->seccomp.filter;
464 if (!orig)
465 return;
466 /* Reference count is bounded by the number of total processes. */
467 atomic_inc(&orig->usage);
468}
469
470static inline void seccomp_filter_free(struct seccomp_filter *filter)
471{
472 if (filter) {
473 bpf_prog_destroy(filter->prog);
474 kfree(filter);
475 }
476}
477
478/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
479void put_seccomp_filter(struct task_struct *tsk)
480{
481 struct seccomp_filter *orig = tsk->seccomp.filter;
482 /* Clean up single-reference branches iteratively. */
483 while (orig && atomic_dec_and_test(&orig->usage)) {
484 struct seccomp_filter *freeme = orig;
485 orig = orig->prev;
486 seccomp_filter_free(freeme);
487 }
488}
489
490/**
491 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
492 * @syscall: syscall number to send to userland
493 * @reason: filter-supplied reason code to send to userland (via si_errno)
494 *
495 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
496 */
497static void seccomp_send_sigsys(int syscall, int reason)
498{
499 struct siginfo info;
500 memset(&info, 0, sizeof(info));
501 info.si_signo = SIGSYS;
502 info.si_code = SYS_SECCOMP;
503 info.si_call_addr = (void __user *)KSTK_EIP(current);
504 info.si_errno = reason;
505 info.si_arch = syscall_get_arch();
506 info.si_syscall = syscall;
507 force_sig_info(SIGSYS, &info, current);
508}
509#endif /* CONFIG_SECCOMP_FILTER */
510
511/*
512 * Secure computing mode 1 allows only read/write/exit/sigreturn.
513 * To be fully secure this must be combined with rlimit
514 * to limit the stack allocations too.
515 */
516static int mode1_syscalls[] = {
517 __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
518 0, /* null terminated */
519};
520
521#ifdef CONFIG_COMPAT
522static int mode1_syscalls_32[] = {
523 __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
524 0, /* null terminated */
525};
526#endif
527
528static void __secure_computing_strict(int this_syscall)
529{
530 int *syscall_whitelist = mode1_syscalls;
531#ifdef CONFIG_COMPAT
532 if (in_compat_syscall())
533 syscall_whitelist = mode1_syscalls_32;
534#endif
535 do {
536 if (*syscall_whitelist == this_syscall)
537 return;
538 } while (*++syscall_whitelist);
539
540#ifdef SECCOMP_DEBUG
541 dump_stack();
542#endif
543 audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
544 do_exit(SIGKILL);
545}
546
547#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
548void secure_computing_strict(int this_syscall)
549{
550 int mode = current->seccomp.mode;
551
552 if (config_enabled(CONFIG_CHECKPOINT_RESTORE) &&
553 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
554 return;
555
556 if (mode == SECCOMP_MODE_DISABLED)
557 return;
558 else if (mode == SECCOMP_MODE_STRICT)
559 __secure_computing_strict(this_syscall);
560 else
561 BUG();
562}
563#else
564int __secure_computing(void)
565{
566 u32 phase1_result = seccomp_phase1(NULL);
567
568 if (likely(phase1_result == SECCOMP_PHASE1_OK))
569 return 0;
570 else if (likely(phase1_result == SECCOMP_PHASE1_SKIP))
571 return -1;
572 else
573 return seccomp_phase2(phase1_result);
574}
575
576#ifdef CONFIG_SECCOMP_FILTER
577static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
578{
579 u32 filter_ret, action;
580 int data;
581
582 /*
583 * Make sure that any changes to mode from another thread have
584 * been seen after TIF_SECCOMP was seen.
585 */
586 rmb();
587
588 filter_ret = seccomp_run_filters(sd);
589 data = filter_ret & SECCOMP_RET_DATA;
590 action = filter_ret & SECCOMP_RET_ACTION;
591
592 switch (action) {
593 case SECCOMP_RET_ERRNO:
594 /* Set low-order bits as an errno, capped at MAX_ERRNO. */
595 if (data > MAX_ERRNO)
596 data = MAX_ERRNO;
597 syscall_set_return_value(current, task_pt_regs(current),
598 -data, 0);
599 goto skip;
600
601 case SECCOMP_RET_TRAP:
602 /* Show the handler the original registers. */
603 syscall_rollback(current, task_pt_regs(current));
604 /* Let the filter pass back 16 bits of data. */
605 seccomp_send_sigsys(this_syscall, data);
606 goto skip;
607
608 case SECCOMP_RET_TRACE:
609 return filter_ret; /* Save the rest for phase 2. */
610
611 case SECCOMP_RET_ALLOW:
612 return SECCOMP_PHASE1_OK;
613
614 case SECCOMP_RET_KILL:
615 default:
616 audit_seccomp(this_syscall, SIGSYS, action);
617 do_exit(SIGSYS);
618 }
619
620 unreachable();
621
622skip:
623 audit_seccomp(this_syscall, 0, action);
624 return SECCOMP_PHASE1_SKIP;
625}
626#endif
627
628/**
629 * seccomp_phase1() - run fast path seccomp checks on the current syscall
630 * @arg sd: The seccomp_data or NULL
631 *
632 * This only reads pt_regs via the syscall_xyz helpers. The only change
633 * it will make to pt_regs is via syscall_set_return_value, and it will
634 * only do that if it returns SECCOMP_PHASE1_SKIP.
635 *
636 * If sd is provided, it will not read pt_regs at all.
637 *
638 * It may also call do_exit or force a signal; these actions must be
639 * safe.
640 *
641 * If it returns SECCOMP_PHASE1_OK, the syscall passes checks and should
642 * be processed normally.
643 *
644 * If it returns SECCOMP_PHASE1_SKIP, then the syscall should not be
645 * invoked. In this case, seccomp_phase1 will have set the return value
646 * using syscall_set_return_value.
647 *
648 * If it returns anything else, then the return value should be passed
649 * to seccomp_phase2 from a context in which ptrace hooks are safe.
650 */
651u32 seccomp_phase1(struct seccomp_data *sd)
652{
653 int mode = current->seccomp.mode;
654 int this_syscall = sd ? sd->nr :
655 syscall_get_nr(current, task_pt_regs(current));
656
657 if (config_enabled(CONFIG_CHECKPOINT_RESTORE) &&
658 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
659 return SECCOMP_PHASE1_OK;
660
661 switch (mode) {
662 case SECCOMP_MODE_STRICT:
663 __secure_computing_strict(this_syscall); /* may call do_exit */
664 return SECCOMP_PHASE1_OK;
665#ifdef CONFIG_SECCOMP_FILTER
666 case SECCOMP_MODE_FILTER:
667 return __seccomp_phase1_filter(this_syscall, sd);
668#endif
669 default:
670 BUG();
671 }
672}
673
674/**
675 * seccomp_phase2() - finish slow path seccomp work for the current syscall
676 * @phase1_result: The return value from seccomp_phase1()
677 *
678 * This must be called from a context in which ptrace hooks can be used.
679 *
680 * Returns 0 if the syscall should be processed or -1 to skip the syscall.
681 */
682int seccomp_phase2(u32 phase1_result)
683{
684 struct pt_regs *regs = task_pt_regs(current);
685 u32 action = phase1_result & SECCOMP_RET_ACTION;
686 int data = phase1_result & SECCOMP_RET_DATA;
687
688 BUG_ON(action != SECCOMP_RET_TRACE);
689
690 audit_seccomp(syscall_get_nr(current, regs), 0, action);
691
692 /* Skip these calls if there is no tracer. */
693 if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
694 syscall_set_return_value(current, regs,
695 -ENOSYS, 0);
696 return -1;
697 }
698
699 /* Allow the BPF to provide the event message */
700 ptrace_event(PTRACE_EVENT_SECCOMP, data);
701 /*
702 * The delivery of a fatal signal during event
703 * notification may silently skip tracer notification.
704 * Terminating the task now avoids executing a system
705 * call that may not be intended.
706 */
707 if (fatal_signal_pending(current))
708 do_exit(SIGSYS);
709 if (syscall_get_nr(current, regs) < 0)
710 return -1; /* Explicit request to skip. */
711
712 return 0;
713}
714#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
715
716long prctl_get_seccomp(void)
717{
718 return current->seccomp.mode;
719}
720
721/**
722 * seccomp_set_mode_strict: internal function for setting strict seccomp
723 *
724 * Once current->seccomp.mode is non-zero, it may not be changed.
725 *
726 * Returns 0 on success or -EINVAL on failure.
727 */
728static long seccomp_set_mode_strict(void)
729{
730 const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
731 long ret = -EINVAL;
732
733 spin_lock_irq(¤t->sighand->siglock);
734
735 if (!seccomp_may_assign_mode(seccomp_mode))
736 goto out;
737
738#ifdef TIF_NOTSC
739 disable_TSC();
740#endif
741 seccomp_assign_mode(current, seccomp_mode);
742 ret = 0;
743
744out:
745 spin_unlock_irq(¤t->sighand->siglock);
746
747 return ret;
748}
749
750#ifdef CONFIG_SECCOMP_FILTER
751/**
752 * seccomp_set_mode_filter: internal function for setting seccomp filter
753 * @flags: flags to change filter behavior
754 * @filter: struct sock_fprog containing filter
755 *
756 * This function may be called repeatedly to install additional filters.
757 * Every filter successfully installed will be evaluated (in reverse order)
758 * for each system call the task makes.
759 *
760 * Once current->seccomp.mode is non-zero, it may not be changed.
761 *
762 * Returns 0 on success or -EINVAL on failure.
763 */
764static long seccomp_set_mode_filter(unsigned int flags,
765 const char __user *filter)
766{
767 const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
768 struct seccomp_filter *prepared = NULL;
769 long ret = -EINVAL;
770
771 /* Validate flags. */
772 if (flags & ~SECCOMP_FILTER_FLAG_MASK)
773 return -EINVAL;
774
775 /* Prepare the new filter before holding any locks. */
776 prepared = seccomp_prepare_user_filter(filter);
777 if (IS_ERR(prepared))
778 return PTR_ERR(prepared);
779
780 /*
781 * Make sure we cannot change seccomp or nnp state via TSYNC
782 * while another thread is in the middle of calling exec.
783 */
784 if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
785 mutex_lock_killable(¤t->signal->cred_guard_mutex))
786 goto out_free;
787
788 spin_lock_irq(¤t->sighand->siglock);
789
790 if (!seccomp_may_assign_mode(seccomp_mode))
791 goto out;
792
793 ret = seccomp_attach_filter(flags, prepared);
794 if (ret)
795 goto out;
796 /* Do not free the successfully attached filter. */
797 prepared = NULL;
798
799 seccomp_assign_mode(current, seccomp_mode);
800out:
801 spin_unlock_irq(¤t->sighand->siglock);
802 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
803 mutex_unlock(¤t->signal->cred_guard_mutex);
804out_free:
805 seccomp_filter_free(prepared);
806 return ret;
807}
808#else
809static inline long seccomp_set_mode_filter(unsigned int flags,
810 const char __user *filter)
811{
812 return -EINVAL;
813}
814#endif
815
816/* Common entry point for both prctl and syscall. */
817static long do_seccomp(unsigned int op, unsigned int flags,
818 const char __user *uargs)
819{
820 switch (op) {
821 case SECCOMP_SET_MODE_STRICT:
822 if (flags != 0 || uargs != NULL)
823 return -EINVAL;
824 return seccomp_set_mode_strict();
825 case SECCOMP_SET_MODE_FILTER:
826 return seccomp_set_mode_filter(flags, uargs);
827 default:
828 return -EINVAL;
829 }
830}
831
832SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
833 const char __user *, uargs)
834{
835 return do_seccomp(op, flags, uargs);
836}
837
838/**
839 * prctl_set_seccomp: configures current->seccomp.mode
840 * @seccomp_mode: requested mode to use
841 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
842 *
843 * Returns 0 on success or -EINVAL on failure.
844 */
845long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
846{
847 unsigned int op;
848 char __user *uargs;
849
850 switch (seccomp_mode) {
851 case SECCOMP_MODE_STRICT:
852 op = SECCOMP_SET_MODE_STRICT;
853 /*
854 * Setting strict mode through prctl always ignored filter,
855 * so make sure it is always NULL here to pass the internal
856 * check in do_seccomp().
857 */
858 uargs = NULL;
859 break;
860 case SECCOMP_MODE_FILTER:
861 op = SECCOMP_SET_MODE_FILTER;
862 uargs = filter;
863 break;
864 default:
865 return -EINVAL;
866 }
867
868 /* prctl interface doesn't have flags, so they are always zero. */
869 return do_seccomp(op, 0, uargs);
870}
871
872#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
873long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
874 void __user *data)
875{
876 struct seccomp_filter *filter;
877 struct sock_fprog_kern *fprog;
878 long ret;
879 unsigned long count = 0;
880
881 if (!capable(CAP_SYS_ADMIN) ||
882 current->seccomp.mode != SECCOMP_MODE_DISABLED) {
883 return -EACCES;
884 }
885
886 spin_lock_irq(&task->sighand->siglock);
887 if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
888 ret = -EINVAL;
889 goto out;
890 }
891
892 filter = task->seccomp.filter;
893 while (filter) {
894 filter = filter->prev;
895 count++;
896 }
897
898 if (filter_off >= count) {
899 ret = -ENOENT;
900 goto out;
901 }
902 count -= filter_off;
903
904 filter = task->seccomp.filter;
905 while (filter && count > 1) {
906 filter = filter->prev;
907 count--;
908 }
909
910 if (WARN_ON(count != 1 || !filter)) {
911 /* The filter tree shouldn't shrink while we're using it. */
912 ret = -ENOENT;
913 goto out;
914 }
915
916 fprog = filter->prog->orig_prog;
917 if (!fprog) {
918 /* This must be a new non-cBPF filter, since we save every
919 * every cBPF filter's orig_prog above when
920 * CONFIG_CHECKPOINT_RESTORE is enabled.
921 */
922 ret = -EMEDIUMTYPE;
923 goto out;
924 }
925
926 ret = fprog->len;
927 if (!data)
928 goto out;
929
930 get_seccomp_filter(task);
931 spin_unlock_irq(&task->sighand->siglock);
932
933 if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
934 ret = -EFAULT;
935
936 put_seccomp_filter(task);
937 return ret;
938
939out:
940 spin_unlock_irq(&task->sighand->siglock);
941 return ret;
942}
943#endif
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/kernel/seccomp.c
4 *
5 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
6 *
7 * Copyright (C) 2012 Google, Inc.
8 * Will Drewry <wad@chromium.org>
9 *
10 * This defines a simple but solid secure-computing facility.
11 *
12 * Mode 1 uses a fixed list of allowed system calls.
13 * Mode 2 allows user-defined system call filters in the form
14 * of Berkeley Packet Filters/Linux Socket Filters.
15 */
16
17#include <linux/refcount.h>
18#include <linux/audit.h>
19#include <linux/compat.h>
20#include <linux/coredump.h>
21#include <linux/kmemleak.h>
22#include <linux/nospec.h>
23#include <linux/prctl.h>
24#include <linux/sched.h>
25#include <linux/sched/task_stack.h>
26#include <linux/seccomp.h>
27#include <linux/slab.h>
28#include <linux/syscalls.h>
29#include <linux/sysctl.h>
30
31#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
32#include <asm/syscall.h>
33#endif
34
35#ifdef CONFIG_SECCOMP_FILTER
36#include <linux/filter.h>
37#include <linux/pid.h>
38#include <linux/ptrace.h>
39#include <linux/security.h>
40#include <linux/tracehook.h>
41#include <linux/uaccess.h>
42
43/**
44 * struct seccomp_filter - container for seccomp BPF programs
45 *
46 * @usage: reference count to manage the object lifetime.
47 * get/put helpers should be used when accessing an instance
48 * outside of a lifetime-guarded section. In general, this
49 * is only needed for handling filters shared across tasks.
50 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
51 * @prev: points to a previously installed, or inherited, filter
52 * @prog: the BPF program to evaluate
53 *
54 * seccomp_filter objects are organized in a tree linked via the @prev
55 * pointer. For any task, it appears to be a singly-linked list starting
56 * with current->seccomp.filter, the most recently attached or inherited filter.
57 * However, multiple filters may share a @prev node, by way of fork(), which
58 * results in a unidirectional tree existing in memory. This is similar to
59 * how namespaces work.
60 *
61 * seccomp_filter objects should never be modified after being attached
62 * to a task_struct (other than @usage).
63 */
64struct seccomp_filter {
65 refcount_t usage;
66 bool log;
67 struct seccomp_filter *prev;
68 struct bpf_prog *prog;
69};
70
71/* Limit any path through the tree to 256KB worth of instructions. */
72#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
73
74/*
75 * Endianness is explicitly ignored and left for BPF program authors to manage
76 * as per the specific architecture.
77 */
78static void populate_seccomp_data(struct seccomp_data *sd)
79{
80 struct task_struct *task = current;
81 struct pt_regs *regs = task_pt_regs(task);
82 unsigned long args[6];
83
84 sd->nr = syscall_get_nr(task, regs);
85 sd->arch = syscall_get_arch();
86 syscall_get_arguments(task, regs, 0, 6, args);
87 sd->args[0] = args[0];
88 sd->args[1] = args[1];
89 sd->args[2] = args[2];
90 sd->args[3] = args[3];
91 sd->args[4] = args[4];
92 sd->args[5] = args[5];
93 sd->instruction_pointer = KSTK_EIP(task);
94}
95
96/**
97 * seccomp_check_filter - verify seccomp filter code
98 * @filter: filter to verify
99 * @flen: length of filter
100 *
101 * Takes a previously checked filter (by bpf_check_classic) and
102 * redirects all filter code that loads struct sk_buff data
103 * and related data through seccomp_bpf_load. It also
104 * enforces length and alignment checking of those loads.
105 *
106 * Returns 0 if the rule set is legal or -EINVAL if not.
107 */
108static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
109{
110 int pc;
111 for (pc = 0; pc < flen; pc++) {
112 struct sock_filter *ftest = &filter[pc];
113 u16 code = ftest->code;
114 u32 k = ftest->k;
115
116 switch (code) {
117 case BPF_LD | BPF_W | BPF_ABS:
118 ftest->code = BPF_LDX | BPF_W | BPF_ABS;
119 /* 32-bit aligned and not out of bounds. */
120 if (k >= sizeof(struct seccomp_data) || k & 3)
121 return -EINVAL;
122 continue;
123 case BPF_LD | BPF_W | BPF_LEN:
124 ftest->code = BPF_LD | BPF_IMM;
125 ftest->k = sizeof(struct seccomp_data);
126 continue;
127 case BPF_LDX | BPF_W | BPF_LEN:
128 ftest->code = BPF_LDX | BPF_IMM;
129 ftest->k = sizeof(struct seccomp_data);
130 continue;
131 /* Explicitly include allowed calls. */
132 case BPF_RET | BPF_K:
133 case BPF_RET | BPF_A:
134 case BPF_ALU | BPF_ADD | BPF_K:
135 case BPF_ALU | BPF_ADD | BPF_X:
136 case BPF_ALU | BPF_SUB | BPF_K:
137 case BPF_ALU | BPF_SUB | BPF_X:
138 case BPF_ALU | BPF_MUL | BPF_K:
139 case BPF_ALU | BPF_MUL | BPF_X:
140 case BPF_ALU | BPF_DIV | BPF_K:
141 case BPF_ALU | BPF_DIV | BPF_X:
142 case BPF_ALU | BPF_AND | BPF_K:
143 case BPF_ALU | BPF_AND | BPF_X:
144 case BPF_ALU | BPF_OR | BPF_K:
145 case BPF_ALU | BPF_OR | BPF_X:
146 case BPF_ALU | BPF_XOR | BPF_K:
147 case BPF_ALU | BPF_XOR | BPF_X:
148 case BPF_ALU | BPF_LSH | BPF_K:
149 case BPF_ALU | BPF_LSH | BPF_X:
150 case BPF_ALU | BPF_RSH | BPF_K:
151 case BPF_ALU | BPF_RSH | BPF_X:
152 case BPF_ALU | BPF_NEG:
153 case BPF_LD | BPF_IMM:
154 case BPF_LDX | BPF_IMM:
155 case BPF_MISC | BPF_TAX:
156 case BPF_MISC | BPF_TXA:
157 case BPF_LD | BPF_MEM:
158 case BPF_LDX | BPF_MEM:
159 case BPF_ST:
160 case BPF_STX:
161 case BPF_JMP | BPF_JA:
162 case BPF_JMP | BPF_JEQ | BPF_K:
163 case BPF_JMP | BPF_JEQ | BPF_X:
164 case BPF_JMP | BPF_JGE | BPF_K:
165 case BPF_JMP | BPF_JGE | BPF_X:
166 case BPF_JMP | BPF_JGT | BPF_K:
167 case BPF_JMP | BPF_JGT | BPF_X:
168 case BPF_JMP | BPF_JSET | BPF_K:
169 case BPF_JMP | BPF_JSET | BPF_X:
170 continue;
171 default:
172 return -EINVAL;
173 }
174 }
175 return 0;
176}
177
178/**
179 * seccomp_run_filters - evaluates all seccomp filters against @sd
180 * @sd: optional seccomp data to be passed to filters
181 * @match: stores struct seccomp_filter that resulted in the return value,
182 * unless filter returned SECCOMP_RET_ALLOW, in which case it will
183 * be unchanged.
184 *
185 * Returns valid seccomp BPF response codes.
186 */
187#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
188static u32 seccomp_run_filters(const struct seccomp_data *sd,
189 struct seccomp_filter **match)
190{
191 struct seccomp_data sd_local;
192 u32 ret = SECCOMP_RET_ALLOW;
193 /* Make sure cross-thread synced filter points somewhere sane. */
194 struct seccomp_filter *f =
195 READ_ONCE(current->seccomp.filter);
196
197 /* Ensure unexpected behavior doesn't result in failing open. */
198 if (unlikely(WARN_ON(f == NULL)))
199 return SECCOMP_RET_KILL_PROCESS;
200
201 if (!sd) {
202 populate_seccomp_data(&sd_local);
203 sd = &sd_local;
204 }
205
206 /*
207 * All filters in the list are evaluated and the lowest BPF return
208 * value always takes priority (ignoring the DATA).
209 */
210 for (; f; f = f->prev) {
211 u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
212
213 if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
214 ret = cur_ret;
215 *match = f;
216 }
217 }
218 return ret;
219}
220#endif /* CONFIG_SECCOMP_FILTER */
221
222static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
223{
224 assert_spin_locked(¤t->sighand->siglock);
225
226 if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
227 return false;
228
229 return true;
230}
231
232void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
233
234static inline void seccomp_assign_mode(struct task_struct *task,
235 unsigned long seccomp_mode,
236 unsigned long flags)
237{
238 assert_spin_locked(&task->sighand->siglock);
239
240 task->seccomp.mode = seccomp_mode;
241 /*
242 * Make sure TIF_SECCOMP cannot be set before the mode (and
243 * filter) is set.
244 */
245 smp_mb__before_atomic();
246 /* Assume default seccomp processes want spec flaw mitigation. */
247 if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
248 arch_seccomp_spec_mitigate(task);
249 set_tsk_thread_flag(task, TIF_SECCOMP);
250}
251
252#ifdef CONFIG_SECCOMP_FILTER
253/* Returns 1 if the parent is an ancestor of the child. */
254static int is_ancestor(struct seccomp_filter *parent,
255 struct seccomp_filter *child)
256{
257 /* NULL is the root ancestor. */
258 if (parent == NULL)
259 return 1;
260 for (; child; child = child->prev)
261 if (child == parent)
262 return 1;
263 return 0;
264}
265
266/**
267 * seccomp_can_sync_threads: checks if all threads can be synchronized
268 *
269 * Expects sighand and cred_guard_mutex locks to be held.
270 *
271 * Returns 0 on success, -ve on error, or the pid of a thread which was
272 * either not in the correct seccomp mode or it did not have an ancestral
273 * seccomp filter.
274 */
275static inline pid_t seccomp_can_sync_threads(void)
276{
277 struct task_struct *thread, *caller;
278
279 BUG_ON(!mutex_is_locked(¤t->signal->cred_guard_mutex));
280 assert_spin_locked(¤t->sighand->siglock);
281
282 /* Validate all threads being eligible for synchronization. */
283 caller = current;
284 for_each_thread(caller, thread) {
285 pid_t failed;
286
287 /* Skip current, since it is initiating the sync. */
288 if (thread == caller)
289 continue;
290
291 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
292 (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
293 is_ancestor(thread->seccomp.filter,
294 caller->seccomp.filter)))
295 continue;
296
297 /* Return the first thread that cannot be synchronized. */
298 failed = task_pid_vnr(thread);
299 /* If the pid cannot be resolved, then return -ESRCH */
300 if (unlikely(WARN_ON(failed == 0)))
301 failed = -ESRCH;
302 return failed;
303 }
304
305 return 0;
306}
307
308/**
309 * seccomp_sync_threads: sets all threads to use current's filter
310 *
311 * Expects sighand and cred_guard_mutex locks to be held, and for
312 * seccomp_can_sync_threads() to have returned success already
313 * without dropping the locks.
314 *
315 */
316static inline void seccomp_sync_threads(unsigned long flags)
317{
318 struct task_struct *thread, *caller;
319
320 BUG_ON(!mutex_is_locked(¤t->signal->cred_guard_mutex));
321 assert_spin_locked(¤t->sighand->siglock);
322
323 /* Synchronize all threads. */
324 caller = current;
325 for_each_thread(caller, thread) {
326 /* Skip current, since it needs no changes. */
327 if (thread == caller)
328 continue;
329
330 /* Get a task reference for the new leaf node. */
331 get_seccomp_filter(caller);
332 /*
333 * Drop the task reference to the shared ancestor since
334 * current's path will hold a reference. (This also
335 * allows a put before the assignment.)
336 */
337 put_seccomp_filter(thread);
338 smp_store_release(&thread->seccomp.filter,
339 caller->seccomp.filter);
340
341 /*
342 * Don't let an unprivileged task work around
343 * the no_new_privs restriction by creating
344 * a thread that sets it up, enters seccomp,
345 * then dies.
346 */
347 if (task_no_new_privs(caller))
348 task_set_no_new_privs(thread);
349
350 /*
351 * Opt the other thread into seccomp if needed.
352 * As threads are considered to be trust-realm
353 * equivalent (see ptrace_may_access), it is safe to
354 * allow one thread to transition the other.
355 */
356 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
357 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
358 flags);
359 }
360}
361
362/**
363 * seccomp_prepare_filter: Prepares a seccomp filter for use.
364 * @fprog: BPF program to install
365 *
366 * Returns filter on success or an ERR_PTR on failure.
367 */
368static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
369{
370 struct seccomp_filter *sfilter;
371 int ret;
372 const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
373
374 if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
375 return ERR_PTR(-EINVAL);
376
377 BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
378
379 /*
380 * Installing a seccomp filter requires that the task has
381 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
382 * This avoids scenarios where unprivileged tasks can affect the
383 * behavior of privileged children.
384 */
385 if (!task_no_new_privs(current) &&
386 security_capable_noaudit(current_cred(), current_user_ns(),
387 CAP_SYS_ADMIN) != 0)
388 return ERR_PTR(-EACCES);
389
390 /* Allocate a new seccomp_filter */
391 sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
392 if (!sfilter)
393 return ERR_PTR(-ENOMEM);
394
395 ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
396 seccomp_check_filter, save_orig);
397 if (ret < 0) {
398 kfree(sfilter);
399 return ERR_PTR(ret);
400 }
401
402 refcount_set(&sfilter->usage, 1);
403
404 return sfilter;
405}
406
407/**
408 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
409 * @user_filter: pointer to the user data containing a sock_fprog.
410 *
411 * Returns 0 on success and non-zero otherwise.
412 */
413static struct seccomp_filter *
414seccomp_prepare_user_filter(const char __user *user_filter)
415{
416 struct sock_fprog fprog;
417 struct seccomp_filter *filter = ERR_PTR(-EFAULT);
418
419#ifdef CONFIG_COMPAT
420 if (in_compat_syscall()) {
421 struct compat_sock_fprog fprog32;
422 if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
423 goto out;
424 fprog.len = fprog32.len;
425 fprog.filter = compat_ptr(fprog32.filter);
426 } else /* falls through to the if below. */
427#endif
428 if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
429 goto out;
430 filter = seccomp_prepare_filter(&fprog);
431out:
432 return filter;
433}
434
435/**
436 * seccomp_attach_filter: validate and attach filter
437 * @flags: flags to change filter behavior
438 * @filter: seccomp filter to add to the current process
439 *
440 * Caller must be holding current->sighand->siglock lock.
441 *
442 * Returns 0 on success, -ve on error.
443 */
444static long seccomp_attach_filter(unsigned int flags,
445 struct seccomp_filter *filter)
446{
447 unsigned long total_insns;
448 struct seccomp_filter *walker;
449
450 assert_spin_locked(¤t->sighand->siglock);
451
452 /* Validate resulting filter length. */
453 total_insns = filter->prog->len;
454 for (walker = current->seccomp.filter; walker; walker = walker->prev)
455 total_insns += walker->prog->len + 4; /* 4 instr penalty */
456 if (total_insns > MAX_INSNS_PER_PATH)
457 return -ENOMEM;
458
459 /* If thread sync has been requested, check that it is possible. */
460 if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
461 int ret;
462
463 ret = seccomp_can_sync_threads();
464 if (ret)
465 return ret;
466 }
467
468 /* Set log flag, if present. */
469 if (flags & SECCOMP_FILTER_FLAG_LOG)
470 filter->log = true;
471
472 /*
473 * If there is an existing filter, make it the prev and don't drop its
474 * task reference.
475 */
476 filter->prev = current->seccomp.filter;
477 current->seccomp.filter = filter;
478
479 /* Now that the new filter is in place, synchronize to all threads. */
480 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
481 seccomp_sync_threads(flags);
482
483 return 0;
484}
485
486static void __get_seccomp_filter(struct seccomp_filter *filter)
487{
488 /* Reference count is bounded by the number of total processes. */
489 refcount_inc(&filter->usage);
490}
491
492/* get_seccomp_filter - increments the reference count of the filter on @tsk */
493void get_seccomp_filter(struct task_struct *tsk)
494{
495 struct seccomp_filter *orig = tsk->seccomp.filter;
496 if (!orig)
497 return;
498 __get_seccomp_filter(orig);
499}
500
501static inline void seccomp_filter_free(struct seccomp_filter *filter)
502{
503 if (filter) {
504 bpf_prog_destroy(filter->prog);
505 kfree(filter);
506 }
507}
508
509static void __put_seccomp_filter(struct seccomp_filter *orig)
510{
511 /* Clean up single-reference branches iteratively. */
512 while (orig && refcount_dec_and_test(&orig->usage)) {
513 struct seccomp_filter *freeme = orig;
514 orig = orig->prev;
515 seccomp_filter_free(freeme);
516 }
517}
518
519/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
520void put_seccomp_filter(struct task_struct *tsk)
521{
522 __put_seccomp_filter(tsk->seccomp.filter);
523}
524
525static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
526{
527 clear_siginfo(info);
528 info->si_signo = SIGSYS;
529 info->si_code = SYS_SECCOMP;
530 info->si_call_addr = (void __user *)KSTK_EIP(current);
531 info->si_errno = reason;
532 info->si_arch = syscall_get_arch();
533 info->si_syscall = syscall;
534}
535
536/**
537 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
538 * @syscall: syscall number to send to userland
539 * @reason: filter-supplied reason code to send to userland (via si_errno)
540 *
541 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
542 */
543static void seccomp_send_sigsys(int syscall, int reason)
544{
545 struct siginfo info;
546 seccomp_init_siginfo(&info, syscall, reason);
547 force_sig_info(SIGSYS, &info, current);
548}
549#endif /* CONFIG_SECCOMP_FILTER */
550
551/* For use with seccomp_actions_logged */
552#define SECCOMP_LOG_KILL_PROCESS (1 << 0)
553#define SECCOMP_LOG_KILL_THREAD (1 << 1)
554#define SECCOMP_LOG_TRAP (1 << 2)
555#define SECCOMP_LOG_ERRNO (1 << 3)
556#define SECCOMP_LOG_TRACE (1 << 4)
557#define SECCOMP_LOG_LOG (1 << 5)
558#define SECCOMP_LOG_ALLOW (1 << 6)
559
560static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
561 SECCOMP_LOG_KILL_THREAD |
562 SECCOMP_LOG_TRAP |
563 SECCOMP_LOG_ERRNO |
564 SECCOMP_LOG_TRACE |
565 SECCOMP_LOG_LOG;
566
567static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
568 bool requested)
569{
570 bool log = false;
571
572 switch (action) {
573 case SECCOMP_RET_ALLOW:
574 break;
575 case SECCOMP_RET_TRAP:
576 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
577 break;
578 case SECCOMP_RET_ERRNO:
579 log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
580 break;
581 case SECCOMP_RET_TRACE:
582 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
583 break;
584 case SECCOMP_RET_LOG:
585 log = seccomp_actions_logged & SECCOMP_LOG_LOG;
586 break;
587 case SECCOMP_RET_KILL_THREAD:
588 log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
589 break;
590 case SECCOMP_RET_KILL_PROCESS:
591 default:
592 log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
593 }
594
595 /*
596 * Force an audit message to be emitted when the action is RET_KILL_*,
597 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
598 * allowed to be logged by the admin.
599 */
600 if (log)
601 return __audit_seccomp(syscall, signr, action);
602
603 /*
604 * Let the audit subsystem decide if the action should be audited based
605 * on whether the current task itself is being audited.
606 */
607 return audit_seccomp(syscall, signr, action);
608}
609
610/*
611 * Secure computing mode 1 allows only read/write/exit/sigreturn.
612 * To be fully secure this must be combined with rlimit
613 * to limit the stack allocations too.
614 */
615static const int mode1_syscalls[] = {
616 __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
617 0, /* null terminated */
618};
619
620static void __secure_computing_strict(int this_syscall)
621{
622 const int *syscall_whitelist = mode1_syscalls;
623#ifdef CONFIG_COMPAT
624 if (in_compat_syscall())
625 syscall_whitelist = get_compat_mode1_syscalls();
626#endif
627 do {
628 if (*syscall_whitelist == this_syscall)
629 return;
630 } while (*++syscall_whitelist);
631
632#ifdef SECCOMP_DEBUG
633 dump_stack();
634#endif
635 seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
636 do_exit(SIGKILL);
637}
638
639#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
640void secure_computing_strict(int this_syscall)
641{
642 int mode = current->seccomp.mode;
643
644 if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
645 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
646 return;
647
648 if (mode == SECCOMP_MODE_DISABLED)
649 return;
650 else if (mode == SECCOMP_MODE_STRICT)
651 __secure_computing_strict(this_syscall);
652 else
653 BUG();
654}
655#else
656
657#ifdef CONFIG_SECCOMP_FILTER
658static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
659 const bool recheck_after_trace)
660{
661 u32 filter_ret, action;
662 struct seccomp_filter *match = NULL;
663 int data;
664
665 /*
666 * Make sure that any changes to mode from another thread have
667 * been seen after TIF_SECCOMP was seen.
668 */
669 rmb();
670
671 filter_ret = seccomp_run_filters(sd, &match);
672 data = filter_ret & SECCOMP_RET_DATA;
673 action = filter_ret & SECCOMP_RET_ACTION_FULL;
674
675 switch (action) {
676 case SECCOMP_RET_ERRNO:
677 /* Set low-order bits as an errno, capped at MAX_ERRNO. */
678 if (data > MAX_ERRNO)
679 data = MAX_ERRNO;
680 syscall_set_return_value(current, task_pt_regs(current),
681 -data, 0);
682 goto skip;
683
684 case SECCOMP_RET_TRAP:
685 /* Show the handler the original registers. */
686 syscall_rollback(current, task_pt_regs(current));
687 /* Let the filter pass back 16 bits of data. */
688 seccomp_send_sigsys(this_syscall, data);
689 goto skip;
690
691 case SECCOMP_RET_TRACE:
692 /* We've been put in this state by the ptracer already. */
693 if (recheck_after_trace)
694 return 0;
695
696 /* ENOSYS these calls if there is no tracer attached. */
697 if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
698 syscall_set_return_value(current,
699 task_pt_regs(current),
700 -ENOSYS, 0);
701 goto skip;
702 }
703
704 /* Allow the BPF to provide the event message */
705 ptrace_event(PTRACE_EVENT_SECCOMP, data);
706 /*
707 * The delivery of a fatal signal during event
708 * notification may silently skip tracer notification,
709 * which could leave us with a potentially unmodified
710 * syscall that the tracer would have liked to have
711 * changed. Since the process is about to die, we just
712 * force the syscall to be skipped and let the signal
713 * kill the process and correctly handle any tracer exit
714 * notifications.
715 */
716 if (fatal_signal_pending(current))
717 goto skip;
718 /* Check if the tracer forced the syscall to be skipped. */
719 this_syscall = syscall_get_nr(current, task_pt_regs(current));
720 if (this_syscall < 0)
721 goto skip;
722
723 /*
724 * Recheck the syscall, since it may have changed. This
725 * intentionally uses a NULL struct seccomp_data to force
726 * a reload of all registers. This does not goto skip since
727 * a skip would have already been reported.
728 */
729 if (__seccomp_filter(this_syscall, NULL, true))
730 return -1;
731
732 return 0;
733
734 case SECCOMP_RET_LOG:
735 seccomp_log(this_syscall, 0, action, true);
736 return 0;
737
738 case SECCOMP_RET_ALLOW:
739 /*
740 * Note that the "match" filter will always be NULL for
741 * this action since SECCOMP_RET_ALLOW is the starting
742 * state in seccomp_run_filters().
743 */
744 return 0;
745
746 case SECCOMP_RET_KILL_THREAD:
747 case SECCOMP_RET_KILL_PROCESS:
748 default:
749 seccomp_log(this_syscall, SIGSYS, action, true);
750 /* Dump core only if this is the last remaining thread. */
751 if (action == SECCOMP_RET_KILL_PROCESS ||
752 get_nr_threads(current) == 1) {
753 siginfo_t info;
754
755 /* Show the original registers in the dump. */
756 syscall_rollback(current, task_pt_regs(current));
757 /* Trigger a manual coredump since do_exit skips it. */
758 seccomp_init_siginfo(&info, this_syscall, data);
759 do_coredump(&info);
760 }
761 if (action == SECCOMP_RET_KILL_PROCESS)
762 do_group_exit(SIGSYS);
763 else
764 do_exit(SIGSYS);
765 }
766
767 unreachable();
768
769skip:
770 seccomp_log(this_syscall, 0, action, match ? match->log : false);
771 return -1;
772}
773#else
774static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
775 const bool recheck_after_trace)
776{
777 BUG();
778}
779#endif
780
781int __secure_computing(const struct seccomp_data *sd)
782{
783 int mode = current->seccomp.mode;
784 int this_syscall;
785
786 if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
787 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
788 return 0;
789
790 this_syscall = sd ? sd->nr :
791 syscall_get_nr(current, task_pt_regs(current));
792
793 switch (mode) {
794 case SECCOMP_MODE_STRICT:
795 __secure_computing_strict(this_syscall); /* may call do_exit */
796 return 0;
797 case SECCOMP_MODE_FILTER:
798 return __seccomp_filter(this_syscall, sd, false);
799 default:
800 BUG();
801 }
802}
803#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
804
805long prctl_get_seccomp(void)
806{
807 return current->seccomp.mode;
808}
809
810/**
811 * seccomp_set_mode_strict: internal function for setting strict seccomp
812 *
813 * Once current->seccomp.mode is non-zero, it may not be changed.
814 *
815 * Returns 0 on success or -EINVAL on failure.
816 */
817static long seccomp_set_mode_strict(void)
818{
819 const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
820 long ret = -EINVAL;
821
822 spin_lock_irq(¤t->sighand->siglock);
823
824 if (!seccomp_may_assign_mode(seccomp_mode))
825 goto out;
826
827#ifdef TIF_NOTSC
828 disable_TSC();
829#endif
830 seccomp_assign_mode(current, seccomp_mode, 0);
831 ret = 0;
832
833out:
834 spin_unlock_irq(¤t->sighand->siglock);
835
836 return ret;
837}
838
839#ifdef CONFIG_SECCOMP_FILTER
840/**
841 * seccomp_set_mode_filter: internal function for setting seccomp filter
842 * @flags: flags to change filter behavior
843 * @filter: struct sock_fprog containing filter
844 *
845 * This function may be called repeatedly to install additional filters.
846 * Every filter successfully installed will be evaluated (in reverse order)
847 * for each system call the task makes.
848 *
849 * Once current->seccomp.mode is non-zero, it may not be changed.
850 *
851 * Returns 0 on success or -EINVAL on failure.
852 */
853static long seccomp_set_mode_filter(unsigned int flags,
854 const char __user *filter)
855{
856 const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
857 struct seccomp_filter *prepared = NULL;
858 long ret = -EINVAL;
859
860 /* Validate flags. */
861 if (flags & ~SECCOMP_FILTER_FLAG_MASK)
862 return -EINVAL;
863
864 /* Prepare the new filter before holding any locks. */
865 prepared = seccomp_prepare_user_filter(filter);
866 if (IS_ERR(prepared))
867 return PTR_ERR(prepared);
868
869 /*
870 * Make sure we cannot change seccomp or nnp state via TSYNC
871 * while another thread is in the middle of calling exec.
872 */
873 if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
874 mutex_lock_killable(¤t->signal->cred_guard_mutex))
875 goto out_free;
876
877 spin_lock_irq(¤t->sighand->siglock);
878
879 if (!seccomp_may_assign_mode(seccomp_mode))
880 goto out;
881
882 ret = seccomp_attach_filter(flags, prepared);
883 if (ret)
884 goto out;
885 /* Do not free the successfully attached filter. */
886 prepared = NULL;
887
888 seccomp_assign_mode(current, seccomp_mode, flags);
889out:
890 spin_unlock_irq(¤t->sighand->siglock);
891 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
892 mutex_unlock(¤t->signal->cred_guard_mutex);
893out_free:
894 seccomp_filter_free(prepared);
895 return ret;
896}
897#else
898static inline long seccomp_set_mode_filter(unsigned int flags,
899 const char __user *filter)
900{
901 return -EINVAL;
902}
903#endif
904
905static long seccomp_get_action_avail(const char __user *uaction)
906{
907 u32 action;
908
909 if (copy_from_user(&action, uaction, sizeof(action)))
910 return -EFAULT;
911
912 switch (action) {
913 case SECCOMP_RET_KILL_PROCESS:
914 case SECCOMP_RET_KILL_THREAD:
915 case SECCOMP_RET_TRAP:
916 case SECCOMP_RET_ERRNO:
917 case SECCOMP_RET_TRACE:
918 case SECCOMP_RET_LOG:
919 case SECCOMP_RET_ALLOW:
920 break;
921 default:
922 return -EOPNOTSUPP;
923 }
924
925 return 0;
926}
927
928/* Common entry point for both prctl and syscall. */
929static long do_seccomp(unsigned int op, unsigned int flags,
930 const char __user *uargs)
931{
932 switch (op) {
933 case SECCOMP_SET_MODE_STRICT:
934 if (flags != 0 || uargs != NULL)
935 return -EINVAL;
936 return seccomp_set_mode_strict();
937 case SECCOMP_SET_MODE_FILTER:
938 return seccomp_set_mode_filter(flags, uargs);
939 case SECCOMP_GET_ACTION_AVAIL:
940 if (flags != 0)
941 return -EINVAL;
942
943 return seccomp_get_action_avail(uargs);
944 default:
945 return -EINVAL;
946 }
947}
948
949SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
950 const char __user *, uargs)
951{
952 return do_seccomp(op, flags, uargs);
953}
954
955/**
956 * prctl_set_seccomp: configures current->seccomp.mode
957 * @seccomp_mode: requested mode to use
958 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
959 *
960 * Returns 0 on success or -EINVAL on failure.
961 */
962long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
963{
964 unsigned int op;
965 char __user *uargs;
966
967 switch (seccomp_mode) {
968 case SECCOMP_MODE_STRICT:
969 op = SECCOMP_SET_MODE_STRICT;
970 /*
971 * Setting strict mode through prctl always ignored filter,
972 * so make sure it is always NULL here to pass the internal
973 * check in do_seccomp().
974 */
975 uargs = NULL;
976 break;
977 case SECCOMP_MODE_FILTER:
978 op = SECCOMP_SET_MODE_FILTER;
979 uargs = filter;
980 break;
981 default:
982 return -EINVAL;
983 }
984
985 /* prctl interface doesn't have flags, so they are always zero. */
986 return do_seccomp(op, 0, uargs);
987}
988
989#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
990static struct seccomp_filter *get_nth_filter(struct task_struct *task,
991 unsigned long filter_off)
992{
993 struct seccomp_filter *orig, *filter;
994 unsigned long count;
995
996 /*
997 * Note: this is only correct because the caller should be the (ptrace)
998 * tracer of the task, otherwise lock_task_sighand is needed.
999 */
1000 spin_lock_irq(&task->sighand->siglock);
1001
1002 if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
1003 spin_unlock_irq(&task->sighand->siglock);
1004 return ERR_PTR(-EINVAL);
1005 }
1006
1007 orig = task->seccomp.filter;
1008 __get_seccomp_filter(orig);
1009 spin_unlock_irq(&task->sighand->siglock);
1010
1011 count = 0;
1012 for (filter = orig; filter; filter = filter->prev)
1013 count++;
1014
1015 if (filter_off >= count) {
1016 filter = ERR_PTR(-ENOENT);
1017 goto out;
1018 }
1019
1020 count -= filter_off;
1021 for (filter = orig; filter && count > 1; filter = filter->prev)
1022 count--;
1023
1024 if (WARN_ON(count != 1 || !filter)) {
1025 filter = ERR_PTR(-ENOENT);
1026 goto out;
1027 }
1028
1029 __get_seccomp_filter(filter);
1030
1031out:
1032 __put_seccomp_filter(orig);
1033 return filter;
1034}
1035
1036long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
1037 void __user *data)
1038{
1039 struct seccomp_filter *filter;
1040 struct sock_fprog_kern *fprog;
1041 long ret;
1042
1043 if (!capable(CAP_SYS_ADMIN) ||
1044 current->seccomp.mode != SECCOMP_MODE_DISABLED) {
1045 return -EACCES;
1046 }
1047
1048 filter = get_nth_filter(task, filter_off);
1049 if (IS_ERR(filter))
1050 return PTR_ERR(filter);
1051
1052 fprog = filter->prog->orig_prog;
1053 if (!fprog) {
1054 /* This must be a new non-cBPF filter, since we save
1055 * every cBPF filter's orig_prog above when
1056 * CONFIG_CHECKPOINT_RESTORE is enabled.
1057 */
1058 ret = -EMEDIUMTYPE;
1059 goto out;
1060 }
1061
1062 ret = fprog->len;
1063 if (!data)
1064 goto out;
1065
1066 if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
1067 ret = -EFAULT;
1068
1069out:
1070 __put_seccomp_filter(filter);
1071 return ret;
1072}
1073
1074long seccomp_get_metadata(struct task_struct *task,
1075 unsigned long size, void __user *data)
1076{
1077 long ret;
1078 struct seccomp_filter *filter;
1079 struct seccomp_metadata kmd = {};
1080
1081 if (!capable(CAP_SYS_ADMIN) ||
1082 current->seccomp.mode != SECCOMP_MODE_DISABLED) {
1083 return -EACCES;
1084 }
1085
1086 size = min_t(unsigned long, size, sizeof(kmd));
1087
1088 if (size < sizeof(kmd.filter_off))
1089 return -EINVAL;
1090
1091 if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
1092 return -EFAULT;
1093
1094 filter = get_nth_filter(task, kmd.filter_off);
1095 if (IS_ERR(filter))
1096 return PTR_ERR(filter);
1097
1098 if (filter->log)
1099 kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
1100
1101 ret = size;
1102 if (copy_to_user(data, &kmd, size))
1103 ret = -EFAULT;
1104
1105 __put_seccomp_filter(filter);
1106 return ret;
1107}
1108#endif
1109
1110#ifdef CONFIG_SYSCTL
1111
1112/* Human readable action names for friendly sysctl interaction */
1113#define SECCOMP_RET_KILL_PROCESS_NAME "kill_process"
1114#define SECCOMP_RET_KILL_THREAD_NAME "kill_thread"
1115#define SECCOMP_RET_TRAP_NAME "trap"
1116#define SECCOMP_RET_ERRNO_NAME "errno"
1117#define SECCOMP_RET_TRACE_NAME "trace"
1118#define SECCOMP_RET_LOG_NAME "log"
1119#define SECCOMP_RET_ALLOW_NAME "allow"
1120
1121static const char seccomp_actions_avail[] =
1122 SECCOMP_RET_KILL_PROCESS_NAME " "
1123 SECCOMP_RET_KILL_THREAD_NAME " "
1124 SECCOMP_RET_TRAP_NAME " "
1125 SECCOMP_RET_ERRNO_NAME " "
1126 SECCOMP_RET_TRACE_NAME " "
1127 SECCOMP_RET_LOG_NAME " "
1128 SECCOMP_RET_ALLOW_NAME;
1129
1130struct seccomp_log_name {
1131 u32 log;
1132 const char *name;
1133};
1134
1135static const struct seccomp_log_name seccomp_log_names[] = {
1136 { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
1137 { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
1138 { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
1139 { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
1140 { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
1141 { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
1142 { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
1143 { }
1144};
1145
1146static bool seccomp_names_from_actions_logged(char *names, size_t size,
1147 u32 actions_logged)
1148{
1149 const struct seccomp_log_name *cur;
1150 bool append_space = false;
1151
1152 for (cur = seccomp_log_names; cur->name && size; cur++) {
1153 ssize_t ret;
1154
1155 if (!(actions_logged & cur->log))
1156 continue;
1157
1158 if (append_space) {
1159 ret = strscpy(names, " ", size);
1160 if (ret < 0)
1161 return false;
1162
1163 names += ret;
1164 size -= ret;
1165 } else
1166 append_space = true;
1167
1168 ret = strscpy(names, cur->name, size);
1169 if (ret < 0)
1170 return false;
1171
1172 names += ret;
1173 size -= ret;
1174 }
1175
1176 return true;
1177}
1178
1179static bool seccomp_action_logged_from_name(u32 *action_logged,
1180 const char *name)
1181{
1182 const struct seccomp_log_name *cur;
1183
1184 for (cur = seccomp_log_names; cur->name; cur++) {
1185 if (!strcmp(cur->name, name)) {
1186 *action_logged = cur->log;
1187 return true;
1188 }
1189 }
1190
1191 return false;
1192}
1193
1194static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
1195{
1196 char *name;
1197
1198 *actions_logged = 0;
1199 while ((name = strsep(&names, " ")) && *name) {
1200 u32 action_logged = 0;
1201
1202 if (!seccomp_action_logged_from_name(&action_logged, name))
1203 return false;
1204
1205 *actions_logged |= action_logged;
1206 }
1207
1208 return true;
1209}
1210
1211static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
1212 void __user *buffer, size_t *lenp,
1213 loff_t *ppos)
1214{
1215 char names[sizeof(seccomp_actions_avail)];
1216 struct ctl_table table;
1217 int ret;
1218
1219 if (write && !capable(CAP_SYS_ADMIN))
1220 return -EPERM;
1221
1222 memset(names, 0, sizeof(names));
1223
1224 if (!write) {
1225 if (!seccomp_names_from_actions_logged(names, sizeof(names),
1226 seccomp_actions_logged))
1227 return -EINVAL;
1228 }
1229
1230 table = *ro_table;
1231 table.data = names;
1232 table.maxlen = sizeof(names);
1233 ret = proc_dostring(&table, write, buffer, lenp, ppos);
1234 if (ret)
1235 return ret;
1236
1237 if (write) {
1238 u32 actions_logged;
1239
1240 if (!seccomp_actions_logged_from_names(&actions_logged,
1241 table.data))
1242 return -EINVAL;
1243
1244 if (actions_logged & SECCOMP_LOG_ALLOW)
1245 return -EINVAL;
1246
1247 seccomp_actions_logged = actions_logged;
1248 }
1249
1250 return 0;
1251}
1252
1253static struct ctl_path seccomp_sysctl_path[] = {
1254 { .procname = "kernel", },
1255 { .procname = "seccomp", },
1256 { }
1257};
1258
1259static struct ctl_table seccomp_sysctl_table[] = {
1260 {
1261 .procname = "actions_avail",
1262 .data = (void *) &seccomp_actions_avail,
1263 .maxlen = sizeof(seccomp_actions_avail),
1264 .mode = 0444,
1265 .proc_handler = proc_dostring,
1266 },
1267 {
1268 .procname = "actions_logged",
1269 .mode = 0644,
1270 .proc_handler = seccomp_actions_logged_handler,
1271 },
1272 { }
1273};
1274
1275static int __init seccomp_sysctl_init(void)
1276{
1277 struct ctl_table_header *hdr;
1278
1279 hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
1280 if (!hdr)
1281 pr_warn("seccomp: sysctl registration failed\n");
1282 else
1283 kmemleak_not_leak(hdr);
1284
1285 return 0;
1286}
1287
1288device_initcall(seccomp_sysctl_init)
1289
1290#endif /* CONFIG_SYSCTL */