1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Functions to manage eBPF programs attached to cgroups
4 *
5 * Copyright (c) 2016 Daniel Mack
6 */
7
8#include <linux/kernel.h>
9#include <linux/atomic.h>
10#include <linux/cgroup.h>
11#include <linux/filter.h>
12#include <linux/slab.h>
13#include <linux/sysctl.h>
14#include <linux/string.h>
15#include <linux/bpf.h>
16#include <linux/bpf-cgroup.h>
17#include <net/sock.h>
18#include <net/bpf_sk_storage.h>
19
20#include "../cgroup/cgroup-internal.h"
21
22DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
23EXPORT_SYMBOL(cgroup_bpf_enabled_key);
24
25void cgroup_bpf_offline(struct cgroup *cgrp)
26{
27 cgroup_get(cgrp);
28 percpu_ref_kill(&cgrp->bpf.refcnt);
29}
30
31/**
32 * cgroup_bpf_release() - put references of all bpf programs and
33 * release all cgroup bpf data
34 * @work: work structure embedded into the cgroup to modify
35 */
36static void cgroup_bpf_release(struct work_struct *work)
37{
38 struct cgroup *cgrp = container_of(work, struct cgroup,
39 bpf.release_work);
40 enum bpf_cgroup_storage_type stype;
41 struct bpf_prog_array *old_array;
42 unsigned int type;
43
44 mutex_lock(&cgroup_mutex);
45
46 for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
47 struct list_head *progs = &cgrp->bpf.progs[type];
48 struct bpf_prog_list *pl, *tmp;
49
50 list_for_each_entry_safe(pl, tmp, progs, node) {
51 list_del(&pl->node);
52 bpf_prog_put(pl->prog);
53 for_each_cgroup_storage_type(stype) {
54 bpf_cgroup_storage_unlink(pl->storage[stype]);
55 bpf_cgroup_storage_free(pl->storage[stype]);
56 }
57 kfree(pl);
58 static_branch_dec(&cgroup_bpf_enabled_key);
59 }
60 old_array = rcu_dereference_protected(
61 cgrp->bpf.effective[type],
62 lockdep_is_held(&cgroup_mutex));
63 bpf_prog_array_free(old_array);
64 }
65
66 mutex_unlock(&cgroup_mutex);
67
68 percpu_ref_exit(&cgrp->bpf.refcnt);
69 cgroup_put(cgrp);
70}
71
72/**
73 * cgroup_bpf_release_fn() - callback used to schedule releasing
74 * of bpf cgroup data
75 * @ref: percpu ref counter structure
76 */
77static void cgroup_bpf_release_fn(struct percpu_ref *ref)
78{
79 struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
80
81 INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
82 queue_work(system_wq, &cgrp->bpf.release_work);
83}
84
85/* count number of elements in the list.
86 * it's slow but the list cannot be long
87 */
88static u32 prog_list_length(struct list_head *head)
89{
90 struct bpf_prog_list *pl;
91 u32 cnt = 0;
92
93 list_for_each_entry(pl, head, node) {
94 if (!pl->prog)
95 continue;
96 cnt++;
97 }
98 return cnt;
99}
100
101/* if parent has non-overridable prog attached,
102 * disallow attaching new programs to the descendant cgroup.
103 * if parent has overridable or multi-prog, allow attaching
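 * E.g. a program attached to /foo with no flags blocks attaching to /foo/bar
 * (-EPERM); BPF_F_ALLOW_OVERRIDE or BPF_F_ALLOW_MULTI on /foo permits it.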
104 */
105static bool hierarchy_allows_attach(struct cgroup *cgrp,
106 enum bpf_attach_type type,
107 u32 new_flags)
108{
109 struct cgroup *p;
110
111 p = cgroup_parent(cgrp);
112 if (!p)
113 return true;
114 do {
115 u32 flags = p->bpf.flags[type];
116 u32 cnt;
117
118 if (flags & BPF_F_ALLOW_MULTI)
119 return true;
120 cnt = prog_list_length(&p->bpf.progs[type]);
121 WARN_ON_ONCE(cnt > 1);
122 if (cnt == 1)
123 return !!(flags & BPF_F_ALLOW_OVERRIDE);
124 p = cgroup_parent(p);
125 } while (p);
126 return true;
127}
128
129/* compute a chain of effective programs for a given cgroup:
130 * start from the list of programs in this cgroup and add
131 * all parent programs.
132 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
133 * to programs in this cgroup
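 * E.g. with prog A attached to /foo using BPF_F_ALLOW_MULTI and prog B
 * attached to /foo/bar, the effective array computed for /foo/bar is
 * [B, A]: this cgroup's own programs come first, then the ancestors'.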
134 */
135static int compute_effective_progs(struct cgroup *cgrp,
136 enum bpf_attach_type type,
137 struct bpf_prog_array **array)
138{
139 enum bpf_cgroup_storage_type stype;
140 struct bpf_prog_array *progs;
141 struct bpf_prog_list *pl;
142 struct cgroup *p = cgrp;
143 int cnt = 0;
144
145 /* count number of effective programs by walking parents */
146 do {
147 if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
148 cnt += prog_list_length(&p->bpf.progs[type]);
149 p = cgroup_parent(p);
150 } while (p);
151
152 progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
153 if (!progs)
154 return -ENOMEM;
155
156 /* populate the array with effective progs */
157 cnt = 0;
158 p = cgrp;
159 do {
160 if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
161 continue;
162
163 list_for_each_entry(pl, &p->bpf.progs[type], node) {
164 if (!pl->prog)
165 continue;
166
167 progs->items[cnt].prog = pl->prog;
168 for_each_cgroup_storage_type(stype)
169 progs->items[cnt].cgroup_storage[stype] =
170 pl->storage[stype];
171 cnt++;
172 }
173 } while ((p = cgroup_parent(p)));
174
175 *array = progs;
176 return 0;
177}
178
179static void activate_effective_progs(struct cgroup *cgrp,
180 enum bpf_attach_type type,
181 struct bpf_prog_array *old_array)
182{
183 rcu_swap_protected(cgrp->bpf.effective[type], old_array,
184 lockdep_is_held(&cgroup_mutex));
185 /* free prog array after grace period, since __cgroup_bpf_run_*()
186 * might be still walking the array
187 */
188 bpf_prog_array_free(old_array);
189}
190
191/**
192 * cgroup_bpf_inherit() - inherit effective programs from parent
193 * @cgrp: the cgroup to modify
194 */
195int cgroup_bpf_inherit(struct cgroup *cgrp)
196{
197/* has to use a macro instead of a const int, since the compiler thinks
198 * that array below is variable length
199 */
200#define NR ARRAY_SIZE(cgrp->bpf.effective)
201 struct bpf_prog_array *arrays[NR] = {};
202 int ret, i;
203
204 ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
205 GFP_KERNEL);
206 if (ret)
207 return ret;
208
209 for (i = 0; i < NR; i++)
210 INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
211
212 for (i = 0; i < NR; i++)
213 if (compute_effective_progs(cgrp, i, &arrays[i]))
214 goto cleanup;
215
216 for (i = 0; i < NR; i++)
217 activate_effective_progs(cgrp, i, arrays[i]);
218
219 return 0;
220cleanup:
221 for (i = 0; i < NR; i++)
222 bpf_prog_array_free(arrays[i]);
223
224 percpu_ref_exit(&cgrp->bpf.refcnt);
225
226 return -ENOMEM;
227}
228
229static int update_effective_progs(struct cgroup *cgrp,
230 enum bpf_attach_type type)
231{
232 struct cgroup_subsys_state *css;
233 int err;
234
235 /* allocate and recompute effective prog arrays */
236 css_for_each_descendant_pre(css, &cgrp->self) {
237 struct cgroup *desc = container_of(css, struct cgroup, self);
238
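		/* skip cgroups whose bpf refcnt already dropped to zero;
		 * their bpf state is being released (see cgroup_bpf_offline())
		 */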
239 if (percpu_ref_is_zero(&desc->bpf.refcnt))
240 continue;
241
242 err = compute_effective_progs(desc, type, &desc->bpf.inactive);
243 if (err)
244 goto cleanup;
245 }
246
247 /* all allocations were successful. Activate all prog arrays */
248 css_for_each_descendant_pre(css, &cgrp->self) {
249 struct cgroup *desc = container_of(css, struct cgroup, self);
250
251 if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
252 if (unlikely(desc->bpf.inactive)) {
253 bpf_prog_array_free(desc->bpf.inactive);
254 desc->bpf.inactive = NULL;
255 }
256 continue;
257 }
258
259 activate_effective_progs(desc, type, desc->bpf.inactive);
260 desc->bpf.inactive = NULL;
261 }
262
263 return 0;
264
265cleanup:
266 /* oom while computing effective. Free all computed effective arrays
267 * since they were not activated
268 */
269 css_for_each_descendant_pre(css, &cgrp->self) {
270 struct cgroup *desc = container_of(css, struct cgroup, self);
271
272 bpf_prog_array_free(desc->bpf.inactive);
273 desc->bpf.inactive = NULL;
274 }
275
276 return err;
277}
278
279#define BPF_CGROUP_MAX_PROGS 64
280
281/**
282 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
283 * propagate the change to descendants
284 * @cgrp: The cgroup which descendants to traverse
285 * @prog: A program to attach
286 * @type: Type of attach operation
287 * @flags: Option flags
288 *
289 * Must be called with cgroup_mutex held.
290 */
291int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
292 enum bpf_attach_type type, u32 flags)
293{
294 struct list_head *progs = &cgrp->bpf.progs[type];
295 struct bpf_prog *old_prog = NULL;
296 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
297 *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
298 enum bpf_cgroup_storage_type stype;
299 struct bpf_prog_list *pl;
300 bool pl_was_allocated;
301 int err;
302
303 if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
304 /* invalid combination */
305 return -EINVAL;
306
307 if (!hierarchy_allows_attach(cgrp, type, flags))
308 return -EPERM;
309
310 if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
311 /* Disallow attaching non-overridable on top
312 * of existing overridable in this cgroup.
313 * Disallow attaching multi-prog if overridable or none
314 */
315 return -EPERM;
316
317 if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
318 return -E2BIG;
319
320 for_each_cgroup_storage_type(stype) {
321 storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
322 if (IS_ERR(storage[stype])) {
323 storage[stype] = NULL;
324 for_each_cgroup_storage_type(stype)
325 bpf_cgroup_storage_free(storage[stype]);
326 return -ENOMEM;
327 }
328 }
329
330 if (flags & BPF_F_ALLOW_MULTI) {
331 list_for_each_entry(pl, progs, node) {
332 if (pl->prog == prog) {
333 /* disallow attaching the same prog twice */
334 for_each_cgroup_storage_type(stype)
335 bpf_cgroup_storage_free(storage[stype]);
336 return -EINVAL;
337 }
338 }
339
340 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
341 if (!pl) {
342 for_each_cgroup_storage_type(stype)
343 bpf_cgroup_storage_free(storage[stype]);
344 return -ENOMEM;
345 }
346
347 pl_was_allocated = true;
348 pl->prog = prog;
349 for_each_cgroup_storage_type(stype)
350 pl->storage[stype] = storage[stype];
351 list_add_tail(&pl->node, progs);
352 } else {
353 if (list_empty(progs)) {
354 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
355 if (!pl) {
356 for_each_cgroup_storage_type(stype)
357 bpf_cgroup_storage_free(storage[stype]);
358 return -ENOMEM;
359 }
360 pl_was_allocated = true;
361 list_add_tail(&pl->node, progs);
362 } else {
363 pl = list_first_entry(progs, typeof(*pl), node);
364 old_prog = pl->prog;
365 for_each_cgroup_storage_type(stype) {
366 old_storage[stype] = pl->storage[stype];
367 bpf_cgroup_storage_unlink(old_storage[stype]);
368 }
369 pl_was_allocated = false;
370 }
371 pl->prog = prog;
372 for_each_cgroup_storage_type(stype)
373 pl->storage[stype] = storage[stype];
374 }
375
376 cgrp->bpf.flags[type] = flags;
377
378 err = update_effective_progs(cgrp, type);
379 if (err)
380 goto cleanup;
381
382 static_branch_inc(&cgroup_bpf_enabled_key);
383 for_each_cgroup_storage_type(stype) {
384 if (!old_storage[stype])
385 continue;
386 bpf_cgroup_storage_free(old_storage[stype]);
387 }
388 if (old_prog) {
389 bpf_prog_put(old_prog);
390 static_branch_dec(&cgroup_bpf_enabled_key);
391 }
392 for_each_cgroup_storage_type(stype)
393 bpf_cgroup_storage_link(storage[stype], cgrp, type);
394 return 0;
395
396cleanup:
397 /* and cleanup the prog list */
398 pl->prog = old_prog;
399 for_each_cgroup_storage_type(stype) {
400 bpf_cgroup_storage_free(pl->storage[stype]);
401 pl->storage[stype] = old_storage[stype];
402 bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
403 }
404 if (pl_was_allocated) {
405 list_del(&pl->node);
406 kfree(pl);
407 }
408 return err;
409}
410
411/**
412 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
413 * propagate the change to descendants
414 * @cgrp: The cgroup which descendants to traverse
415 * @prog: A program to detach or NULL
416 * @type: Type of detach operation
417 *
418 * Must be called with cgroup_mutex held.
419 */
420int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
421 enum bpf_attach_type type)
422{
423 struct list_head *progs = &cgrp->bpf.progs[type];
424 enum bpf_cgroup_storage_type stype;
425 u32 flags = cgrp->bpf.flags[type];
426 struct bpf_prog *old_prog = NULL;
427 struct bpf_prog_list *pl;
428 int err;
429
430 if (flags & BPF_F_ALLOW_MULTI) {
431 if (!prog)
432 /* to detach MULTI prog the user has to specify valid FD
433 * of the program to be detached
434 */
435 return -EINVAL;
436 } else {
437 if (list_empty(progs))
438 /* report error when trying to detach and nothing is attached */
439 return -ENOENT;
440 }
441
442 if (flags & BPF_F_ALLOW_MULTI) {
443 /* find the prog and detach it */
444 list_for_each_entry(pl, progs, node) {
445 if (pl->prog != prog)
446 continue;
447 old_prog = prog;
448 /* mark it deleted, so it's ignored while
449 * recomputing effective
450 */
451 pl->prog = NULL;
452 break;
453 }
454 if (!old_prog)
455 return -ENOENT;
456 } else {
457 /* to maintain backward compatibility NONE and OVERRIDE cgroups
458 * allow detaching with invalid FD (prog==NULL)
459 */
460 pl = list_first_entry(progs, typeof(*pl), node);
461 old_prog = pl->prog;
462 pl->prog = NULL;
463 }
464
465 err = update_effective_progs(cgrp, type);
466 if (err)
467 goto cleanup;
468
469 /* now can actually delete it from this cgroup list */
470 list_del(&pl->node);
471 for_each_cgroup_storage_type(stype) {
472 bpf_cgroup_storage_unlink(pl->storage[stype]);
473 bpf_cgroup_storage_free(pl->storage[stype]);
474 }
475 kfree(pl);
476 if (list_empty(progs))
477 /* last program was detached, reset flags to zero */
478 cgrp->bpf.flags[type] = 0;
479
480 bpf_prog_put(old_prog);
481 static_branch_dec(&cgroup_bpf_enabled_key);
482 return 0;
483
484cleanup:
485 /* and restore back old_prog */
486 pl->prog = old_prog;
487 return err;
488}
489
490/* Must be called with cgroup_mutex held to avoid races. */
491int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
492 union bpf_attr __user *uattr)
493{
494 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
495 enum bpf_attach_type type = attr->query.attach_type;
496 struct list_head *progs = &cgrp->bpf.progs[type];
497 u32 flags = cgrp->bpf.flags[type];
498 struct bpf_prog_array *effective;
499 int cnt, ret = 0, i;
500
501 effective = rcu_dereference_protected(cgrp->bpf.effective[type],
502 lockdep_is_held(&cgroup_mutex));
503
504 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
505 cnt = bpf_prog_array_length(effective);
506 else
507 cnt = prog_list_length(progs);
508
509 if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
510 return -EFAULT;
511 if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
512 return -EFAULT;
513 if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
514 /* return early if user requested only program count + flags */
515 return 0;
516 if (attr->query.prog_cnt < cnt) {
517 cnt = attr->query.prog_cnt;
518 ret = -ENOSPC;
519 }
520
521 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
522 return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
523 } else {
524 struct bpf_prog_list *pl;
525 u32 id;
526
527 i = 0;
528 list_for_each_entry(pl, progs, node) {
529 id = pl->prog->aux->id;
530 if (copy_to_user(prog_ids + i, &id, sizeof(id)))
531 return -EFAULT;
532 if (++i == cnt)
533 break;
534 }
535 }
536 return ret;
537}
538
539int cgroup_bpf_prog_attach(const union bpf_attr *attr,
540 enum bpf_prog_type ptype, struct bpf_prog *prog)
541{
542 struct cgroup *cgrp;
543 int ret;
544
545 cgrp = cgroup_get_from_fd(attr->target_fd);
546 if (IS_ERR(cgrp))
547 return PTR_ERR(cgrp);
548
549 ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
550 attr->attach_flags);
551 cgroup_put(cgrp);
552 return ret;
553}
554
555int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
556{
557 struct bpf_prog *prog;
558 struct cgroup *cgrp;
559 int ret;
560
561 cgrp = cgroup_get_from_fd(attr->target_fd);
562 if (IS_ERR(cgrp))
563 return PTR_ERR(cgrp);
564
565 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
566 if (IS_ERR(prog))
567 prog = NULL;
568
569 ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
570 if (prog)
571 bpf_prog_put(prog);
572
573 cgroup_put(cgrp);
574 return ret;
575}
576
577int cgroup_bpf_prog_query(const union bpf_attr *attr,
578 union bpf_attr __user *uattr)
579{
580 struct cgroup *cgrp;
581 int ret;
582
583 cgrp = cgroup_get_from_fd(attr->query.target_fd);
584 if (IS_ERR(cgrp))
585 return PTR_ERR(cgrp);
586
587 ret = cgroup_bpf_query(cgrp, attr, uattr);
588
589 cgroup_put(cgrp);
590 return ret;
591}
592
593/**
594 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
595 * @sk: The socket sending or receiving traffic
596 * @skb: The skb that is being sent or received
597 * @type: The type of program to be executed
598 *
599 * If no socket is passed, or the socket is not of type INET or INET6,
600 * this function does nothing and returns 0.
601 *
602 * The program type passed in via @type must be suitable for network
603 * filtering. No further check is performed to assert that.
604 *
605 * For egress packets, this function can return:
606 * NET_XMIT_SUCCESS (0) - continue with packet output
607 * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr
608 * NET_XMIT_CN (2) - continue with packet output and notify TCP
609 * to call cwr
610 * -EPERM - drop packet
611 *
612 * For ingress packets, this function will return -EPERM if any
613 * attached program was found and if it returned != 1 during execution.
614 * Otherwise 0 is returned.
615 */
616int __cgroup_bpf_run_filter_skb(struct sock *sk,
617 struct sk_buff *skb,
618 enum bpf_attach_type type)
619{
620 unsigned int offset = skb->data - skb_network_header(skb);
621 struct sock *save_sk;
622 void *saved_data_end;
623 struct cgroup *cgrp;
624 int ret;
625
626 if (!sk || !sk_fullsock(sk))
627 return 0;
628
629 if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
630 return 0;
631
632 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
633 save_sk = skb->sk;
634 skb->sk = sk;
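	/* run the programs with skb->data pointing at the network (L3)
	 * header; the original offset is restored below
	 */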
635 __skb_push(skb, offset);
636
637 /* compute pointers for the bpf prog */
638 bpf_compute_and_save_data_end(skb, &saved_data_end);
639
640 if (type == BPF_CGROUP_INET_EGRESS) {
641 ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
642 cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
643 } else {
644 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
645 __bpf_prog_run_save_cb);
646 ret = (ret == 1 ? 0 : -EPERM);
647 }
648 bpf_restore_data_end(skb, saved_data_end);
649 __skb_pull(skb, offset);
650 skb->sk = save_sk;
651
652 return ret;
653}
654EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
655
656/**
657 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
658 * @sk: sock structure to manipulate
659 * @type: The type of program to be executed
660 *
661 * The socket passed is expected to be of type INET or INET6.
662 *
663 * The program type passed in via @type must be suitable for sock
664 * filtering. No further check is performed to assert that.
665 *
666 * This function will return %-EPERM if an attached program was found
667 * and if it returned != 1 during execution. In all other cases, 0 is returned.
668 */
669int __cgroup_bpf_run_filter_sk(struct sock *sk,
670 enum bpf_attach_type type)
671{
672 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
673 int ret;
674
675 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
676 return ret == 1 ? 0 : -EPERM;
677}
678EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
679
680/**
681 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
682 * provided by user sockaddr
683 * @sk: sock struct that will use sockaddr
684 * @uaddr: sockaddr struct provided by user
685 * @type: The type of program to be executed
686 * @t_ctx: Pointer to attach type specific context
687 *
688 * The socket is expected to be of type INET or INET6.
689 *
690 * This function will return %-EPERM if an attached program is found and
691 * returned value != 1 during execution. In all other cases, 0 is returned.
692 */
693int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
694 struct sockaddr *uaddr,
695 enum bpf_attach_type type,
696 void *t_ctx)
697{
698 struct bpf_sock_addr_kern ctx = {
699 .sk = sk,
700 .uaddr = uaddr,
701 .t_ctx = t_ctx,
702 };
703 struct sockaddr_storage unspec;
704 struct cgroup *cgrp;
705 int ret;
706
707 /* Check socket family since not all sockets represent network
708 * endpoint (e.g. AF_UNIX).
709 */
710 if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
711 return 0;
712
713 if (!ctx.uaddr) {
714 memset(&unspec, 0, sizeof(unspec));
715 ctx.uaddr = (struct sockaddr *)&unspec;
716 }
717
718 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
719 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
720
721 return ret == 1 ? 0 : -EPERM;
722}
723EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
724
725/**
726 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
727 * @sk: socket to get cgroup from
728 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
729 * sk with connection information (IP addresses, etc.) May not contain
730 * cgroup info if it is a req sock.
731 * @type: The type of program to be executed
732 *
733 * The socket passed is expected to be of type INET or INET6.
734 *
735 * The program type passed in via @type must be suitable for sock_ops
736 * filtering. No further check is performed to assert that.
737 *
738 * This function will return %-EPERM if an attached program was found
739 * and if it returned != 1 during execution. In all other cases, 0 is returned.
740 */
741int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
742 struct bpf_sock_ops_kern *sock_ops,
743 enum bpf_attach_type type)
744{
745 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
746 int ret;
747
748 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
749 BPF_PROG_RUN);
750 return ret == 1 ? 0 : -EPERM;
751}
752EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
753
754int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
755 short access, enum bpf_attach_type type)
756{
757 struct cgroup *cgrp;
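	/* access_type is encoded as (access << 16) | dev_type, as expected
	 * by struct bpf_cgroup_dev_ctx
	 */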
758 struct bpf_cgroup_dev_ctx ctx = {
759 .access_type = (access << 16) | dev_type,
760 .major = major,
761 .minor = minor,
762 };
763 int allow = 1;
764
765 rcu_read_lock();
766 cgrp = task_dfl_cgroup(current);
767 allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
768 BPF_PROG_RUN);
769 rcu_read_unlock();
770
771 return !allow;
772}
773EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
774
775static const struct bpf_func_proto *
776cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
777{
778 switch (func_id) {
779 case BPF_FUNC_map_lookup_elem:
780 return &bpf_map_lookup_elem_proto;
781 case BPF_FUNC_map_update_elem:
782 return &bpf_map_update_elem_proto;
783 case BPF_FUNC_map_delete_elem:
784 return &bpf_map_delete_elem_proto;
785 case BPF_FUNC_map_push_elem:
786 return &bpf_map_push_elem_proto;
787 case BPF_FUNC_map_pop_elem:
788 return &bpf_map_pop_elem_proto;
789 case BPF_FUNC_map_peek_elem:
790 return &bpf_map_peek_elem_proto;
791 case BPF_FUNC_get_current_uid_gid:
792 return &bpf_get_current_uid_gid_proto;
793 case BPF_FUNC_get_local_storage:
794 return &bpf_get_local_storage_proto;
795 case BPF_FUNC_get_current_cgroup_id:
796 return &bpf_get_current_cgroup_id_proto;
797 case BPF_FUNC_trace_printk:
798 if (capable(CAP_SYS_ADMIN))
799 return bpf_get_trace_printk_proto();
800 /* fall through */
801 default:
802 return NULL;
803 }
804}
805
806static const struct bpf_func_proto *
807cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
808{
809 return cgroup_base_func_proto(func_id, prog);
810}
811
812static bool cgroup_dev_is_valid_access(int off, int size,
813 enum bpf_access_type type,
814 const struct bpf_prog *prog,
815 struct bpf_insn_access_aux *info)
816{
817 const int size_default = sizeof(__u32);
818
819 if (type == BPF_WRITE)
820 return false;
821
822 if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
823 return false;
824 /* The verifier guarantees that size > 0. */
825 if (off % size != 0)
826 return false;
827
828 switch (off) {
829 case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
830 bpf_ctx_record_field_size(info, size_default);
831 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
832 return false;
833 break;
834 default:
835 if (size != size_default)
836 return false;
837 }
838
839 return true;
840}
841
842const struct bpf_prog_ops cg_dev_prog_ops = {
843};
844
845const struct bpf_verifier_ops cg_dev_verifier_ops = {
846 .get_func_proto = cgroup_dev_func_proto,
847 .is_valid_access = cgroup_dev_is_valid_access,
848};
849
850/**
851 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
852 *
853 * @head: sysctl table header
854 * @table: sysctl table
855 * @write: sysctl is being read (= 0) or written (= 1)
856 * @buf: pointer to buffer passed by user space
857 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
858 * result is size of @new_buf if program set new value, initial value
859 * otherwise
860 * @ppos: value-result argument: value is position at which read from or write
861 * to sysctl is happening, result is new position if program overrode it,
862 * initial value otherwise
863 * @new_buf: pointer to pointer to new buffer that will be allocated if program
864 * overrides new value provided by user space on sysctl write
865 * NOTE: it's the caller's responsibility to free *new_buf if it was set
866 * @type: type of program to be executed
867 *
868 * Program is run when sysctl is being accessed, either read or written, and
869 * can allow or deny such access.
870 *
871 * This function will return %-EPERM if an attached program is found and
872 * returned value != 1 during execution. In all other cases 0 is returned.
873 */
874int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
875 struct ctl_table *table, int write,
876 void __user *buf, size_t *pcount,
877 loff_t *ppos, void **new_buf,
878 enum bpf_attach_type type)
879{
880 struct bpf_sysctl_kern ctx = {
881 .head = head,
882 .table = table,
883 .write = write,
884 .ppos = ppos,
885 .cur_val = NULL,
886 .cur_len = PAGE_SIZE,
887 .new_val = NULL,
888 .new_len = 0,
889 .new_updated = 0,
890 };
891 struct cgroup *cgrp;
892 int ret;
893
894 ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
895 if (ctx.cur_val) {
896 mm_segment_t old_fs;
897 loff_t pos = 0;
898
899 old_fs = get_fs();
900 set_fs(KERNEL_DS);
901 if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
902 &ctx.cur_len, &pos)) {
903 /* Let BPF program decide how to proceed. */
904 ctx.cur_len = 0;
905 }
906 set_fs(old_fs);
907 } else {
908 /* Let BPF program decide how to proceed. */
909 ctx.cur_len = 0;
910 }
911
912 if (write && buf && *pcount) {
913 /* BPF program should be able to override new value with a
914 * buffer bigger than provided by user.
915 */
916 ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
917 ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
918 if (!ctx.new_val ||
919 copy_from_user(ctx.new_val, buf, ctx.new_len))
920 /* Let BPF program decide how to proceed. */
921 ctx.new_len = 0;
922 }
923
924 rcu_read_lock();
925 cgrp = task_dfl_cgroup(current);
926 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
927 rcu_read_unlock();
928
929 kfree(ctx.cur_val);
930
931 if (ret == 1 && ctx.new_updated) {
932 *new_buf = ctx.new_val;
933 *pcount = ctx.new_len;
934 } else {
935 kfree(ctx.new_val);
936 }
937
938 return ret == 1 ? 0 : -EPERM;
939}
940EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);
941
942#ifdef CONFIG_NET
943static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
944 enum bpf_attach_type attach_type)
945{
946 struct bpf_prog_array *prog_array;
947 bool empty;
948
949 rcu_read_lock();
950 prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
951 empty = bpf_prog_array_is_empty(prog_array);
952 rcu_read_unlock();
953
954 return empty;
955}
956
957static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
958{
959 if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
960 return -EINVAL;
961
962 ctx->optval = kzalloc(max_optlen, GFP_USER);
963 if (!ctx->optval)
964 return -ENOMEM;
965
966 ctx->optval_end = ctx->optval + max_optlen;
967
968 return 0;
969}
970
971static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
972{
973 kfree(ctx->optval);
974}
975
976int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
977 int *optname, char __user *optval,
978 int *optlen, char **kernel_optval)
979{
980 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
981 struct bpf_sockopt_kern ctx = {
982 .sk = sk,
983 .level = *level,
984 .optname = *optname,
985 };
986 int ret, max_optlen;
987
988 /* Opportunistic check to see whether we have any BPF program
989 * attached to the hook so we don't waste time allocating
990 * memory and locking the socket.
991 */
992 if (!cgroup_bpf_enabled ||
993 __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
994 return 0;
995
996 /* Allocate a bit more than the initial user buffer for
997 * BPF program. The canonical use case is overriding
998 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
999 */
1000 max_optlen = max_t(int, 16, *optlen);
1001
1002 ret = sockopt_alloc_buf(&ctx, max_optlen);
1003 if (ret)
1004 return ret;
1005
1006 ctx.optlen = *optlen;
1007
1008 if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
1009 ret = -EFAULT;
1010 goto out;
1011 }
1012
1013 lock_sock(sk);
1014 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
1015 &ctx, BPF_PROG_RUN);
1016 release_sock(sk);
1017
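	/* BPF_PROG_RUN_ARRAY() ANDs the programs' return codes; 0 here means
	 * at least one program rejected the setsockopt
	 */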
1018 if (!ret) {
1019 ret = -EPERM;
1020 goto out;
1021 }
1022
1023 if (ctx.optlen == -1) {
1024 /* optlen set to -1, bypass kernel */
1025 ret = 1;
1026 } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
1027 /* optlen is out of bounds */
1028 ret = -EFAULT;
1029 } else {
1030 /* optlen within bounds, run kernel handler */
1031 ret = 0;
1032
1033 /* export any potential modifications */
1034 *level = ctx.level;
1035 *optname = ctx.optname;
1036 *optlen = ctx.optlen;
1037 *kernel_optval = ctx.optval;
1038 }
1039
1040out:
1041 if (ret)
1042 sockopt_free_buf(&ctx);
1043 return ret;
1044}
1045EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt);
1046
1047int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1048 int optname, char __user *optval,
1049 int __user *optlen, int max_optlen,
1050 int retval)
1051{
1052 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1053 struct bpf_sockopt_kern ctx = {
1054 .sk = sk,
1055 .level = level,
1056 .optname = optname,
1057 .retval = retval,
1058 };
1059 int ret;
1060
1061 /* Opportunistic check to see whether we have any BPF program
1062 * attached to the hook so we don't waste time allocating
1063 * memory and locking the socket.
1064 */
1065 if (!cgroup_bpf_enabled ||
1066 __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
1067 return retval;
1068
1069 ret = sockopt_alloc_buf(&ctx, max_optlen);
1070 if (ret)
1071 return ret;
1072
1073 ctx.optlen = max_optlen;
1074
1075 if (!retval) {
1076 /* If kernel getsockopt finished successfully,
1077 * copy whatever was returned to the user back
1078 * into our temporary buffer. Set optlen to the
1079 * one that kernel returned as well to let
1080 * BPF programs inspect the value.
1081 */
1082
1083 if (get_user(ctx.optlen, optlen)) {
1084 ret = -EFAULT;
1085 goto out;
1086 }
1087
1088 if (ctx.optlen > max_optlen)
1089 ctx.optlen = max_optlen;
1090
1091 if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
1092 ret = -EFAULT;
1093 goto out;
1094 }
1095 }
1096
1097 lock_sock(sk);
1098 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
1099 &ctx, BPF_PROG_RUN);
1100 release_sock(sk);
1101
1102 if (!ret) {
1103 ret = -EPERM;
1104 goto out;
1105 }
1106
1107 if (ctx.optlen > max_optlen) {
1108 ret = -EFAULT;
1109 goto out;
1110 }
1111
1112 /* BPF programs are only allowed to set retval to 0, not some
1113 * arbitrary value.
1114 */
1115 if (ctx.retval != 0 && ctx.retval != retval) {
1116 ret = -EFAULT;
1117 goto out;
1118 }
1119
1120 if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1121 put_user(ctx.optlen, optlen)) {
1122 ret = -EFAULT;
1123 goto out;
1124 }
1125
1126 ret = ctx.retval;
1127
1128out:
1129 sockopt_free_buf(&ctx);
1130 return ret;
1131}
1132EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt);
1133#endif
1134
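/* Recursively copy the path of @dir ("foo/bar/") into *bufp, advancing the
 * buffer pointer and remaining length. Returns the number of bytes written
 * or a negative error.
 */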
1135static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1136 size_t *lenp)
1137{
1138 ssize_t tmp_ret = 0, ret;
1139
1140 if (dir->header.parent) {
1141 tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1142 if (tmp_ret < 0)
1143 return tmp_ret;
1144 }
1145
1146 ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1147 if (ret < 0)
1148 return ret;
1149 *bufp += ret;
1150 *lenp -= ret;
1151 ret += tmp_ret;
1152
1153 /* Avoid leading slash. */
1154 if (!ret)
1155 return ret;
1156
1157 tmp_ret = strscpy(*bufp, "/", *lenp);
1158 if (tmp_ret < 0)
1159 return tmp_ret;
1160 *bufp += tmp_ret;
1161 *lenp -= tmp_ret;
1162
1163 return ret + tmp_ret;
1164}
1165
1166BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
1167 size_t, buf_len, u64, flags)
1168{
1169 ssize_t tmp_ret = 0, ret;
1170
1171 if (!buf)
1172 return -EINVAL;
1173
1174 if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
1175 if (!ctx->head)
1176 return -EINVAL;
1177 tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
1178 if (tmp_ret < 0)
1179 return tmp_ret;
1180 }
1181
1182 ret = strscpy(buf, ctx->table->procname, buf_len);
1183
1184 return ret < 0 ? ret : tmp_ret + ret;
1185}
1186
1187static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
1188 .func = bpf_sysctl_get_name,
1189 .gpl_only = false,
1190 .ret_type = RET_INTEGER,
1191 .arg1_type = ARG_PTR_TO_CTX,
1192 .arg2_type = ARG_PTR_TO_MEM,
1193 .arg3_type = ARG_CONST_SIZE,
1194 .arg4_type = ARG_ANYTHING,
1195};
1196
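/* Copy @src into @dst. If @src fits, the rest of @dst is zero-padded and the
 * number of bytes copied is returned; otherwise the copy is truncated,
 * NUL-terminated, and -E2BIG is returned.
 */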
1197static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
1198 size_t src_len)
1199{
1200 if (!dst)
1201 return -EINVAL;
1202
1203 if (!dst_len)
1204 return -E2BIG;
1205
1206 if (!src || !src_len) {
1207 memset(dst, 0, dst_len);
1208 return -EINVAL;
1209 }
1210
1211 memcpy(dst, src, min(dst_len, src_len));
1212
1213 if (dst_len > src_len) {
1214 memset(dst + src_len, '\0', dst_len - src_len);
1215 return src_len;
1216 }
1217
1218 dst[dst_len - 1] = '\0';
1219
1220 return -E2BIG;
1221}
1222
1223BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
1224 char *, buf, size_t, buf_len)
1225{
1226 return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
1227}
1228
1229static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
1230 .func = bpf_sysctl_get_current_value,
1231 .gpl_only = false,
1232 .ret_type = RET_INTEGER,
1233 .arg1_type = ARG_PTR_TO_CTX,
1234 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1235 .arg3_type = ARG_CONST_SIZE,
1236};
1237
1238BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
1239 size_t, buf_len)
1240{
1241 if (!ctx->write) {
1242 if (buf && buf_len)
1243 memset(buf, '\0', buf_len);
1244 return -EINVAL;
1245 }
1246 return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
1247}
1248
1249static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
1250 .func = bpf_sysctl_get_new_value,
1251 .gpl_only = false,
1252 .ret_type = RET_INTEGER,
1253 .arg1_type = ARG_PTR_TO_CTX,
1254 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1255 .arg3_type = ARG_CONST_SIZE,
1256};
1257
1258BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
1259 const char *, buf, size_t, buf_len)
1260{
1261 if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
1262 return -EINVAL;
1263
1264 if (buf_len > PAGE_SIZE - 1)
1265 return -E2BIG;
1266
1267 memcpy(ctx->new_val, buf, buf_len);
1268 ctx->new_len = buf_len;
1269 ctx->new_updated = 1;
1270
1271 return 0;
1272}
1273
1274static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
1275 .func = bpf_sysctl_set_new_value,
1276 .gpl_only = false,
1277 .ret_type = RET_INTEGER,
1278 .arg1_type = ARG_PTR_TO_CTX,
1279 .arg2_type = ARG_PTR_TO_MEM,
1280 .arg3_type = ARG_CONST_SIZE,
1281};
1282
1283static const struct bpf_func_proto *
1284sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1285{
1286 switch (func_id) {
1287 case BPF_FUNC_strtol:
1288 return &bpf_strtol_proto;
1289 case BPF_FUNC_strtoul:
1290 return &bpf_strtoul_proto;
1291 case BPF_FUNC_sysctl_get_name:
1292 return &bpf_sysctl_get_name_proto;
1293 case BPF_FUNC_sysctl_get_current_value:
1294 return &bpf_sysctl_get_current_value_proto;
1295 case BPF_FUNC_sysctl_get_new_value:
1296 return &bpf_sysctl_get_new_value_proto;
1297 case BPF_FUNC_sysctl_set_new_value:
1298 return &bpf_sysctl_set_new_value_proto;
1299 default:
1300 return cgroup_base_func_proto(func_id, prog);
1301 }
1302}
1303
1304static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
1305 const struct bpf_prog *prog,
1306 struct bpf_insn_access_aux *info)
1307{
1308 const int size_default = sizeof(__u32);
1309
1310 if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
1311 return false;
1312
1313 switch (off) {
1314 case bpf_ctx_range(struct bpf_sysctl, write):
1315 if (type != BPF_READ)
1316 return false;
1317 bpf_ctx_record_field_size(info, size_default);
1318 return bpf_ctx_narrow_access_ok(off, size, size_default);
1319 case bpf_ctx_range(struct bpf_sysctl, file_pos):
1320 if (type == BPF_READ) {
1321 bpf_ctx_record_field_size(info, size_default);
1322 return bpf_ctx_narrow_access_ok(off, size, size_default);
1323 } else {
1324 return size == size_default;
1325 }
1326 default:
1327 return false;
1328 }
1329}
1330
1331static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
1332 const struct bpf_insn *si,
1333 struct bpf_insn *insn_buf,
1334 struct bpf_prog *prog, u32 *target_size)
1335{
1336 struct bpf_insn *insn = insn_buf;
1337 u32 read_size;
1338
1339 switch (si->off) {
1340 case offsetof(struct bpf_sysctl, write):
1341 *insn++ = BPF_LDX_MEM(
1342 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
1343 bpf_target_off(struct bpf_sysctl_kern, write,
1344 FIELD_SIZEOF(struct bpf_sysctl_kern,
1345 write),
1346 target_size));
1347 break;
1348 case offsetof(struct bpf_sysctl, file_pos):
1349 /* ppos is a pointer so it should be accessed via indirect
1350 * loads and stores. Also, for stores, an additional temporary
1351 * register is used, since neither src_reg nor dst_reg can be
1352 * overridden.
1353 */
1354 if (type == BPF_WRITE) {
1355 int treg = BPF_REG_9;
1356
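			/* pick a scratch register that is neither src_reg nor
			 * dst_reg: it is saved to tmp_reg, used to hold the
			 * ppos pointer for the 32-bit store, then restored
			 */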
1357 if (si->src_reg == treg || si->dst_reg == treg)
1358 --treg;
1359 if (si->src_reg == treg || si->dst_reg == treg)
1360 --treg;
1361 *insn++ = BPF_STX_MEM(
1362 BPF_DW, si->dst_reg, treg,
1363 offsetof(struct bpf_sysctl_kern, tmp_reg));
1364 *insn++ = BPF_LDX_MEM(
1365 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1366 treg, si->dst_reg,
1367 offsetof(struct bpf_sysctl_kern, ppos));
1368 *insn++ = BPF_STX_MEM(
1369 BPF_SIZEOF(u32), treg, si->src_reg,
1370 bpf_ctx_narrow_access_offset(
1371 0, sizeof(u32), sizeof(loff_t)));
1372 *insn++ = BPF_LDX_MEM(
1373 BPF_DW, treg, si->dst_reg,
1374 offsetof(struct bpf_sysctl_kern, tmp_reg));
1375 } else {
1376 *insn++ = BPF_LDX_MEM(
1377 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1378 si->dst_reg, si->src_reg,
1379 offsetof(struct bpf_sysctl_kern, ppos));
1380 read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
1381 *insn++ = BPF_LDX_MEM(
1382 BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
1383 bpf_ctx_narrow_access_offset(
1384 0, read_size, sizeof(loff_t)));
1385 }
1386 *target_size = sizeof(u32);
1387 break;
1388 }
1389
1390 return insn - insn_buf;
1391}
1392
1393const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
1394 .get_func_proto = sysctl_func_proto,
1395 .is_valid_access = sysctl_is_valid_access,
1396 .convert_ctx_access = sysctl_convert_ctx_access,
1397};
1398
1399const struct bpf_prog_ops cg_sysctl_prog_ops = {
1400};
1401
1402static const struct bpf_func_proto *
1403cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1404{
1405 switch (func_id) {
1406#ifdef CONFIG_NET
1407 case BPF_FUNC_sk_storage_get:
1408 return &bpf_sk_storage_get_proto;
1409 case BPF_FUNC_sk_storage_delete:
1410 return &bpf_sk_storage_delete_proto;
1411#endif
1412#ifdef CONFIG_INET
1413 case BPF_FUNC_tcp_sock:
1414 return &bpf_tcp_sock_proto;
1415#endif
1416 default:
1417 return cgroup_base_func_proto(func_id, prog);
1418 }
1419}
1420
1421static bool cg_sockopt_is_valid_access(int off, int size,
1422 enum bpf_access_type type,
1423 const struct bpf_prog *prog,
1424 struct bpf_insn_access_aux *info)
1425{
1426 const int size_default = sizeof(__u32);
1427
1428 if (off < 0 || off >= sizeof(struct bpf_sockopt))
1429 return false;
1430
1431 if (off % size != 0)
1432 return false;
1433
1434 if (type == BPF_WRITE) {
1435 switch (off) {
1436 case offsetof(struct bpf_sockopt, retval):
1437 if (size != size_default)
1438 return false;
1439 return prog->expected_attach_type ==
1440 BPF_CGROUP_GETSOCKOPT;
1441 case offsetof(struct bpf_sockopt, optname):
1442 /* fallthrough */
1443 case offsetof(struct bpf_sockopt, level):
1444 if (size != size_default)
1445 return false;
1446 return prog->expected_attach_type ==
1447 BPF_CGROUP_SETSOCKOPT;
1448 case offsetof(struct bpf_sockopt, optlen):
1449 return size == size_default;
1450 default:
1451 return false;
1452 }
1453 }
1454
1455 switch (off) {
1456 case offsetof(struct bpf_sockopt, sk):
1457 if (size != sizeof(__u64))
1458 return false;
1459 info->reg_type = PTR_TO_SOCKET;
1460 break;
1461 case offsetof(struct bpf_sockopt, optval):
1462 if (size != sizeof(__u64))
1463 return false;
1464 info->reg_type = PTR_TO_PACKET;
1465 break;
1466 case offsetof(struct bpf_sockopt, optval_end):
1467 if (size != sizeof(__u64))
1468 return false;
1469 info->reg_type = PTR_TO_PACKET_END;
1470 break;
1471 case offsetof(struct bpf_sockopt, retval):
1472 if (size != size_default)
1473 return false;
1474 return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
1475 default:
1476 if (size != size_default)
1477 return false;
1478 break;
1479 }
1480 return true;
1481}
1482
1483#define CG_SOCKOPT_ACCESS_FIELD(T, F) \
1484 T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \
1485 si->dst_reg, si->src_reg, \
1486 offsetof(struct bpf_sockopt_kern, F))
1487
1488static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
1489 const struct bpf_insn *si,
1490 struct bpf_insn *insn_buf,
1491 struct bpf_prog *prog,
1492 u32 *target_size)
1493{
1494 struct bpf_insn *insn = insn_buf;
1495
1496 switch (si->off) {
1497 case offsetof(struct bpf_sockopt, sk):
1498 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
1499 break;
1500 case offsetof(struct bpf_sockopt, level):
1501 if (type == BPF_WRITE)
1502 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
1503 else
1504 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
1505 break;
1506 case offsetof(struct bpf_sockopt, optname):
1507 if (type == BPF_WRITE)
1508 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
1509 else
1510 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
1511 break;
1512 case offsetof(struct bpf_sockopt, optlen):
1513 if (type == BPF_WRITE)
1514 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
1515 else
1516 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
1517 break;
1518 case offsetof(struct bpf_sockopt, retval):
1519 if (type == BPF_WRITE)
1520 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
1521 else
1522 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
1523 break;
1524 case offsetof(struct bpf_sockopt, optval):
1525 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
1526 break;
1527 case offsetof(struct bpf_sockopt, optval_end):
1528 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
1529 break;
1530 }
1531
1532 return insn - insn_buf;
1533}
1534
1535static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
1536 bool direct_write,
1537 const struct bpf_prog *prog)
1538{
1539 /* Nothing to do for sockopt argument. The data is kzalloc'ated.
1540 */
1541 return 0;
1542}
1543
1544const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
1545 .get_func_proto = cg_sockopt_func_proto,
1546 .is_valid_access = cg_sockopt_is_valid_access,
1547 .convert_ctx_access = cg_sockopt_convert_ctx_access,
1548 .gen_prologue = cg_sockopt_get_prologue,
1549};
1550
1551const struct bpf_prog_ops cg_sockopt_prog_ops = {
1552};
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Functions to manage eBPF programs attached to cgroups
4 *
5 * Copyright (c) 2016 Daniel Mack
6 */
7
8#include <linux/kernel.h>
9#include <linux/atomic.h>
10#include <linux/cgroup.h>
11#include <linux/filter.h>
12#include <linux/slab.h>
13#include <linux/sysctl.h>
14#include <linux/string.h>
15#include <linux/bpf.h>
16#include <linux/bpf-cgroup.h>
17#include <net/sock.h>
18#include <net/bpf_sk_storage.h>
19
20#include "../cgroup/cgroup-internal.h"
21
22DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
23EXPORT_SYMBOL(cgroup_bpf_enabled_key);
24
25void cgroup_bpf_offline(struct cgroup *cgrp)
26{
27 cgroup_get(cgrp);
28 percpu_ref_kill(&cgrp->bpf.refcnt);
29}
30
31static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
32{
33 enum bpf_cgroup_storage_type stype;
34
35 for_each_cgroup_storage_type(stype)
36 bpf_cgroup_storage_free(storages[stype]);
37}
38
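/* Allocate (or reuse) per-attach-type cgroup storages for @prog. Storages
 * that already exist for this (cgroup, attach type) pair are reused; newly
 * allocated ones are also recorded in @new_storages so the caller can free
 * them on error.
 */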
39static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
40 struct bpf_cgroup_storage *new_storages[],
41 enum bpf_attach_type type,
42 struct bpf_prog *prog,
43 struct cgroup *cgrp)
44{
45 enum bpf_cgroup_storage_type stype;
46 struct bpf_cgroup_storage_key key;
47 struct bpf_map *map;
48
49 key.cgroup_inode_id = cgroup_id(cgrp);
50 key.attach_type = type;
51
52 for_each_cgroup_storage_type(stype) {
53 map = prog->aux->cgroup_storage[stype];
54 if (!map)
55 continue;
56
57 storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
58 if (storages[stype])
59 continue;
60
61 storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
62 if (IS_ERR(storages[stype])) {
63 bpf_cgroup_storages_free(new_storages);
64 return -ENOMEM;
65 }
66
67 new_storages[stype] = storages[stype];
68 }
69
70 return 0;
71}
72
73static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
74 struct bpf_cgroup_storage *src[])
75{
76 enum bpf_cgroup_storage_type stype;
77
78 for_each_cgroup_storage_type(stype)
79 dst[stype] = src[stype];
80}
81
82static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
83 struct cgroup *cgrp,
84 enum bpf_attach_type attach_type)
85{
86 enum bpf_cgroup_storage_type stype;
87
88 for_each_cgroup_storage_type(stype)
89 bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
90}
91
92/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
93 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
94 * doesn't free link memory, which will eventually be done by bpf_link's
95 * release() callback, when its last FD is closed.
96 */
97static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
98{
99 cgroup_put(link->cgroup);
100 link->cgroup = NULL;
101}
102
103/**
104 * cgroup_bpf_release() - put references of all bpf programs and
105 * release all cgroup bpf data
106 * @work: work structure embedded into the cgroup to modify
107 */
108static void cgroup_bpf_release(struct work_struct *work)
109{
110 struct cgroup *p, *cgrp = container_of(work, struct cgroup,
111 bpf.release_work);
112 struct bpf_prog_array *old_array;
113 struct list_head *storages = &cgrp->bpf.storages;
114 struct bpf_cgroup_storage *storage, *stmp;
115
116 unsigned int type;
117
118 mutex_lock(&cgroup_mutex);
119
120 for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
121 struct list_head *progs = &cgrp->bpf.progs[type];
122 struct bpf_prog_list *pl, *pltmp;
123
124 list_for_each_entry_safe(pl, pltmp, progs, node) {
125 list_del(&pl->node);
126 if (pl->prog)
127 bpf_prog_put(pl->prog);
128 if (pl->link)
129 bpf_cgroup_link_auto_detach(pl->link);
130 kfree(pl);
131 static_branch_dec(&cgroup_bpf_enabled_key);
132 }
133 old_array = rcu_dereference_protected(
134 cgrp->bpf.effective[type],
135 lockdep_is_held(&cgroup_mutex));
136 bpf_prog_array_free(old_array);
137 }
138
139 list_for_each_entry_safe(storage, stmp, storages, list_cg) {
140 bpf_cgroup_storage_unlink(storage);
141 bpf_cgroup_storage_free(storage);
142 }
143
144 mutex_unlock(&cgroup_mutex);
145
146 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
147 cgroup_bpf_put(p);
148
149 percpu_ref_exit(&cgrp->bpf.refcnt);
150 cgroup_put(cgrp);
151}
152
153/**
154 * cgroup_bpf_release_fn() - callback used to schedule releasing
155 * of bpf cgroup data
156 * @ref: percpu ref counter structure
157 */
158static void cgroup_bpf_release_fn(struct percpu_ref *ref)
159{
160 struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
161
162 INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
163 queue_work(system_wq, &cgrp->bpf.release_work);
164}
165
166/* Get the underlying bpf_prog of a bpf_prog_list entry, regardless of whether
167 * it is attached through a link or as a direct prog.
168 */
169static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
170{
171 if (pl->prog)
172 return pl->prog;
173 if (pl->link)
174 return pl->link->link.prog;
175 return NULL;
176}
177
178/* count number of elements in the list.
179 * it's slow but the list cannot be long
180 */
181static u32 prog_list_length(struct list_head *head)
182{
183 struct bpf_prog_list *pl;
184 u32 cnt = 0;
185
186 list_for_each_entry(pl, head, node) {
187 if (!prog_list_prog(pl))
188 continue;
189 cnt++;
190 }
191 return cnt;
192}
193
194/* if parent has non-overridable prog attached,
195 * disallow attaching new programs to the descendant cgroup.
196 * if parent has overridable or multi-prog, allow attaching
197 */
198static bool hierarchy_allows_attach(struct cgroup *cgrp,
199 enum bpf_attach_type type)
200{
201 struct cgroup *p;
202
203 p = cgroup_parent(cgrp);
204 if (!p)
205 return true;
206 do {
207 u32 flags = p->bpf.flags[type];
208 u32 cnt;
209
210 if (flags & BPF_F_ALLOW_MULTI)
211 return true;
212 cnt = prog_list_length(&p->bpf.progs[type]);
213 WARN_ON_ONCE(cnt > 1);
214 if (cnt == 1)
215 return !!(flags & BPF_F_ALLOW_OVERRIDE);
216 p = cgroup_parent(p);
217 } while (p);
218 return true;
219}
220
221/* compute a chain of effective programs for a given cgroup:
222 * start from the list of programs in this cgroup and add
223 * all parent programs.
224 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
225 * to programs in this cgroup
226 */
227static int compute_effective_progs(struct cgroup *cgrp,
228 enum bpf_attach_type type,
229 struct bpf_prog_array **array)
230{
231 struct bpf_prog_array_item *item;
232 struct bpf_prog_array *progs;
233 struct bpf_prog_list *pl;
234 struct cgroup *p = cgrp;
235 int cnt = 0;
236
237 /* count number of effective programs by walking parents */
238 do {
239 if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
240 cnt += prog_list_length(&p->bpf.progs[type]);
241 p = cgroup_parent(p);
242 } while (p);
243
244 progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
245 if (!progs)
246 return -ENOMEM;
247
248 /* populate the array with effective progs */
249 cnt = 0;
250 p = cgrp;
251 do {
252 if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
253 continue;
254
255 list_for_each_entry(pl, &p->bpf.progs[type], node) {
256 if (!prog_list_prog(pl))
257 continue;
258
259 item = &progs->items[cnt];
260 item->prog = prog_list_prog(pl);
261 bpf_cgroup_storages_assign(item->cgroup_storage,
262 pl->storage);
263 cnt++;
264 }
265 } while ((p = cgroup_parent(p)));
266
267 *array = progs;
268 return 0;
269}
270
271static void activate_effective_progs(struct cgroup *cgrp,
272 enum bpf_attach_type type,
273 struct bpf_prog_array *old_array)
274{
275 old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
276 lockdep_is_held(&cgroup_mutex));
277 /* free prog array after grace period, since __cgroup_bpf_run_*()
278 * might be still walking the array
279 */
280 bpf_prog_array_free(old_array);
281}
282
283/**
284 * cgroup_bpf_inherit() - inherit effective programs from parent
285 * @cgrp: the cgroup to modify
286 */
287int cgroup_bpf_inherit(struct cgroup *cgrp)
288{
289/* has to use a macro instead of a const int, since the compiler thinks
290 * that array below is variable length
291 */
292#define NR ARRAY_SIZE(cgrp->bpf.effective)
293 struct bpf_prog_array *arrays[NR] = {};
294 struct cgroup *p;
295 int ret, i;
296
297 ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
298 GFP_KERNEL);
299 if (ret)
300 return ret;
301
302 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
303 cgroup_bpf_get(p);
304
305 for (i = 0; i < NR; i++)
306 INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
307
308 INIT_LIST_HEAD(&cgrp->bpf.storages);
309
310 for (i = 0; i < NR; i++)
311 if (compute_effective_progs(cgrp, i, &arrays[i]))
312 goto cleanup;
313
314 for (i = 0; i < NR; i++)
315 activate_effective_progs(cgrp, i, arrays[i]);
316
317 return 0;
318cleanup:
319 for (i = 0; i < NR; i++)
320 bpf_prog_array_free(arrays[i]);
321
322 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
323 cgroup_bpf_put(p);
324
325 percpu_ref_exit(&cgrp->bpf.refcnt);
326
327 return -ENOMEM;
328}
329
330static int update_effective_progs(struct cgroup *cgrp,
331 enum bpf_attach_type type)
332{
333 struct cgroup_subsys_state *css;
334 int err;
335
336 /* allocate and recompute effective prog arrays */
337 css_for_each_descendant_pre(css, &cgrp->self) {
338 struct cgroup *desc = container_of(css, struct cgroup, self);
339
340 if (percpu_ref_is_zero(&desc->bpf.refcnt))
341 continue;
342
343 err = compute_effective_progs(desc, type, &desc->bpf.inactive);
344 if (err)
345 goto cleanup;
346 }
347
348 /* all allocations were successful. Activate all prog arrays */
349 css_for_each_descendant_pre(css, &cgrp->self) {
350 struct cgroup *desc = container_of(css, struct cgroup, self);
351
352 if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
353 if (unlikely(desc->bpf.inactive)) {
354 bpf_prog_array_free(desc->bpf.inactive);
355 desc->bpf.inactive = NULL;
356 }
357 continue;
358 }
359
360 activate_effective_progs(desc, type, desc->bpf.inactive);
361 desc->bpf.inactive = NULL;
362 }
363
364 return 0;
365
366cleanup:
367 /* oom while computing effective. Free all computed effective arrays
368 * since they were not activated
369 */
370 css_for_each_descendant_pre(css, &cgrp->self) {
371 struct cgroup *desc = container_of(css, struct cgroup, self);
372
373 bpf_prog_array_free(desc->bpf.inactive);
374 desc->bpf.inactive = NULL;
375 }
376
377 return err;
378}
379
380#define BPF_CGROUP_MAX_PROGS 64
381
382static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
383 struct bpf_prog *prog,
384 struct bpf_cgroup_link *link,
385 struct bpf_prog *replace_prog,
386 bool allow_multi)
387{
388 struct bpf_prog_list *pl;
389
390 /* single-attach case */
391 if (!allow_multi) {
392 if (list_empty(progs))
393 return NULL;
394 return list_first_entry(progs, typeof(*pl), node);
395 }
396
397 list_for_each_entry(pl, progs, node) {
398 if (prog && pl->prog == prog && prog != replace_prog)
399 /* disallow attaching the same prog twice */
400 return ERR_PTR(-EINVAL);
401 if (link && pl->link == link)
402 /* disallow attaching the same link twice */
403 return ERR_PTR(-EINVAL);
404 }
405
406 /* direct prog multi-attach w/ replacement case */
407 if (replace_prog) {
408 list_for_each_entry(pl, progs, node) {
409 if (pl->prog == replace_prog)
410 /* a match found */
411 return pl;
412 }
413 /* prog to replace not found for cgroup */
414 return ERR_PTR(-ENOENT);
415 }
416
417 return NULL;
418}
419
420/**
421 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
422 * propagate the change to descendants
423 * @cgrp: The cgroup which descendants to traverse
424 * @prog: A program to attach
425 * @link: A link to attach
426 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
427 * @type: Type of attach operation
428 * @flags: Option flags
429 *
430 * Exactly one of @prog or @link can be non-null.
431 * Must be called with cgroup_mutex held.
432 */
433int __cgroup_bpf_attach(struct cgroup *cgrp,
434 struct bpf_prog *prog, struct bpf_prog *replace_prog,
435 struct bpf_cgroup_link *link,
436 enum bpf_attach_type type, u32 flags)
437{
438 u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
439 struct list_head *progs = &cgrp->bpf.progs[type];
440 struct bpf_prog *old_prog = NULL;
441 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
442 struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
443 struct bpf_prog_list *pl;
444 int err;
445
446 if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
447 ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
448 /* invalid combination */
449 return -EINVAL;
450 if (link && (prog || replace_prog))
451 /* only either link or prog/replace_prog can be specified */
452 return -EINVAL;
453 if (!!replace_prog != !!(flags & BPF_F_REPLACE))
454 /* replace_prog implies BPF_F_REPLACE, and vice versa */
455 return -EINVAL;
456
457 if (!hierarchy_allows_attach(cgrp, type, flags))
458 return -EPERM;
459
460 if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
461 /* Disallow attaching non-overridable on top
462 * of existing overridable in this cgroup.
463 * Disallow attaching multi-prog if overridable or none
464 */
465 return -EPERM;
466
467 if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
468 return -E2BIG;
469
470 pl = find_attach_entry(progs, prog, link, replace_prog,
471 flags & BPF_F_ALLOW_MULTI);
472 if (IS_ERR(pl))
473 return PTR_ERR(pl);
474
475 if (bpf_cgroup_storages_alloc(storage, new_storage, type,
476 prog ? : link->link.prog, cgrp))
477 return -ENOMEM;
478
479 if (pl) {
480 old_prog = pl->prog;
481 } else {
482 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
483 if (!pl) {
484 bpf_cgroup_storages_free(new_storage);
485 return -ENOMEM;
486 }
487 list_add_tail(&pl->node, progs);
488 }
489
490 pl->prog = prog;
491 pl->link = link;
492 bpf_cgroup_storages_assign(pl->storage, storage);
493 cgrp->bpf.flags[type] = saved_flags;
494
495 err = update_effective_progs(cgrp, type);
496 if (err)
497 goto cleanup;
498
499 if (old_prog)
500 bpf_prog_put(old_prog);
501 else
502 static_branch_inc(&cgroup_bpf_enabled_key);
503 bpf_cgroup_storages_link(new_storage, cgrp, type);
504 return 0;
505
506cleanup:
507 if (old_prog) {
508 pl->prog = old_prog;
509 pl->link = NULL;
510 }
511 bpf_cgroup_storages_free(new_storage);
512 if (!old_prog) {
513 list_del(&pl->node);
514 kfree(pl);
515 }
516 return err;
517}
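/*
 * Illustrative sketch only, not part of this file: roughly how userspace
 * reaches __cgroup_bpf_attach() via the BPF_PROG_ATTACH command.  The
 * descriptors cg_fd (a cgroup directory opened with open(2)), prog_fd and
 * old_prog_fd (from earlier BPF_PROG_LOAD calls) are assumed to exist.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Attach in multi-prog mode and atomically replace an earlier program. */
static int cgroup_attach_replace(int cg_fd, int prog_fd, int old_prog_fd)
{
        union bpf_attr attr = {};

        attr.target_fd = cg_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.replace_bpf_fd = old_prog_fd;
        attr.attach_type = BPF_CGROUP_INET_EGRESS;
        /* BPF_F_REPLACE is only valid together with BPF_F_ALLOW_MULTI */
        attr.attach_flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;

        return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}
#endif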
518
519/* Swap updated BPF program for given link in effective program arrays across
520 * all descendant cgroups. This function is guaranteed to succeed.
521 */
522static void replace_effective_prog(struct cgroup *cgrp,
523 enum bpf_attach_type type,
524 struct bpf_cgroup_link *link)
525{
526 struct bpf_prog_array_item *item;
527 struct cgroup_subsys_state *css;
528 struct bpf_prog_array *progs;
529 struct bpf_prog_list *pl;
530 struct list_head *head;
531 struct cgroup *cg;
532 int pos;
533
534 css_for_each_descendant_pre(css, &cgrp->self) {
535 struct cgroup *desc = container_of(css, struct cgroup, self);
536
537 if (percpu_ref_is_zero(&desc->bpf.refcnt))
538 continue;
539
540 /* find position of link in effective progs array */
541 for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
542 if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
543 continue;
544
545 head = &cg->bpf.progs[type];
546 list_for_each_entry(pl, head, node) {
547 if (!prog_list_prog(pl))
548 continue;
549 if (pl->link == link)
550 goto found;
551 pos++;
552 }
553 }
554found:
555 BUG_ON(!cg);
556 progs = rcu_dereference_protected(
557 desc->bpf.effective[type],
558 lockdep_is_held(&cgroup_mutex));
559 item = &progs->items[pos];
560 WRITE_ONCE(item->prog, link->link.prog);
561 }
562}
563
564/**
565 * __cgroup_bpf_replace() - Replace link's program and propagate the change
566 * to descendants
567 * @cgrp: The cgroup whose descendants to traverse
568 * @link: A link for which to replace BPF program
569 * @type: Type of attach operation
570 *
571 * Must be called with cgroup_mutex held.
572 */
573static int __cgroup_bpf_replace(struct cgroup *cgrp,
574 struct bpf_cgroup_link *link,
575 struct bpf_prog *new_prog)
576{
577 struct list_head *progs = &cgrp->bpf.progs[link->type];
578 struct bpf_prog *old_prog;
579 struct bpf_prog_list *pl;
580 bool found = false;
581
582 if (link->link.prog->type != new_prog->type)
583 return -EINVAL;
584
585 list_for_each_entry(pl, progs, node) {
586 if (pl->link == link) {
587 found = true;
588 break;
589 }
590 }
591 if (!found)
592 return -ENOENT;
593
594 old_prog = xchg(&link->link.prog, new_prog);
595 replace_effective_prog(cgrp, link->type, link);
596 bpf_prog_put(old_prog);
597 return 0;
598}
599
600static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
601 struct bpf_prog *old_prog)
602{
603 struct bpf_cgroup_link *cg_link;
604 int ret;
605
606 cg_link = container_of(link, struct bpf_cgroup_link, link);
607
608 mutex_lock(&cgroup_mutex);
609 /* link might have been auto-released by dying cgroup, so fail */
610 if (!cg_link->cgroup) {
611 ret = -ENOLINK;
612 goto out_unlock;
613 }
614 if (old_prog && link->prog != old_prog) {
615 ret = -EPERM;
616 goto out_unlock;
617 }
618 ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
619out_unlock:
620 mutex_unlock(&cgroup_mutex);
621 return ret;
622}
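/*
 * Illustrative sketch only: the BPF_LINK_UPDATE command that lands in
 * cgroup_bpf_replace() above.  link_fd, new_prog_fd and old_prog_fd are
 * assumed to be pre-existing descriptors from BPF_LINK_CREATE and
 * BPF_PROG_LOAD.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int cgroup_link_update(int link_fd, int new_prog_fd, int old_prog_fd)
{
        union bpf_attr attr = {};

        attr.link_update.link_fd = link_fd;
        attr.link_update.new_prog_fd = new_prog_fd;
        /* with BPF_F_REPLACE the update fails unless old_prog_fd is still
         * the program currently attached to the link (-EPERM above)
         */
        attr.link_update.old_prog_fd = old_prog_fd;
        attr.link_update.flags = BPF_F_REPLACE;

        return syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
}
#endif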
623
624static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
625 struct bpf_prog *prog,
626 struct bpf_cgroup_link *link,
627 bool allow_multi)
628{
629 struct bpf_prog_list *pl;
630
631 if (!allow_multi) {
632 if (list_empty(progs))
633 /* report error when trying to detach and nothing is attached */
634 return ERR_PTR(-ENOENT);
635
636 /* to maintain backward compatibility NONE and OVERRIDE cgroups
637 * allow detaching with invalid FD (prog==NULL) in legacy mode
638 */
639 return list_first_entry(progs, typeof(*pl), node);
640 }
641
642 if (!prog && !link)
643 /* to detach MULTI prog the user has to specify valid FD
644 * of the program or link to be detached
645 */
646 return ERR_PTR(-EINVAL);
647
648 /* find the prog or link and detach it */
649 list_for_each_entry(pl, progs, node) {
650 if (pl->prog == prog && pl->link == link)
651 return pl;
652 }
653 return ERR_PTR(-ENOENT);
654}
655
656/**
657 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
658 * propagate the change to descendants
659 * @cgrp: The cgroup whose descendants to traverse
660 * @prog: A program to detach or NULL
661 * @link: A link to detach or NULL
662 * @type: Type of detach operation
663 *
664 * At most one of @prog or @link can be non-NULL.
665 * Must be called with cgroup_mutex held.
666 */
667int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
668 struct bpf_cgroup_link *link, enum bpf_attach_type type)
669{
670 struct list_head *progs = &cgrp->bpf.progs[type];
671 u32 flags = cgrp->bpf.flags[type];
672 struct bpf_prog_list *pl;
673 struct bpf_prog *old_prog;
674 int err;
675
676 if (prog && link)
677 /* only one of prog or link can be specified */
678 return -EINVAL;
679
680 pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
681 if (IS_ERR(pl))
682 return PTR_ERR(pl);
683
684 /* mark it deleted, so it's ignored while recomputing effective */
685 old_prog = pl->prog;
686 pl->prog = NULL;
687 pl->link = NULL;
688
689 err = update_effective_progs(cgrp, type);
690 if (err)
691 goto cleanup;
692
693 /* now can actually delete it from this cgroup list */
694 list_del(&pl->node);
695 kfree(pl);
696 if (list_empty(progs))
697 /* last program was detached, reset flags to zero */
698 cgrp->bpf.flags[type] = 0;
699 if (old_prog)
700 bpf_prog_put(old_prog);
701 static_branch_dec(&cgroup_bpf_enabled_key);
702 return 0;
703
704cleanup:
705 /* restore back prog or link */
706 pl->prog = old_prog;
707 pl->link = link;
708 return err;
709}
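/*
 * Illustrative sketch only: the matching BPF_PROG_DETACH call.  In
 * BPF_F_ALLOW_MULTI mode the program fd must identify the exact program
 * to remove; in legacy mode the fd may be omitted (prog == NULL above).
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int cgroup_detach_prog(int cg_fd, int prog_fd)
{
        union bpf_attr attr = {};

        attr.target_fd = cg_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type = BPF_CGROUP_INET_EGRESS;

        return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
}
#endif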
710
711/* Must be called with cgroup_mutex held to avoid races. */
712int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
713 union bpf_attr __user *uattr)
714{
715 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
716 enum bpf_attach_type type = attr->query.attach_type;
717 struct list_head *progs = &cgrp->bpf.progs[type];
718 u32 flags = cgrp->bpf.flags[type];
719 struct bpf_prog_array *effective;
720 struct bpf_prog *prog;
721 int cnt, ret = 0, i;
722
723 effective = rcu_dereference_protected(cgrp->bpf.effective[type],
724 lockdep_is_held(&cgroup_mutex));
725
726 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
727 cnt = bpf_prog_array_length(effective);
728 else
729 cnt = prog_list_length(progs);
730
731 if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
732 return -EFAULT;
733 if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
734 return -EFAULT;
735 if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
736 /* return early if user requested only program count + flags */
737 return 0;
738 if (attr->query.prog_cnt < cnt) {
739 cnt = attr->query.prog_cnt;
740 ret = -ENOSPC;
741 }
742
743 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
744 return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
745 } else {
746 struct bpf_prog_list *pl;
747 u32 id;
748
749 i = 0;
750 list_for_each_entry(pl, progs, node) {
751 prog = prog_list_prog(pl);
752 id = prog->aux->id;
753 if (copy_to_user(prog_ids + i, &id, sizeof(id)))
754 return -EFAULT;
755 if (++i == cnt)
756 break;
757 }
758 }
759 return ret;
760}
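/*
 * Illustrative sketch only: querying the programs attached to a cgroup
 * with BPF_PROG_QUERY.  Passing BPF_F_QUERY_EFFECTIVE returns the
 * effective array (including inherited ancestor programs) instead of the
 * locally attached list.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int cgroup_query_progs(int cg_fd, __u32 *ids, __u32 *cnt)
{
        union bpf_attr attr = {};
        int err;

        attr.query.target_fd = cg_fd;
        attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
        attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
        attr.query.prog_ids = (__u64)(unsigned long)ids;
        attr.query.prog_cnt = *cnt;     /* size of ids[] on input */

        err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
        if (!err)
                *cnt = attr.query.prog_cnt;     /* number of ids available */
        return err;
}
#endif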
761
762int cgroup_bpf_prog_attach(const union bpf_attr *attr,
763 enum bpf_prog_type ptype, struct bpf_prog *prog)
764{
765 struct bpf_prog *replace_prog = NULL;
766 struct cgroup *cgrp;
767 int ret;
768
769 cgrp = cgroup_get_from_fd(attr->target_fd);
770 if (IS_ERR(cgrp))
771 return PTR_ERR(cgrp);
772
773 if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
774 (attr->attach_flags & BPF_F_REPLACE)) {
775 replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
776 if (IS_ERR(replace_prog)) {
777 cgroup_put(cgrp);
778 return PTR_ERR(replace_prog);
779 }
780 }
781
782 ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
783 attr->attach_type, attr->attach_flags);
784
785 if (replace_prog)
786 bpf_prog_put(replace_prog);
787 cgroup_put(cgrp);
788 return ret;
789}
790
791int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
792{
793 struct bpf_prog *prog;
794 struct cgroup *cgrp;
795 int ret;
796
797 cgrp = cgroup_get_from_fd(attr->target_fd);
798 if (IS_ERR(cgrp))
799 return PTR_ERR(cgrp);
800
801 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
802 if (IS_ERR(prog))
803 prog = NULL;
804
805 ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
806 if (prog)
807 bpf_prog_put(prog);
808
809 cgroup_put(cgrp);
810 return ret;
811}
812
813static void bpf_cgroup_link_release(struct bpf_link *link)
814{
815 struct bpf_cgroup_link *cg_link =
816 container_of(link, struct bpf_cgroup_link, link);
817 struct cgroup *cg;
818
819 /* link might have been auto-detached by dying cgroup already,
820 * in that case our work is done here
821 */
822 if (!cg_link->cgroup)
823 return;
824
825 mutex_lock(&cgroup_mutex);
826
827 /* re-check cgroup under lock again */
828 if (!cg_link->cgroup) {
829 mutex_unlock(&cgroup_mutex);
830 return;
831 }
832
833 WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
834 cg_link->type));
835
836 cg = cg_link->cgroup;
837 cg_link->cgroup = NULL;
838
839 mutex_unlock(&cgroup_mutex);
840
841 cgroup_put(cg);
842}
843
844static void bpf_cgroup_link_dealloc(struct bpf_link *link)
845{
846 struct bpf_cgroup_link *cg_link =
847 container_of(link, struct bpf_cgroup_link, link);
848
849 kfree(cg_link);
850}
851
852static int bpf_cgroup_link_detach(struct bpf_link *link)
853{
854 bpf_cgroup_link_release(link);
855
856 return 0;
857}
858
859static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
860 struct seq_file *seq)
861{
862 struct bpf_cgroup_link *cg_link =
863 container_of(link, struct bpf_cgroup_link, link);
864 u64 cg_id = 0;
865
866 mutex_lock(&cgroup_mutex);
867 if (cg_link->cgroup)
868 cg_id = cgroup_id(cg_link->cgroup);
869 mutex_unlock(&cgroup_mutex);
870
871 seq_printf(seq,
872 "cgroup_id:\t%llu\n"
873 "attach_type:\t%d\n",
874 cg_id,
875 cg_link->type);
876}
877
878static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
879 struct bpf_link_info *info)
880{
881 struct bpf_cgroup_link *cg_link =
882 container_of(link, struct bpf_cgroup_link, link);
883 u64 cg_id = 0;
884
885 mutex_lock(&cgroup_mutex);
886 if (cg_link->cgroup)
887 cg_id = cgroup_id(cg_link->cgroup);
888 mutex_unlock(&cgroup_mutex);
889
890 info->cgroup.cgroup_id = cg_id;
891 info->cgroup.attach_type = cg_link->type;
892 return 0;
893}
894
895static const struct bpf_link_ops bpf_cgroup_link_lops = {
896 .release = bpf_cgroup_link_release,
897 .dealloc = bpf_cgroup_link_dealloc,
898 .detach = bpf_cgroup_link_detach,
899 .update_prog = cgroup_bpf_replace,
900 .show_fdinfo = bpf_cgroup_link_show_fdinfo,
901 .fill_link_info = bpf_cgroup_link_fill_link_info,
902};
903
904int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
905{
906 struct bpf_link_primer link_primer;
907 struct bpf_cgroup_link *link;
908 struct cgroup *cgrp;
909 int err;
910
911 if (attr->link_create.flags)
912 return -EINVAL;
913
914 cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
915 if (IS_ERR(cgrp))
916 return PTR_ERR(cgrp);
917
918 link = kzalloc(sizeof(*link), GFP_USER);
919 if (!link) {
920 err = -ENOMEM;
921 goto out_put_cgroup;
922 }
923 bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
924 prog);
925 link->cgroup = cgrp;
926 link->type = attr->link_create.attach_type;
927
928 err = bpf_link_prime(&link->link, &link_primer);
929 if (err) {
930 kfree(link);
931 goto out_put_cgroup;
932 }
933
934 err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
935 BPF_F_ALLOW_MULTI);
936 if (err) {
937 bpf_link_cleanup(&link_primer);
938 goto out_put_cgroup;
939 }
940
941 return bpf_link_settle(&link_primer);
942
943out_put_cgroup:
944 cgroup_put(cgrp);
945 return err;
946}
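/*
 * Illustrative sketch only: creating a cgroup bpf_link from userspace.
 * Unlike BPF_PROG_ATTACH, the returned fd pins the attachment: the
 * program stays attached until the last fd is closed or the cgroup dies.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int cgroup_link_create(int cg_fd, int prog_fd)
{
        union bpf_attr attr = {};

        attr.link_create.prog_fd = prog_fd;
        attr.link_create.target_fd = cg_fd;
        attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;

        /* on success returns a new link fd */
        return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}
#endif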
947
948int cgroup_bpf_prog_query(const union bpf_attr *attr,
949 union bpf_attr __user *uattr)
950{
951 struct cgroup *cgrp;
952 int ret;
953
954 cgrp = cgroup_get_from_fd(attr->query.target_fd);
955 if (IS_ERR(cgrp))
956 return PTR_ERR(cgrp);
957
958 ret = cgroup_bpf_query(cgrp, attr, uattr);
959
960 cgroup_put(cgrp);
961 return ret;
962}
963
964/**
965 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
966 * @sk: The socket sending or receiving traffic
967 * @skb: The skb that is being sent or received
968 * @type: The type of program to be executed
969 *
970 * If no socket is passed, or the socket is not of type INET or INET6,
971 * this function does nothing and returns 0.
972 *
973 * The program type passed in via @type must be suitable for network
974 * filtering. No further check is performed to assert that.
975 *
976 * For egress packets, this function can return:
977 * NET_XMIT_SUCCESS (0) - continue with packet output
978 * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr
979 * NET_XMIT_CN (2) - continue with packet output and notify TCP
980 * to call cwr
981 * -EPERM - drop packet
982 *
983 * For ingress packets, this function will return -EPERM if any
984 * attached program was found and it returned != 1 during execution.
985 * Otherwise 0 is returned.
986 */
987int __cgroup_bpf_run_filter_skb(struct sock *sk,
988 struct sk_buff *skb,
989 enum bpf_attach_type type)
990{
991 unsigned int offset = skb->data - skb_network_header(skb);
992 struct sock *save_sk;
993 void *saved_data_end;
994 struct cgroup *cgrp;
995 int ret;
996
997 if (!sk || !sk_fullsock(sk))
998 return 0;
999
1000 if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
1001 return 0;
1002
1003 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1004 save_sk = skb->sk;
1005 skb->sk = sk;
1006 __skb_push(skb, offset);
1007
1008 /* compute pointers for the bpf prog */
1009 bpf_compute_and_save_data_end(skb, &saved_data_end);
1010
1011 if (type == BPF_CGROUP_INET_EGRESS) {
1012 ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
1013 cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
1014 } else {
1015 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
1016 __bpf_prog_run_save_cb);
1017 ret = (ret == 1 ? 0 : -EPERM);
1018 }
1019 bpf_restore_data_end(skb, saved_data_end);
1020 __skb_pull(skb, offset);
1021 skb->sk = save_sk;
1022
1023 return ret;
1024}
1025EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
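/*
 * Illustrative sketch only: a minimal BPF_PROG_TYPE_CGROUP_SKB program of
 * the kind run by this hook.  It would live in a separate object built
 * with clang -target bpf, not in this file.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int allow_all_egress(struct __sk_buff *skb)
{
        /* 1 lets the packet through; 0 drops it and the sender sees -EPERM */
        return 1;
}

char _license[] SEC("license") = "GPL";
#endif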
1026
1027/**
1028 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
1029 * @sk: sock structure to manipulate
1030 * @type: The type of program to be executed
1031 *
1032 * The socket passed is expected to be of type INET or INET6.
1033 *
1034 * The program type passed in via @type must be suitable for sock
1035 * filtering. No further check is performed to assert that.
1036 *
1037 * This function will return %-EPERM if an attached program was found
1038 * and it returned != 1 during execution. In all other cases, 0 is returned.
1039 */
1040int __cgroup_bpf_run_filter_sk(struct sock *sk,
1041 enum bpf_attach_type type)
1042{
1043 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1044 int ret;
1045
1046 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
1047 return ret == 1 ? 0 : -EPERM;
1048}
1049EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
1050
1051/**
1052 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
1053 * the sockaddr provided by user
1054 * @sk: sock struct that will use sockaddr
1055 * @uaddr: sockaddr struct provided by user
1056 * @type: The type of program to be executed
1057 * @t_ctx: Pointer to attach type specific context
1058 *
1059 * The socket is expected to be of type INET or INET6.
1060 *
1061 * This function will return %-EPERM if an attached program is found and
1062 * returned value != 1 during execution. In all other cases, 0 is returned.
1063 */
1064int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
1065 struct sockaddr *uaddr,
1066 enum bpf_attach_type type,
1067 void *t_ctx)
1068{
1069 struct bpf_sock_addr_kern ctx = {
1070 .sk = sk,
1071 .uaddr = uaddr,
1072 .t_ctx = t_ctx,
1073 };
1074 struct sockaddr_storage unspec;
1075 struct cgroup *cgrp;
1076 int ret;
1077
1078 /* Check socket family since not all sockets represent network
1079 * endpoint (e.g. AF_UNIX).
1080 */
1081 if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
1082 return 0;
1083
1084 if (!ctx.uaddr) {
1085 memset(&unspec, 0, sizeof(unspec));
1086 ctx.uaddr = (struct sockaddr *)&unspec;
1087 }
1088
1089 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1090 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
1091
1092 return ret == 1 ? 0 : -EPERM;
1093}
1094EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
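/*
 * Illustrative sketch only: a BPF_PROG_TYPE_CGROUP_SOCK_ADDR program for
 * the connect4 hook, rejecting IPv4 connects to port 25.  Built as a
 * separate BPF object, not part of this file.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("cgroup/connect4")
int block_smtp(struct bpf_sock_addr *ctx)
{
        /* user_port is in network byte order */
        if (ctx->user_port == bpf_htons(25))
                return 0;       /* connect() fails with -EPERM */
        return 1;
}

char _license[] SEC("license") = "GPL";
#endif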
1095
1096/**
1097 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
1098 * @sk: socket to get cgroup from
1099 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
1100 * sk with connection information (IP addresses, etc.) May not contain
1101 * cgroup info if it is a req sock.
1102 * @type: The type of program to be executed
1103 *
1104 * The socket passed is expected to be of type INET or INET6.
1105 *
1106 * The program type passed in via @type must be suitable for sock_ops
1107 * filtering. No further check is performed to assert that.
1108 *
1109 * This function will return %-EPERM if an attached program was found
1110 * and it returned != 1 during execution. In all other cases, 0 is returned.
1111 */
1112int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
1113 struct bpf_sock_ops_kern *sock_ops,
1114 enum bpf_attach_type type)
1115{
1116 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1117 int ret;
1118
1119 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
1120 BPF_PROG_RUN);
1121 return ret == 1 ? 0 : -EPERM;
1122}
1123EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
1124
1125int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
1126 short access, enum bpf_attach_type type)
1127{
1128 struct cgroup *cgrp;
1129 struct bpf_cgroup_dev_ctx ctx = {
1130 .access_type = (access << 16) | dev_type,
1131 .major = major,
1132 .minor = minor,
1133 };
1134 int allow = 1;
1135
1136 rcu_read_lock();
1137 cgrp = task_dfl_cgroup(current);
1138 allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
1139 BPF_PROG_RUN);
1140 rcu_read_unlock();
1141
1142 return !allow;
1143}
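/*
 * Illustrative sketch only: a BPF_CGROUP_DEVICE program consuming the
 * bpf_cgroup_dev_ctx built above ((access << 16) | dev_type).
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/dev")
int deny_block_devices(struct bpf_cgroup_dev_ctx *ctx)
{
        __u32 dev_type = ctx->access_type & 0xffff;     /* low 16 bits */

        if (dev_type == BPF_DEVCG_DEV_BLOCK)
                return 0;       /* deny: caller gets -EPERM */
        return 1;               /* allow everything else */
}

char _license[] SEC("license") = "GPL";
#endif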
1144
1145static const struct bpf_func_proto *
1146cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1147{
1148 switch (func_id) {
1149 case BPF_FUNC_get_current_uid_gid:
1150 return &bpf_get_current_uid_gid_proto;
1151 case BPF_FUNC_get_local_storage:
1152 return &bpf_get_local_storage_proto;
1153 case BPF_FUNC_get_current_cgroup_id:
1154 return &bpf_get_current_cgroup_id_proto;
1155 case BPF_FUNC_perf_event_output:
1156 return &bpf_event_output_data_proto;
1157 default:
1158 return bpf_base_func_proto(func_id);
1159 }
1160}
1161
1162static const struct bpf_func_proto *
1163cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1164{
1165 return cgroup_base_func_proto(func_id, prog);
1166}
1167
1168static bool cgroup_dev_is_valid_access(int off, int size,
1169 enum bpf_access_type type,
1170 const struct bpf_prog *prog,
1171 struct bpf_insn_access_aux *info)
1172{
1173 const int size_default = sizeof(__u32);
1174
1175 if (type == BPF_WRITE)
1176 return false;
1177
1178 if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
1179 return false;
1180 /* The verifier guarantees that size > 0. */
1181 if (off % size != 0)
1182 return false;
1183
1184 switch (off) {
1185 case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
1186 bpf_ctx_record_field_size(info, size_default);
1187 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
1188 return false;
1189 break;
1190 default:
1191 if (size != size_default)
1192 return false;
1193 }
1194
1195 return true;
1196}
1197
1198const struct bpf_prog_ops cg_dev_prog_ops = {
1199};
1200
1201const struct bpf_verifier_ops cg_dev_verifier_ops = {
1202 .get_func_proto = cgroup_dev_func_proto,
1203 .is_valid_access = cgroup_dev_is_valid_access,
1204};
1205
1206/**
1207 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
1208 *
1209 * @head: sysctl table header
1210 * @table: sysctl table
1211 * @write: sysctl is being read (= 0) or written (= 1)
1212 * @buf: pointer to buffer (in and out)
1213 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
1214 * result is size of the new value if the program set one, initial value
1215 * otherwise
1216 * @ppos: value-result argument: value is position at which read from or write
1217 * to sysctl is happening, result is new position if program overrode it,
1218 * initial value otherwise
1219 * @type: type of program to be executed
1220 *
1221 * Program is run when sysctl is being accessed, either read or written, and
1222 * can allow or deny such access.
1223 *
1224 * This function will return %-EPERM if an attached program is found and
1225 * returned value != 1 during execution. In all other cases 0 is returned.
1226 */
1227int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
1228 struct ctl_table *table, int write,
1229 void **buf, size_t *pcount, loff_t *ppos,
1230 enum bpf_attach_type type)
1231{
1232 struct bpf_sysctl_kern ctx = {
1233 .head = head,
1234 .table = table,
1235 .write = write,
1236 .ppos = ppos,
1237 .cur_val = NULL,
1238 .cur_len = PAGE_SIZE,
1239 .new_val = NULL,
1240 .new_len = 0,
1241 .new_updated = 0,
1242 };
1243 struct cgroup *cgrp;
1244 loff_t pos = 0;
1245 int ret;
1246
1247 ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
1248 if (!ctx.cur_val ||
1249 table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
1250 /* Let BPF program decide how to proceed. */
1251 ctx.cur_len = 0;
1252 }
1253
1254 if (write && *buf && *pcount) {
1255 /* BPF program should be able to override new value with a
1256 * buffer bigger than provided by user.
1257 */
1258 ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
1259 ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
1260 if (ctx.new_val) {
1261 memcpy(ctx.new_val, *buf, ctx.new_len);
1262 } else {
1263 /* Let BPF program decide how to proceed. */
1264 ctx.new_len = 0;
1265 }
1266 }
1267
1268 rcu_read_lock();
1269 cgrp = task_dfl_cgroup(current);
1270 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
1271 rcu_read_unlock();
1272
1273 kfree(ctx.cur_val);
1274
1275 if (ret == 1 && ctx.new_updated) {
1276 kfree(*buf);
1277 *buf = ctx.new_val;
1278 *pcount = ctx.new_len;
1279 } else {
1280 kfree(ctx.new_val);
1281 }
1282
1283 return ret == 1 ? 0 : -EPERM;
1284}
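/*
 * Illustrative sketch only: a BPF_CGROUP_SYSCTL program using the
 * bpf_sysctl_get_name() helper implemented later in this file.  It makes
 * every sysctl read-only for tasks in the attached cgroup.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int sysctl_read_only(struct bpf_sysctl *ctx)
{
        char name[64];

        /* full name relative to /proc/sys, e.g. "net/ipv4/ip_forward" */
        bpf_sysctl_get_name(ctx, name, sizeof(name), 0);

        if (ctx->write) {
                bpf_printk("sysctl write denied: %s\n", name);
                return 0;       /* writer gets -EPERM */
        }
        return 1;               /* reads proceed */
}

char _license[] SEC("license") = "GPL";
#endif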
1285
1286#ifdef CONFIG_NET
1287static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
1288 enum bpf_attach_type attach_type)
1289{
1290 struct bpf_prog_array *prog_array;
1291 bool empty;
1292
1293 rcu_read_lock();
1294 prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
1295 empty = bpf_prog_array_is_empty(prog_array);
1296 rcu_read_unlock();
1297
1298 return empty;
1299}
1300
1301static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
1302{
1303 if (unlikely(max_optlen < 0))
1304 return -EINVAL;
1305
1306 if (unlikely(max_optlen > PAGE_SIZE)) {
1307 /* We don't expose optvals that are greater than PAGE_SIZE
1308 * to the BPF program.
1309 */
1310 max_optlen = PAGE_SIZE;
1311 }
1312
1313 ctx->optval = kzalloc(max_optlen, GFP_USER);
1314 if (!ctx->optval)
1315 return -ENOMEM;
1316
1317 ctx->optval_end = ctx->optval + max_optlen;
1318
1319 return max_optlen;
1320}
1321
1322static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
1323{
1324 kfree(ctx->optval);
1325}
1326
1327int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
1328 int *optname, char __user *optval,
1329 int *optlen, char **kernel_optval)
1330{
1331 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1332 struct bpf_sockopt_kern ctx = {
1333 .sk = sk,
1334 .level = *level,
1335 .optname = *optname,
1336 };
1337 int ret, max_optlen;
1338
1339 /* Opportunistic check to see whether we have any BPF program
1340 * attached to the hook so we don't waste time allocating
1341 * memory and locking the socket.
1342 */
1343 if (!cgroup_bpf_enabled ||
1344 __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
1345 return 0;
1346
1347 /* Allocate a bit more than the initial user buffer for
1348 * BPF program. The canonical use case is overriding
1349 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
1350 */
1351 max_optlen = max_t(int, 16, *optlen);
1352
1353 max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
1354 if (max_optlen < 0)
1355 return max_optlen;
1356
1357 ctx.optlen = *optlen;
1358
1359 if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
1360 ret = -EFAULT;
1361 goto out;
1362 }
1363
1364 lock_sock(sk);
1365 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
1366 &ctx, BPF_PROG_RUN);
1367 release_sock(sk);
1368
1369 if (!ret) {
1370 ret = -EPERM;
1371 goto out;
1372 }
1373
1374 if (ctx.optlen == -1) {
1375 /* optlen set to -1, bypass kernel */
1376 ret = 1;
1377 } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
1378 /* optlen is out of bounds */
1379 ret = -EFAULT;
1380 } else {
1381 /* optlen within bounds, run kernel handler */
1382 ret = 0;
1383
1384 /* export any potential modifications */
1385 *level = ctx.level;
1386 *optname = ctx.optname;
1387
1388 /* optlen == 0 from BPF indicates that we should
1389 * use original userspace data.
1390 */
1391 if (ctx.optlen != 0) {
1392 *optlen = ctx.optlen;
1393 *kernel_optval = ctx.optval;
1394 }
1395 }
1396
1397out:
1398 if (ret)
1399 sockopt_free_buf(&ctx);
1400 return ret;
1401}
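/*
 * Illustrative sketch only: a BPF_CGROUP_SETSOCKOPT program showing the
 * optlen == -1 convention handled above (the kernel handler is bypassed
 * and the option is considered fully handled by BPF).
 */
#if 0
#include <netinet/in.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/setsockopt")
int swallow_ip_tos(struct bpf_sockopt *ctx)
{
        if (ctx->level == IPPROTO_IP && ctx->optname == IP_TOS) {
                /* claim the option: the kernel setsockopt handler never runs */
                ctx->optlen = -1;
                return 1;
        }
        /* everything else goes to the kernel handler unchanged */
        return 1;
}

char _license[] SEC("license") = "GPL";
#endif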
1402
1403int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1404 int optname, char __user *optval,
1405 int __user *optlen, int max_optlen,
1406 int retval)
1407{
1408 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1409 struct bpf_sockopt_kern ctx = {
1410 .sk = sk,
1411 .level = level,
1412 .optname = optname,
1413 .retval = retval,
1414 };
1415 int ret;
1416
1417 /* Opportunistic check to see whether we have any BPF program
1418 * attached to the hook so we don't waste time allocating
1419 * memory and locking the socket.
1420 */
1421 if (!cgroup_bpf_enabled ||
1422 __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
1423 return retval;
1424
1425 ctx.optlen = max_optlen;
1426
1427 max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
1428 if (max_optlen < 0)
1429 return max_optlen;
1430
1431 if (!retval) {
1432 /* If kernel getsockopt finished successfully,
1433 * copy whatever was returned to the user back
1434 * into our temporary buffer. Set optlen to the
1435 * one that kernel returned as well to let
1436 * BPF programs inspect the value.
1437 */
1438
1439 if (get_user(ctx.optlen, optlen)) {
1440 ret = -EFAULT;
1441 goto out;
1442 }
1443
1444 if (copy_from_user(ctx.optval, optval,
1445 min(ctx.optlen, max_optlen)) != 0) {
1446 ret = -EFAULT;
1447 goto out;
1448 }
1449 }
1450
1451 lock_sock(sk);
1452 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
1453 &ctx, BPF_PROG_RUN);
1454 release_sock(sk);
1455
1456 if (!ret) {
1457 ret = -EPERM;
1458 goto out;
1459 }
1460
1461 if (ctx.optlen > max_optlen) {
1462 ret = -EFAULT;
1463 goto out;
1464 }
1465
1466 /* BPF programs are only allowed to set retval to 0, not some
1467 * arbitrary value.
1468 */
1469 if (ctx.retval != 0 && ctx.retval != retval) {
1470 ret = -EFAULT;
1471 goto out;
1472 }
1473
1474 if (ctx.optlen != 0) {
1475 if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1476 put_user(ctx.optlen, optlen)) {
1477 ret = -EFAULT;
1478 goto out;
1479 }
1480 }
1481
1482 ret = ctx.retval;
1483
1484out:
1485 sockopt_free_buf(&ctx);
1486 return ret;
1487}
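/*
 * Illustrative sketch only: a BPF_CGROUP_GETSOCKOPT program that merely
 * observes the value the kernel handler produced.  Note the explicit
 * bounds check against optval_end that the verifier requires before any
 * access to optval.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/getsockopt")
int observe_getsockopt(struct bpf_sockopt *ctx)
{
        __u8 *optval = ctx->optval;
        __u8 *optval_end = ctx->optval_end;

        if (ctx->retval)
                return 1;       /* kernel handler failed, nothing to look at */

        if (optval + 1 > optval_end)
                return 1;       /* out of bounds for our one-byte peek */

        /* optval[0] could be inspected or rewritten here */
        return 1;               /* returning 0 would turn the call into -EPERM */
}

char _license[] SEC("license") = "GPL";
#endif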
1488#endif
1489
1490static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1491 size_t *lenp)
1492{
1493 ssize_t tmp_ret = 0, ret;
1494
1495 if (dir->header.parent) {
1496 tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1497 if (tmp_ret < 0)
1498 return tmp_ret;
1499 }
1500
1501 ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1502 if (ret < 0)
1503 return ret;
1504 *bufp += ret;
1505 *lenp -= ret;
1506 ret += tmp_ret;
1507
1508 /* Avoid leading slash. */
1509 if (!ret)
1510 return ret;
1511
1512 tmp_ret = strscpy(*bufp, "/", *lenp);
1513 if (tmp_ret < 0)
1514 return tmp_ret;
1515 *bufp += tmp_ret;
1516 *lenp -= tmp_ret;
1517
1518 return ret + tmp_ret;
1519}
1520
1521BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
1522 size_t, buf_len, u64, flags)
1523{
1524 ssize_t tmp_ret = 0, ret;
1525
1526 if (!buf)
1527 return -EINVAL;
1528
1529 if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
1530 if (!ctx->head)
1531 return -EINVAL;
1532 tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
1533 if (tmp_ret < 0)
1534 return tmp_ret;
1535 }
1536
1537 ret = strscpy(buf, ctx->table->procname, buf_len);
1538
1539 return ret < 0 ? ret : tmp_ret + ret;
1540}
1541
1542static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
1543 .func = bpf_sysctl_get_name,
1544 .gpl_only = false,
1545 .ret_type = RET_INTEGER,
1546 .arg1_type = ARG_PTR_TO_CTX,
1547 .arg2_type = ARG_PTR_TO_MEM,
1548 .arg3_type = ARG_CONST_SIZE,
1549 .arg4_type = ARG_ANYTHING,
1550};
1551
1552static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
1553 size_t src_len)
1554{
1555 if (!dst)
1556 return -EINVAL;
1557
1558 if (!dst_len)
1559 return -E2BIG;
1560
1561 if (!src || !src_len) {
1562 memset(dst, 0, dst_len);
1563 return -EINVAL;
1564 }
1565
1566 memcpy(dst, src, min(dst_len, src_len));
1567
1568 if (dst_len > src_len) {
1569 memset(dst + src_len, '\0', dst_len - src_len);
1570 return src_len;
1571 }
1572
1573 dst[dst_len - 1] = '\0';
1574
1575 return -E2BIG;
1576}
1577
1578BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
1579 char *, buf, size_t, buf_len)
1580{
1581 return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
1582}
1583
1584static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
1585 .func = bpf_sysctl_get_current_value,
1586 .gpl_only = false,
1587 .ret_type = RET_INTEGER,
1588 .arg1_type = ARG_PTR_TO_CTX,
1589 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1590 .arg3_type = ARG_CONST_SIZE,
1591};
1592
1593BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
1594 size_t, buf_len)
1595{
1596 if (!ctx->write) {
1597 if (buf && buf_len)
1598 memset(buf, '\0', buf_len);
1599 return -EINVAL;
1600 }
1601 return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
1602}
1603
1604static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
1605 .func = bpf_sysctl_get_new_value,
1606 .gpl_only = false,
1607 .ret_type = RET_INTEGER,
1608 .arg1_type = ARG_PTR_TO_CTX,
1609 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1610 .arg3_type = ARG_CONST_SIZE,
1611};
1612
1613BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
1614 const char *, buf, size_t, buf_len)
1615{
1616 if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
1617 return -EINVAL;
1618
1619 if (buf_len > PAGE_SIZE - 1)
1620 return -E2BIG;
1621
1622 memcpy(ctx->new_val, buf, buf_len);
1623 ctx->new_len = buf_len;
1624 ctx->new_updated = 1;
1625
1626 return 0;
1627}
1628
1629static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
1630 .func = bpf_sysctl_set_new_value,
1631 .gpl_only = false,
1632 .ret_type = RET_INTEGER,
1633 .arg1_type = ARG_PTR_TO_CTX,
1634 .arg2_type = ARG_PTR_TO_MEM,
1635 .arg3_type = ARG_CONST_SIZE,
1636};
1637
1638static const struct bpf_func_proto *
1639sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1640{
1641 switch (func_id) {
1642 case BPF_FUNC_strtol:
1643 return &bpf_strtol_proto;
1644 case BPF_FUNC_strtoul:
1645 return &bpf_strtoul_proto;
1646 case BPF_FUNC_sysctl_get_name:
1647 return &bpf_sysctl_get_name_proto;
1648 case BPF_FUNC_sysctl_get_current_value:
1649 return &bpf_sysctl_get_current_value_proto;
1650 case BPF_FUNC_sysctl_get_new_value:
1651 return &bpf_sysctl_get_new_value_proto;
1652 case BPF_FUNC_sysctl_set_new_value:
1653 return &bpf_sysctl_set_new_value_proto;
1654 default:
1655 return cgroup_base_func_proto(func_id, prog);
1656 }
1657}
1658
1659static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
1660 const struct bpf_prog *prog,
1661 struct bpf_insn_access_aux *info)
1662{
1663 const int size_default = sizeof(__u32);
1664
1665 if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
1666 return false;
1667
1668 switch (off) {
1669 case bpf_ctx_range(struct bpf_sysctl, write):
1670 if (type != BPF_READ)
1671 return false;
1672 bpf_ctx_record_field_size(info, size_default);
1673 return bpf_ctx_narrow_access_ok(off, size, size_default);
1674 case bpf_ctx_range(struct bpf_sysctl, file_pos):
1675 if (type == BPF_READ) {
1676 bpf_ctx_record_field_size(info, size_default);
1677 return bpf_ctx_narrow_access_ok(off, size, size_default);
1678 } else {
1679 return size == size_default;
1680 }
1681 default:
1682 return false;
1683 }
1684}
1685
1686static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
1687 const struct bpf_insn *si,
1688 struct bpf_insn *insn_buf,
1689 struct bpf_prog *prog, u32 *target_size)
1690{
1691 struct bpf_insn *insn = insn_buf;
1692 u32 read_size;
1693
1694 switch (si->off) {
1695 case offsetof(struct bpf_sysctl, write):
1696 *insn++ = BPF_LDX_MEM(
1697 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
1698 bpf_target_off(struct bpf_sysctl_kern, write,
1699 sizeof_field(struct bpf_sysctl_kern,
1700 write),
1701 target_size));
1702 break;
1703 case offsetof(struct bpf_sysctl, file_pos):
1704 /* ppos is a pointer so it should be accessed via indirect
1705 * loads and stores. Also for stores additional temporary
1706 * register is used since neither src_reg nor dst_reg can be
1707 * overridden.
1708 */
1709 if (type == BPF_WRITE) {
1710 int treg = BPF_REG_9;
1711
1712 if (si->src_reg == treg || si->dst_reg == treg)
1713 --treg;
1714 if (si->src_reg == treg || si->dst_reg == treg)
1715 --treg;
1716 *insn++ = BPF_STX_MEM(
1717 BPF_DW, si->dst_reg, treg,
1718 offsetof(struct bpf_sysctl_kern, tmp_reg));
1719 *insn++ = BPF_LDX_MEM(
1720 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1721 treg, si->dst_reg,
1722 offsetof(struct bpf_sysctl_kern, ppos));
1723 *insn++ = BPF_STX_MEM(
1724 BPF_SIZEOF(u32), treg, si->src_reg,
1725 bpf_ctx_narrow_access_offset(
1726 0, sizeof(u32), sizeof(loff_t)));
1727 *insn++ = BPF_LDX_MEM(
1728 BPF_DW, treg, si->dst_reg,
1729 offsetof(struct bpf_sysctl_kern, tmp_reg));
1730 } else {
1731 *insn++ = BPF_LDX_MEM(
1732 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1733 si->dst_reg, si->src_reg,
1734 offsetof(struct bpf_sysctl_kern, ppos));
1735 read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
1736 *insn++ = BPF_LDX_MEM(
1737 BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
1738 bpf_ctx_narrow_access_offset(
1739 0, read_size, sizeof(loff_t)));
1740 }
1741 *target_size = sizeof(u32);
1742 break;
1743 }
1744
1745 return insn - insn_buf;
1746}
1747
1748const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
1749 .get_func_proto = sysctl_func_proto,
1750 .is_valid_access = sysctl_is_valid_access,
1751 .convert_ctx_access = sysctl_convert_ctx_access,
1752};
1753
1754const struct bpf_prog_ops cg_sysctl_prog_ops = {
1755};
1756
1757static const struct bpf_func_proto *
1758cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1759{
1760 switch (func_id) {
1761#ifdef CONFIG_NET
1762 case BPF_FUNC_sk_storage_get:
1763 return &bpf_sk_storage_get_proto;
1764 case BPF_FUNC_sk_storage_delete:
1765 return &bpf_sk_storage_delete_proto;
1766#endif
1767#ifdef CONFIG_INET
1768 case BPF_FUNC_tcp_sock:
1769 return &bpf_tcp_sock_proto;
1770#endif
1771 default:
1772 return cgroup_base_func_proto(func_id, prog);
1773 }
1774}
1775
1776static bool cg_sockopt_is_valid_access(int off, int size,
1777 enum bpf_access_type type,
1778 const struct bpf_prog *prog,
1779 struct bpf_insn_access_aux *info)
1780{
1781 const int size_default = sizeof(__u32);
1782
1783 if (off < 0 || off >= sizeof(struct bpf_sockopt))
1784 return false;
1785
1786 if (off % size != 0)
1787 return false;
1788
1789 if (type == BPF_WRITE) {
1790 switch (off) {
1791 case offsetof(struct bpf_sockopt, retval):
1792 if (size != size_default)
1793 return false;
1794 return prog->expected_attach_type ==
1795 BPF_CGROUP_GETSOCKOPT;
1796 case offsetof(struct bpf_sockopt, optname):
1797 fallthrough;
1798 case offsetof(struct bpf_sockopt, level):
1799 if (size != size_default)
1800 return false;
1801 return prog->expected_attach_type ==
1802 BPF_CGROUP_SETSOCKOPT;
1803 case offsetof(struct bpf_sockopt, optlen):
1804 return size == size_default;
1805 default:
1806 return false;
1807 }
1808 }
1809
1810 switch (off) {
1811 case offsetof(struct bpf_sockopt, sk):
1812 if (size != sizeof(__u64))
1813 return false;
1814 info->reg_type = PTR_TO_SOCKET;
1815 break;
1816 case offsetof(struct bpf_sockopt, optval):
1817 if (size != sizeof(__u64))
1818 return false;
1819 info->reg_type = PTR_TO_PACKET;
1820 break;
1821 case offsetof(struct bpf_sockopt, optval_end):
1822 if (size != sizeof(__u64))
1823 return false;
1824 info->reg_type = PTR_TO_PACKET_END;
1825 break;
1826 case offsetof(struct bpf_sockopt, retval):
1827 if (size != size_default)
1828 return false;
1829 return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
1830 default:
1831 if (size != size_default)
1832 return false;
1833 break;
1834 }
1835 return true;
1836}
1837
1838#define CG_SOCKOPT_ACCESS_FIELD(T, F) \
1839 T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \
1840 si->dst_reg, si->src_reg, \
1841 offsetof(struct bpf_sockopt_kern, F))
1842
1843static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
1844 const struct bpf_insn *si,
1845 struct bpf_insn *insn_buf,
1846 struct bpf_prog *prog,
1847 u32 *target_size)
1848{
1849 struct bpf_insn *insn = insn_buf;
1850
1851 switch (si->off) {
1852 case offsetof(struct bpf_sockopt, sk):
1853 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
1854 break;
1855 case offsetof(struct bpf_sockopt, level):
1856 if (type == BPF_WRITE)
1857 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
1858 else
1859 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
1860 break;
1861 case offsetof(struct bpf_sockopt, optname):
1862 if (type == BPF_WRITE)
1863 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
1864 else
1865 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
1866 break;
1867 case offsetof(struct bpf_sockopt, optlen):
1868 if (type == BPF_WRITE)
1869 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
1870 else
1871 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
1872 break;
1873 case offsetof(struct bpf_sockopt, retval):
1874 if (type == BPF_WRITE)
1875 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
1876 else
1877 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
1878 break;
1879 case offsetof(struct bpf_sockopt, optval):
1880 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
1881 break;
1882 case offsetof(struct bpf_sockopt, optval_end):
1883 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
1884 break;
1885 }
1886
1887 return insn - insn_buf;
1888}
1889
1890static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
1891 bool direct_write,
1892 const struct bpf_prog *prog)
1893{
1894 /* Nothing to do for sockopt argument. The data is kzalloc'ated.
1895 */
1896 return 0;
1897}
1898
1899const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
1900 .get_func_proto = cg_sockopt_func_proto,
1901 .is_valid_access = cg_sockopt_is_valid_access,
1902 .convert_ctx_access = cg_sockopt_convert_ctx_access,
1903 .gen_prologue = cg_sockopt_get_prologue,
1904};
1905
1906const struct bpf_prog_ops cg_sockopt_prog_ops = {
1907};