Loading...
/*
 * cls_cgroup.h			Control Group Classifier
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#ifndef _NET_CLS_CGROUP_H
#define _NET_CLS_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>

#ifdef CONFIG_CGROUPS
21struct cgroup_cls_state
22{
23 struct cgroup_subsys_state css;
24 u32 classid;
25};

#ifdef CONFIG_NET_CLS_CGROUP
28static inline u32 task_cls_classid(struct task_struct *p)
29{
30 int classid;
31
32 if (in_interrupt())
33 return 0;
34
35 rcu_read_lock();
36 classid = container_of(task_subsys_state(p, net_cls_subsys_id),
37 struct cgroup_cls_state, css)->classid;
38 rcu_read_unlock();
39
40 return classid;
41}
#else
extern int net_cls_subsys_id;

45static inline u32 task_cls_classid(struct task_struct *p)
46{
47 int id;
48 u32 classid = 0;
49
50 if (in_interrupt())
51 return 0;
52
53 rcu_read_lock();
54 id = rcu_dereference_index_check(net_cls_subsys_id,
55 rcu_read_lock_held());
56 if (id >= 0)
57 classid = container_of(task_subsys_state(p, id),
58 struct cgroup_cls_state, css)->classid;
59 rcu_read_unlock();
60
61 return classid;
62}
#endif
#else
65static inline u32 task_cls_classid(struct task_struct *p)
66{
67 return 0;
68}
#endif
#endif /* _NET_CLS_CGROUP_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * cls_cgroup.h			Control Group Classifier
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#ifndef _NET_CLS_CGROUP_H
#define _NET_CLS_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
#include <net/inet_sock.h>

#ifdef CONFIG_CGROUP_NET_CLASSID
18struct cgroup_cls_state {
19 struct cgroup_subsys_state css;
20 u32 classid;
21};

struct cgroup_cls_state *task_cls_state(struct task_struct *p);

25static inline u32 task_cls_classid(struct task_struct *p)
26{
27 u32 classid;
28
29 if (in_interrupt())
30 return 0;
31
32 rcu_read_lock();
33 classid = container_of(task_css(p, net_cls_cgrp_id),
34 struct cgroup_cls_state, css)->classid;
35 rcu_read_unlock();
36
37 return classid;
38}
39
40static inline void sock_update_classid(struct sock_cgroup_data *skcd)
41{
42 u32 classid;
43
44 classid = task_cls_classid(current);
45 sock_cgroup_set_classid(skcd, classid);
46}
47
48static inline u32 __task_get_classid(struct task_struct *task)
49{
50 return task_cls_state(task)->classid;
51}
52
53static inline u32 task_get_classid(const struct sk_buff *skb)
54{
55 u32 classid = __task_get_classid(current);
56
57 /* Due to the nature of the classifier it is required to ignore all
58 * packets originating from softirq context as accessing `current'
59 * would lead to false results.
60 *
61 * This test assumes that all callers of dev_queue_xmit() explicitly
62 * disable bh. Knowing this, it is possible to detect softirq based
63 * calls by looking at the number of nested bh disable calls because
64 * softirqs always disables bh.
65 */
66 if (in_serving_softirq()) {
67 struct sock *sk = skb_to_full_sk(skb);
68
69 /* If there is an sock_cgroup_classid we'll use that. */
70 if (!sk || !sk_fullsock(sk))
71 return 0;
72
73 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
74 }
75
76 return classid;
77}
#else /* !CONFIG_CGROUP_NET_CLASSID */
79static inline void sock_update_classid(struct sock_cgroup_data *skcd)
80{
81}
82
83static inline u32 task_get_classid(const struct sk_buff *skb)
84{
85 return 0;
86}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */