1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* Copyright (C) 2020 Facebook */
3#include <errno.h>
4#include <stdbool.h>
5#include <stdio.h>
6#include <stdlib.h>
7#include <string.h>
8#include <unistd.h>
9#include <bpf/bpf.h>
10
11#include "main.h"
12#include "skeleton/pid_iter.h"
13
14#ifdef BPFTOOL_WITHOUT_SKELETONS
15
/* Stub implementations used when bpftool is built without BPF skeletons:
 * PID references cannot be collected, so the emit helpers are no-ops and
 * table construction reports the feature as unsupported.
 */
int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
{
	return -ENOTSUP;
}
void delete_obj_refs_table(struct obj_refs_table *table) {}
void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {}
23
24#else /* BPFTOOL_WITHOUT_SKELETONS */
25
26#include "pid_iter.skel.h"
27
28static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e)
29{
30 struct obj_refs *refs;
31 struct obj_ref *ref;
32 void *tmp;
33 int i;
34
35 hash_for_each_possible(table->table, refs, node, e->id) {
36 if (refs->id != e->id)
37 continue;
38
39 for (i = 0; i < refs->ref_cnt; i++) {
40 if (refs->refs[i].pid == e->pid)
41 return;
42 }
43
44 tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));
45 if (!tmp) {
46 p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...",
47 e->id, e->pid, e->comm);
48 return;
49 }
50 refs->refs = tmp;
51 ref = &refs->refs[refs->ref_cnt];
52 ref->pid = e->pid;
53 memcpy(ref->comm, e->comm, sizeof(ref->comm));
54 refs->ref_cnt++;
55
56 return;
57 }
58
59 /* new ref */
60 refs = calloc(1, sizeof(*refs));
61 if (!refs) {
62 p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
63 e->id, e->pid, e->comm);
64 return;
65 }
66
67 refs->id = e->id;
68 refs->refs = malloc(sizeof(*refs->refs));
69 if (!refs->refs) {
70 free(refs);
71 p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
72 e->id, e->pid, e->comm);
73 return;
74 }
75 ref = &refs->refs[0];
76 ref->pid = e->pid;
77 memcpy(ref->comm, e->comm, sizeof(ref->comm));
78 refs->ref_cnt = 1;
79 hash_add(table->table, &refs->node, e->id);
80}
81
/* libbpf print callback that swallows all messages. Installed temporarily
 * while probing whether the kernel supports bpf_iter, so an expected load
 * failure does not pollute bpftool's output.
 */
static int __printf(2, 0)
libbpf_print_none(__maybe_unused enum libbpf_print_level level,
		  __maybe_unused const char *format,
		  __maybe_unused va_list args)
{
	return 0;
}
89
/* Fill 'table' with, for each BPF object of the given 'type', the list of
 * processes (PID + comm) holding a reference to it. The data is collected
 * by loading and attaching the pid_iter BPF iterator skeleton and reading
 * its output through an iterator FD.
 *
 * Returns 0 on success, and also 0 (with an empty table) when the kernel
 * does not support BPF iterators; a negative error code otherwise.
 */
int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
{
	struct pid_iter_entry *e;
	/* buffer sized to hold a whole number of iterator entries */
	char buf[4096 / sizeof(*e) * sizeof(*e)];
	struct pid_iter_bpf *skel;
	int err, ret, fd = -1, i;
	libbpf_print_fn_t default_print;

	hash_init(table->table);
	set_max_rlimit();

	skel = pid_iter_bpf__open();
	if (!skel) {
		p_err("failed to open PID iterator skeleton");
		return -1;
	}

	/* tell the iterator program which object type to report on */
	skel->rodata->obj_type = type;

	/* we don't want output polluted with libbpf errors if bpf_iter is not
	 * supported
	 */
	default_print = libbpf_set_print(libbpf_print_none);
	err = pid_iter_bpf__load(skel);
	libbpf_set_print(default_print);
	if (err) {
		/* too bad, kernel doesn't support BPF iterators yet */
		err = 0;
		goto out;
	}
	err = pid_iter_bpf__attach(skel);
	if (err) {
		/* if we loaded above successfully, attach has to succeed */
		p_err("failed to attach PID iterator: %d", err);
		goto out;
	}

	fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
	if (fd < 0) {
		err = -errno;
		p_err("failed to create PID iterator session: %d", err);
		goto out;
	}

	/* drain the iterator; each read returns zero or more fixed-size
	 * pid_iter_entry records, EOF (0) when the iteration is done
	 */
	while (true) {
		ret = read(fd, buf, sizeof(buf));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			err = -errno;
			p_err("failed to read PID iterator output: %d", err);
			goto out;
		}
		if (ret == 0)
			break;
		if (ret % sizeof(*e)) {
			/* partial record means the format is not what we expect */
			err = -EINVAL;
			p_err("invalid PID iterator output format");
			goto out;
		}
		ret /= sizeof(*e);

		e = (void *)buf;
		for (i = 0; i < ret; i++, e++) {
			add_ref(table, e);
		}
	}
	err = 0;
out:
	if (fd >= 0)
		close(fd);
	pid_iter_bpf__destroy(skel);
	return err;
}
164
165void delete_obj_refs_table(struct obj_refs_table *table)
166{
167 struct obj_refs *refs;
168 struct hlist_node *tmp;
169 unsigned int bkt;
170
171 hash_for_each_safe(table->table, bkt, tmp, refs, node) {
172 hash_del(&refs->node);
173 free(refs->refs);
174 free(refs);
175 }
176}
177
178void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
179 json_writer_t *json_writer)
180{
181 struct obj_refs *refs;
182 struct obj_ref *ref;
183 int i;
184
185 if (hash_empty(table->table))
186 return;
187
188 hash_for_each_possible(table->table, refs, node, id) {
189 if (refs->id != id)
190 continue;
191 if (refs->ref_cnt == 0)
192 break;
193
194 jsonw_name(json_writer, "pids");
195 jsonw_start_array(json_writer);
196 for (i = 0; i < refs->ref_cnt; i++) {
197 ref = &refs->refs[i];
198 jsonw_start_object(json_writer);
199 jsonw_int_field(json_writer, "pid", ref->pid);
200 jsonw_string_field(json_writer, "comm", ref->comm);
201 jsonw_end_object(json_writer);
202 }
203 jsonw_end_array(json_writer);
204 break;
205 }
206}
207
208void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix)
209{
210 struct obj_refs *refs;
211 struct obj_ref *ref;
212 int i;
213
214 if (hash_empty(table->table))
215 return;
216
217 hash_for_each_possible(table->table, refs, node, id) {
218 if (refs->id != id)
219 continue;
220 if (refs->ref_cnt == 0)
221 break;
222
223 printf("%s", prefix);
224 for (i = 0; i < refs->ref_cnt; i++) {
225 ref = &refs->refs[i];
226 printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
227 }
228 break;
229 }
230}
231
232
233#endif
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* Copyright (C) 2020 Facebook */
3#include <errno.h>
4#include <linux/err.h>
5#include <stdbool.h>
6#include <stdio.h>
7#include <stdlib.h>
8#include <string.h>
9#include <unistd.h>
10
11#include <bpf/bpf.h>
12#include <bpf/hashmap.h>
13
14#include "main.h"
15#include "skeleton/pid_iter.h"
16
17#ifdef BPFTOOL_WITHOUT_SKELETONS
18
/* Stub implementations used when bpftool is built without BPF skeletons:
 * PID references cannot be collected, so the emit helpers are no-ops and
 * table construction reports the feature as unsupported.
 */
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
	return -ENOTSUP;
}
void delete_obj_refs_table(struct hashmap *map) {}
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}
26
27#else /* BPFTOOL_WITHOUT_SKELETONS */
28
29#include "pid_iter.skel.h"
30
31static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
32{
33 struct hashmap_entry *entry;
34 struct obj_refs *refs;
35 struct obj_ref *ref;
36 int err, i;
37 void *tmp;
38
39 hashmap__for_each_key_entry(map, entry, e->id) {
40 refs = entry->pvalue;
41
42 for (i = 0; i < refs->ref_cnt; i++) {
43 if (refs->refs[i].pid == e->pid)
44 return;
45 }
46
47 tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));
48 if (!tmp) {
49 p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...",
50 e->id, e->pid, e->comm);
51 return;
52 }
53 refs->refs = tmp;
54 ref = &refs->refs[refs->ref_cnt];
55 ref->pid = e->pid;
56 memcpy(ref->comm, e->comm, sizeof(ref->comm));
57 refs->ref_cnt++;
58
59 return;
60 }
61
62 /* new ref */
63 refs = calloc(1, sizeof(*refs));
64 if (!refs) {
65 p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
66 e->id, e->pid, e->comm);
67 return;
68 }
69
70 refs->refs = malloc(sizeof(*refs->refs));
71 if (!refs->refs) {
72 free(refs);
73 p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
74 e->id, e->pid, e->comm);
75 return;
76 }
77 ref = &refs->refs[0];
78 ref->pid = e->pid;
79 memcpy(ref->comm, e->comm, sizeof(ref->comm));
80 refs->ref_cnt = 1;
81 refs->has_bpf_cookie = e->has_bpf_cookie;
82 refs->bpf_cookie = e->bpf_cookie;
83
84 err = hashmap__append(map, e->id, refs);
85 if (err)
86 p_err("failed to append entry to hashmap for ID %u: %s",
87 e->id, strerror(errno));
88}
89
/* libbpf print callback that swallows all messages. Installed temporarily
 * while probing whether the kernel supports bpf_iter, so an expected load
 * failure does not pollute bpftool's output.
 */
static int __printf(2, 0)
libbpf_print_none(__maybe_unused enum libbpf_print_level level,
		  __maybe_unused const char *format,
		  __maybe_unused va_list args)
{
	return 0;
}
97
98int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
99{
100 struct pid_iter_entry *e;
101 char buf[4096 / sizeof(*e) * sizeof(*e)];
102 struct pid_iter_bpf *skel;
103 int err, ret, fd = -1, i;
104 libbpf_print_fn_t default_print;
105
106 *map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
107 if (IS_ERR(*map)) {
108 p_err("failed to create hashmap for PID references");
109 return -1;
110 }
111 set_max_rlimit();
112
113 skel = pid_iter_bpf__open();
114 if (!skel) {
115 p_err("failed to open PID iterator skeleton");
116 return -1;
117 }
118
119 skel->rodata->obj_type = type;
120
121 /* we don't want output polluted with libbpf errors if bpf_iter is not
122 * supported
123 */
124 default_print = libbpf_set_print(libbpf_print_none);
125 err = pid_iter_bpf__load(skel);
126 libbpf_set_print(default_print);
127 if (err) {
128 /* too bad, kernel doesn't support BPF iterators yet */
129 err = 0;
130 goto out;
131 }
132 err = pid_iter_bpf__attach(skel);
133 if (err) {
134 /* if we loaded above successfully, attach has to succeed */
135 p_err("failed to attach PID iterator: %d", err);
136 goto out;
137 }
138
139 fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
140 if (fd < 0) {
141 err = -errno;
142 p_err("failed to create PID iterator session: %d", err);
143 goto out;
144 }
145
146 while (true) {
147 ret = read(fd, buf, sizeof(buf));
148 if (ret < 0) {
149 if (errno == EAGAIN)
150 continue;
151 err = -errno;
152 p_err("failed to read PID iterator output: %d", err);
153 goto out;
154 }
155 if (ret == 0)
156 break;
157 if (ret % sizeof(*e)) {
158 err = -EINVAL;
159 p_err("invalid PID iterator output format");
160 goto out;
161 }
162 ret /= sizeof(*e);
163
164 e = (void *)buf;
165 for (i = 0; i < ret; i++, e++) {
166 add_ref(*map, e);
167 }
168 }
169 err = 0;
170out:
171 if (fd >= 0)
172 close(fd);
173 pid_iter_bpf__destroy(skel);
174 return err;
175}
176
177void delete_obj_refs_table(struct hashmap *map)
178{
179 struct hashmap_entry *entry;
180 size_t bkt;
181
182 if (!map)
183 return;
184
185 hashmap__for_each_entry(map, entry, bkt) {
186 struct obj_refs *refs = entry->pvalue;
187
188 free(refs->refs);
189 free(refs);
190 }
191
192 hashmap__free(map);
193}
194
195void emit_obj_refs_json(struct hashmap *map, __u32 id,
196 json_writer_t *json_writer)
197{
198 struct hashmap_entry *entry;
199
200 if (hashmap__empty(map))
201 return;
202
203 hashmap__for_each_key_entry(map, entry, id) {
204 struct obj_refs *refs = entry->pvalue;
205 int i;
206
207 if (refs->ref_cnt == 0)
208 break;
209
210 if (refs->has_bpf_cookie)
211 jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie);
212
213 jsonw_name(json_writer, "pids");
214 jsonw_start_array(json_writer);
215 for (i = 0; i < refs->ref_cnt; i++) {
216 struct obj_ref *ref = &refs->refs[i];
217
218 jsonw_start_object(json_writer);
219 jsonw_int_field(json_writer, "pid", ref->pid);
220 jsonw_string_field(json_writer, "comm", ref->comm);
221 jsonw_end_object(json_writer);
222 }
223 jsonw_end_array(json_writer);
224 break;
225 }
226}
227
228void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
229{
230 struct hashmap_entry *entry;
231
232 if (hashmap__empty(map))
233 return;
234
235 hashmap__for_each_key_entry(map, entry, id) {
236 struct obj_refs *refs = entry->pvalue;
237 int i;
238
239 if (refs->ref_cnt == 0)
240 break;
241
242 if (refs->has_bpf_cookie)
243 printf("\n\tbpf_cookie %llu", (unsigned long long) refs->bpf_cookie);
244
245 printf("%s", prefix);
246 for (i = 0; i < refs->ref_cnt; i++) {
247 struct obj_ref *ref = &refs->refs[i];
248
249 printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
250 }
251 break;
252 }
253}
254
255
256#endif