// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/list.h>
#include <stdlib.h>
#include <string.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"

/* A control flow graph: the list of functions in the program. */
struct cfg {
	struct list_head funcs;
	int func_num;
};

/* A function: its instruction range and its list of basic blocks. */
struct func_node {
	struct list_head l;
	struct list_head bbs;
	struct bpf_insn *start;
	struct bpf_insn *end;
	int idx;
	int bb_num;
};

/* A basic block: a maximal straight-line run of instructions, with lists of
 * incoming (e_prevs) and outgoing (e_succs) edges.
 */
struct bb_node {
	struct list_head l;
	struct list_head e_prevs;
	struct list_head e_succs;
	struct bpf_insn *head;
	struct bpf_insn *tail;
	int idx;
};

#define EDGE_FLAG_EMPTY		0x0
#define EDGE_FLAG_FALLTHROUGH	0x1
#define EDGE_FLAG_JUMP		0x2
struct edge_node {
	struct list_head l;
	struct bb_node *src;
	struct bb_node *dst;
	int flags;
};

/* Every function gets two pseudo blocks: ENTRY first in the list, EXIT last. */
#define ENTRY_BLOCK_INDEX	0
#define EXIT_BLOCK_INDEX	1
#define NUM_FIXED_BLOCKS	2
#define func_prev(func)		list_prev_entry(func, l)
#define func_next(func)		list_next_entry(func, l)
#define bb_prev(bb)		list_prev_entry(bb, l)
#define bb_next(bb)		list_next_entry(bb, l)
#define entry_bb(func)		func_first_bb(func)
#define exit_bb(func)		func_last_bb(func)
#define cfg_first_func(cfg)	\
	list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg)	\
	list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func)	\
	list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func)	\
	list_last_entry(&func->bbs, struct bb_node, l)

/* Insert a function starting at @insn into @cfg, keeping the function list
 * sorted by start address. If a function with that start already exists,
 * return it instead. Returns NULL on allocation failure.
 */
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
	struct func_node *new_func, *func;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func->start == insn)
			return func;
		else if (func->start > insn)
			break;
	}

	func = func_prev(func);
	new_func = calloc(1, sizeof(*new_func));
	if (!new_func) {
		p_err("OOM when allocating FUNC node");
		return NULL;
	}
	new_func->start = insn;
	new_func->idx = cfg->func_num;
	/* Init the block list right away so that cfg_destroy() is safe to
	 * call on a partially built CFG.
	 */
	INIT_LIST_HEAD(&new_func->bbs);
	list_add(&new_func->l, &func->l);
	cfg->func_num++;

	return new_func;
}

/* Same as cfg_append_func(), but for basic blocks within @func, keyed by the
 * block's first instruction.
 */
static struct bb_node *func_append_bb(struct func_node *func,
				      struct bpf_insn *insn)
{
	struct bb_node *new_bb, *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
		else if (bb->head > insn)
			break;
	}

	bb = bb_prev(bb);
	new_bb = calloc(1, sizeof(*new_bb));
	if (!new_bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}
	new_bb->head = insn;
	INIT_LIST_HEAD(&new_bb->e_prevs);
	INIT_LIST_HEAD(&new_bb->e_succs);
	list_add(&new_bb->l, &bb->l);

	return new_bb;
}

/* Allocate a block with no instructions and link it after @after; used for
 * the ENTRY and EXIT pseudo blocks.
 */
static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
	struct bb_node *bb;

	bb = calloc(1, sizeof(*bb));
	if (!bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}

	INIT_LIST_HEAD(&bb->e_prevs);
	INIT_LIST_HEAD(&bb->e_succs);
	list_add(&bb->l, after);

	return bb;
}

/* Partition [cur, end) into functions: the first instruction and the target
 * of every BPF-to-BPF (pseudo) call each start a function. Returns true on
 * error.
 */
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
				struct bpf_insn *end)
{
	struct func_node *func, *last_func;

	func = cfg_append_func(cfg, cur);
	if (!func)
		return true;

	for (; cur < end; cur++) {
		if (cur->code != (BPF_JMP | BPF_CALL))
			continue;
		if (cur->src_reg != BPF_PSEUDO_CALL)
			continue;
		func = cfg_append_func(cfg, cur + cur->off + 1);
		if (!func)
			return true;
	}

	/* Each function ends right before the next one's start. */
	last_func = cfg_last_func(cfg);
	last_func->end = end - 1;
	func = cfg_first_func(cfg);
	list_for_each_entry_from(func, &last_func->l, l) {
		func->end = func_next(func)->start - 1;
	}

	return false;
}

/* Control flow can be altered by both 64-bit (BPF_JMP) and 32-bit
 * (BPF_JMP32) jump instructions.
 */
static bool is_jmp_insn(__u8 code)
{
	return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
}

/* Mark basic block heads: the function start, every jump target, and the
 * fall-through successor of every conditional jump. Returns true on error.
 */
static bool func_partition_bb_head(struct func_node *func)
{
	struct bpf_insn *cur, *end;
	struct bb_node *bb;

	cur = func->start;
	end = func->end;
	INIT_LIST_HEAD(&func->bbs);
	bb = func_append_bb(func, cur);
	if (!bb)
		return true;

	for (; cur <= end; cur++) {
		if (is_jmp_insn(cur->code)) {
			__u8 opcode = BPF_OP(cur->code);

			if (opcode == BPF_EXIT || opcode == BPF_CALL)
				continue;

			/* The jump target starts a new block. */
			bb = func_append_bb(func, cur + cur->off + 1);
			if (!bb)
				return true;

			/* Conditional jumps can also fall through. */
			if (opcode != BPF_JA) {
				bb = func_append_bb(func, cur + 1);
				if (!bb)
					return true;
			}
		}
	}

	return false;
}

/* With all block heads known, each block's tail is the instruction right
 * before the next block's head. Number the blocks, reserving indexes 0 and 1
 * for the ENTRY and EXIT pseudo blocks.
 */
static void func_partition_bb_tail(struct func_node *func)
{
	unsigned int bb_idx = NUM_FIXED_BLOCKS;
	struct bb_node *bb, *last;

	last = func_last_bb(func);
	last->tail = func->end;
	bb = func_first_bb(func);
	list_for_each_entry_from(bb, &last->l, l) {
		bb->tail = bb_next(bb)->head - 1;
		bb->idx = bb_idx++;
	}

	last->idx = bb_idx++;
	func->bb_num = bb_idx;
}

/* Prepend the ENTRY pseudo block and append the EXIT pseudo block. */
static bool func_add_special_bb(struct func_node *func)
{
	struct bb_node *bb;

	bb = func_insert_dummy_bb(&func->bbs);
	if (!bb)
		return true;
	bb->idx = ENTRY_BLOCK_INDEX;

	bb = func_insert_dummy_bb(&func_last_bb(func)->l);
	if (!bb)
		return true;
	bb->idx = EXIT_BLOCK_INDEX;

	return false;
}

static bool func_partition_bb(struct func_node *func)
{
	if (func_partition_bb_head(func))
		return true;

	func_partition_bb_tail(func);

	return false;
}

static struct bb_node *func_search_bb_with_head(struct func_node *func,
						struct bpf_insn *insn)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
	}

	return NULL;
}

static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
				  int flags)
{
	struct edge_node *e;

	e = calloc(1, sizeof(*e));
	if (!e) {
		p_err("OOM when allocating edge node");
		return NULL;
	}

	if (src)
		e->src = src;
	if (dst)
		e->dst = dst;

	e->flags |= flags;

	return e;
}

/* Connect the blocks: ENTRY falls through to the first real block, the last
 * real block falls through to EXIT, and every other block gets a fall-through
 * edge, a jump edge, or both, depending on its last instruction. Returns true
 * on error.
 */
static bool func_add_bb_edges(struct func_node *func)
{
	struct bpf_insn *insn;
	struct edge_node *e;
	struct bb_node *bb;

	bb = entry_bb(func);
	e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_succs);

	bb = exit_bb(func);
	e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_prevs);

	bb = entry_bb(func);
	bb = bb_next(bb);
	list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
		e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
		if (!e)
			return true;
		e->src = bb;

		insn = bb->tail;
		if (!is_jmp_insn(insn->code) ||
		    BPF_OP(insn->code) == BPF_EXIT) {
			/* Non-jump tail: fall through only. */
			e->dst = bb_next(bb);
			e->flags |= EDGE_FLAG_FALLTHROUGH;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		} else if (BPF_OP(insn->code) == BPF_JA) {
			/* Unconditional jump: jump edge only. */
			e->dst = func_search_bb_with_head(func,
							  insn + insn->off + 1);
			e->flags |= EDGE_FLAG_JUMP;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		}

		/* Conditional jump: both a fall-through and a jump edge. */
		e->dst = bb_next(bb);
		e->flags |= EDGE_FLAG_FALLTHROUGH;
		list_add_tail(&e->l, &bb->e_succs);

		e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
		if (!e)
			return true;
		e->src = bb;
		e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
		list_add_tail(&e->l, &bb->e_succs);
	}

	return false;
}

/* Build the full CFG for the @len bytes of instructions at @insn: partition
 * into functions, partition each function into basic blocks, then wire up
 * the edges. Returns true on error, in which case the caller should still
 * run cfg_destroy() to release whatever was built.
 */
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
	int cnt = len / sizeof(*insn);
	struct func_node *func;

	INIT_LIST_HEAD(&cfg->funcs);

	if (cfg_partition_funcs(cfg, insn, insn + cnt))
		return true;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func_partition_bb(func) || func_add_special_bb(func))
			return true;

		if (func_add_bb_edges(func))
			return true;
	}

	return false;
}

/* Free every function, basic block, and edge in @cfg. */
static void cfg_destroy(struct cfg *cfg)
{
	struct func_node *func, *func2;

	list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
		struct bb_node *bb, *bb2;

		list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
			struct edge_node *e, *e2;

			list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
				list_del(&e->l);
				free(e);
			}

			list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
				list_del(&e->l);
				free(e);
			}

			list_del(&bb->l);
			free(bb);
		}

		list_del(&func->l);
		free(func);
	}
}

/* Emit one DOT node: ENTRY and EXIT as diamonds, real blocks as records
 * holding their disassembled instructions.
 */
static void
draw_bb_node(struct func_node *func, struct bb_node *bb, struct dump_data *dd,
	     bool opcodes, bool linum)
{
	const char *shape;

	if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
		shape = "Mdiamond";
	else
		shape = "record";

	printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
	       func->idx, bb->idx, shape);

	if (bb->idx == ENTRY_BLOCK_INDEX) {
		printf("ENTRY");
	} else if (bb->idx == EXIT_BLOCK_INDEX) {
		printf("EXIT");
	} else {
		unsigned int start_idx;

		printf("{\\\n");
		start_idx = bb->head - func->start;
		dump_xlated_for_graph(dd, bb->head, bb->tail, start_idx,
				      opcodes, linum);
		printf("}");
	}

	printf("\"];\n\n");
}

/* Emit one DOT edge per successor of @bb. */
static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
	const char *style = "\"solid,bold\"";
	const char *color = "black";
	int func_idx = func->idx;
	struct edge_node *e;
	int weight = 10;

	if (list_empty(&bb->e_succs))
		return;

	list_for_each_entry(e, &bb->e_succs, l) {
		printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
		       func_idx, e->src->idx, func_idx, e->dst->idx,
		       style, color, weight);
		printf("];\n");
	}
}

static void
func_output_bb_def(struct func_node *func, struct dump_data *dd,
		   bool opcodes, bool linum)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_node(func, bb, dd, opcodes, linum);
	}
}

static void func_output_edges(struct func_node *func)
{
	int func_idx = func->idx;
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_succ_edges(func, bb);
	}

	/* Add an invisible edge from ENTRY to EXIT to improve the graph
	 * layout.
	 */
	printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
	       func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}

/* Emit the whole graph, one DOT cluster subgraph per function. */
static void
cfg_dump(struct cfg *cfg, struct dump_data *dd, bool opcodes, bool linum)
{
	struct func_node *func;

	printf("digraph \"DOT graph for eBPF program\" {\n");
	list_for_each_entry(func, &cfg->funcs, l) {
		printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
		       func->idx, func->idx);
		func_output_bb_def(func, dd, opcodes, linum);
		func_output_edges(func);
		printf("}\n");
	}
	printf("}\n");
}

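/* For orientation, a sketch of the output shape for a program with a single
 * function and a single real basic block (illustrative only, not verbatim
 * bpftool output; the record label carries the block's disassembly):
 *
 *	digraph "DOT graph for eBPF program" {
 *	subgraph "cluster_0" {
 *		style="dashed";
 *		color="black";
 *		label="func_0 ()";
 *		fn_0_bb_0 [shape=Mdiamond,style=filled,label="ENTRY"];
 *		fn_0_bb_2 [shape=record,style=filled,label="{ ... }"];
 *		fn_0_bb_1 [shape=Mdiamond,style=filled,label="EXIT"];
 *		fn_0_bb_0:s -> fn_0_bb_2:n [...];
 *		fn_0_bb_2:s -> fn_0_bb_1:n [...];
 *		fn_0_bb_0:s -> fn_0_bb_1:n [style="invis", constraint=true];
 *	}
 *	}
 */
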
475void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
476 bool opcodes, bool linum)
477{
478 struct bpf_insn *insn = buf;
479 struct cfg cfg;
480
481 memset(&cfg, 0, sizeof(cfg));
482 if (cfg_build(&cfg, insn, len))
483 return;
484
485 cfg_dump(&cfg, dd, opcodes, linum);
486
487 cfg_destroy(&cfg);
488}
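
/* Minimal usage sketch (an illustration, not part of bpftool): hand-build a
 * two-instruction program ("r0 = 0; exit") and dump its CFG. The empty
 * dump_data is an assumption here; in bpftool proper, the caller fills it in
 * from the translated program image before handing it over. Compile with the
 * rest of bpftool's objects, defining CFG_USAGE_EXAMPLE.
 */
#ifdef CFG_USAGE_EXAMPLE
int main(void)
{
	struct bpf_insn prog[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },				/* exit */
	};
	struct dump_data dd = {};

	dump_xlated_cfg(&dd, prog, sizeof(prog), false, false);
	return 0;
}
#endif /* CFG_USAGE_EXAMPLE */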