// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__type(key, int);
	__type(value, int);
} perf_buf_map SEC(".maps");

/* shorthand for a single CO-RE-relocatable field access */
#define _(P) (__builtin_preserve_access_index(P))

/* define a few structs that the bpf program needs to access */
struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *head);
};
struct dev_ifalias {
	struct callback_head rcuhead;
};

struct net_device /* same as kernel's struct net_device */ {
	int ifindex;
	struct dev_ifalias *ifalias;
};

typedef struct {
	int counter;
} atomic_t;
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

struct sk_buff {
	/* field names and sizes should match those in the kernel */
	unsigned int len, data_len;
	__u16 mac_len, hdr_len, queue_mapping;
	struct net_device *dev;
	/* order of the fields doesn't matter */
	refcount_t users;
	unsigned char *data;
	char __pkt_type_offset[0];
	char cb[48];
};

struct meta {
	int ifindex;
	__u32 cb32_0;
	__u8 cb8_0;
};

/* TRACE_EVENT(kfree_skb,
 *	TP_PROTO(struct sk_buff *skb, void *location),
 */
SEC("tp_btf/kfree_skb")
int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
{
	struct net_device *dev;
	struct callback_head *ptr;
	void *func;
	int users;
	unsigned char *data;
	unsigned short pkt_data;
	struct meta meta = {};
	char pkt_type;
	__u32 *cb32;
	__u8 *cb8;

	__builtin_preserve_access_index(({
		users = skb->users.refs.counter;
		data = skb->data;
		dev = skb->dev;
		ptr = dev->ifalias->rcuhead.next;
		func = ptr->func;
		cb8 = (__u8 *)&skb->cb;
		cb32 = (__u32 *)&skb->cb;
	}));

	meta.ifindex = _(dev->ifindex);
	meta.cb8_0 = cb8[8];
	meta.cb32_0 = cb32[2];

	/* pkt_type is a 3-bit bitfield in the byte at __pkt_type_offset */
	bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type),
			      _(&skb->__pkt_type_offset));
	pkt_type &= 7;

	/* read eth proto; the ethertype sits at offset 12 of the L2 header */
	bpf_probe_read_kernel(&pkt_data, sizeof(pkt_data), data + 12);

	bpf_printk("rcuhead.next %llx func %llx\n", ptr, func);
	bpf_printk("skb->len %d users %d pkt_type %x\n",
		   _(skb->len), users, pkt_type);
	bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping));
	bpf_printk("dev->ifindex %d data %llx pkt_data %x\n",
		   meta.ifindex, data, pkt_data);
	bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0);

	if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1)
		/* raw tp ignores return value */
		return 0;

	/* send the first 72 bytes of the packet to user space; the upper
	 * 32 bits of the flags argument carry the number of skb bytes to
	 * append to the sample
	 */
	bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU,
		       &meta, sizeof(meta));
	return 0;
}

struct {
	bool fentry_test_ok;
	bool fexit_test_ok;
} result = {};

SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	int len, ifindex;

	__builtin_preserve_access_index(({
		len = skb->len;
		ifindex = dev->ifindex;
	}));

	/* fentry sees the full packet including the 14-byte L2 header */
	if (len != 74 || ifindex != 1)
		return 0;
	result.fentry_test_ok = true;
	return 0;
}
SEC("fexit/eth_type_trans") int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb, struct net_device *dev, unsigned short protocol) { int len, ifindex; __builtin_preserve_access_index(({ len = skb->len; ifindex = dev->ifindex; })); /* fexit sees packet without L2 header that eth_type_trans should have * consumed. */ if (len != 60 || protocol != bpf_htons(0x86dd) || ifindex != 1) return 0; result.fexit_test_ok = true; return 0; } |