// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}

static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}

static int get_kernel_version(void)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}
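
/*
 * For illustration: a release string of "5.4.1" encodes as
 * (5 << 16) + (4 << 8) + 1 = 0x050401, the same layout as the
 * kernel's KERNEL_VERSION() macro.
 */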

/* Try to load a program of the given type; callers inspect errno and,
 * optionally, the verifier log in buf to decide whether the type is
 * supported.
 */
static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_SK_LOOKUP:
		xattr.expected_attach_type = BPF_SK_LOOKUP;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
	default:
		break;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	if (fd >= 0)
		close(fd);
}

bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}
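
/*
 * Usage sketch (hypothetical caller, using only the public API above):
 * check whether the running kernel accepts XDP programs.
 *
 *	if (bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
 *		printf("XDP program type is supported\n");
 *
 * A nonzero ifindex probes offloaded loading on that netdev instead of
 * the host kernel.
 */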

int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);

	free(raw_btf);
	return btf_fd;
}
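
/*
 * The blob assembled above follows the raw BTF layout:
 *
 *	+------------+-----------+-------------+
 *	| btf_header | type data | string data |
 *	+------------+-----------+-------------+
 *	   hdr_len     type_len      str_len
 *
 * type_off and str_off are offsets from the end of the header, which is
 * why str_off is simply types_len here (type_off stays 0).
 */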

static int load_sk_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
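	/* Name fields below are byte offsets into strs:
	 * 1 = "bpf_spin_lock", 15 = "val", 19 = "cnt", 23 = "l".
	 */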
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */                      /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                                /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				     strs, sizeof(strs));
}

bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	struct bpf_create_map_attr attr = {};
	int fd = -1, btf_fd = -1, fd_inner;

	key_size	= sizeof(__u32);
	value_size	= sizeof(__u32);
	max_entries	= 1;
	map_flags	= 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size	= sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size	= sizeof(__u64);
		value_size	= sizeof(__u64);
		map_flags	= BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size	= sizeof(struct bpf_cgroup_storage_key);
		value_size	= sizeof(__u64);
		max_entries	= 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size	= 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_sk_storage_btf();
		if (btf_fd < 0)
			return false;
		break;
	case BPF_MAP_TYPE_RINGBUF:
		key_size = 0;
		value_size = 0;
		max_entries = 4096;
		break;
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STRUCT_OPS:
	default:
		break;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;

		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;
		if (btf_fd >= 0) {
			attr.btf_fd = btf_fd;
			attr.btf_key_type_id = btf_key_type_id;
			attr.btf_value_type_id = btf_value_type_id;
		}

		fd = bpf_create_map_xattr(&attr);
	}
	if (fd >= 0)
		close(fd);
	if (btf_fd >= 0)
		close(btf_fd);

	return fd >= 0;
}
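
/*
 * Usage sketch (hypothetical caller): feature-detect the BPF ring
 * buffer before falling back to a perf event array.
 *
 *	if (bpf_probe_map_type(BPF_MAP_TYPE_RINGBUF, 0))
 *		use_ringbuf();
 *	else
 *		use_perf_event_array();
 *
 * use_ringbuf() and use_perf_event_array() stand in for application
 * code.
 */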

bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
				!grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}
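
/*
 * Usage sketch (hypothetical caller): test whether kprobe programs may
 * call bpf_probe_read_kernel() on this kernel.
 *
 *	if (bpf_probe_helper(BPF_FUNC_probe_read_kernel,
 *			     BPF_PROG_TYPE_KPROBE, 0))
 *		printf("bpf_probe_read_kernel() is available\n");
 *
 * The probe loads a two-instruction program that just calls the helper
 * and greps the 4 KiB verifier log for the rejection messages above.
 */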

/*
 * Probe for availability of kernel commit (5.3):
 *
 * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
 */
bool bpf_probe_large_insn_limit(__u32 ifindex)
{
	struct bpf_insn insns[BPF_MAXINSNS + 1];
	int i;

	for (i = 0; i < BPF_MAXINSNS; i++)
		insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	errno = 0;
	probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
		   ifindex);

	return errno != E2BIG && errno != EINVAL;
}
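
/*
 * Usage sketch (hypothetical caller):
 *
 *	if (bpf_probe_large_insn_limit(0))
 *		printf("programs larger than BPF_MAXINSNS are accepted\n");
 *
 * The probe submits BPF_MAXINSNS + 1 instructions: kernels without the
 * commit above reject that count with E2BIG, so any other outcome means
 * the larger limit is in place.
 */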