v5.14.15 (tools/bpf/bpftool/gen.c)
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2019 Facebook */
   3
   4#ifndef _GNU_SOURCE
   5#define _GNU_SOURCE
   6#endif
   7#include <ctype.h>
   8#include <errno.h>
   9#include <fcntl.h>
  10#include <linux/err.h>
  11#include <stdbool.h>
  12#include <stdio.h>
  13#include <string.h>
  14#include <unistd.h>
  15#include <bpf/bpf.h>
  16#include <bpf/libbpf.h>
  17#include <sys/types.h>
  18#include <sys/stat.h>
  19#include <sys/mman.h>
  20#include <bpf/btf.h>
  21#include <bpf/bpf_gen_internal.h>
  22
  23#include "json_writer.h"
  24#include "main.h"
  25
  26#define MAX_OBJ_NAME_LEN 64
  27
  28static void sanitize_identifier(char *name)
  29{
  30	int i;
  31
  32	for (i = 0; name[i]; i++)
  33		if (!isalnum(name[i]) && name[i] != '_')
  34			name[i] = '_';
  35}
  36
  37static bool str_has_suffix(const char *str, const char *suffix)
  38{
  39	size_t i, n1 = strlen(str), n2 = strlen(suffix);
  40
  41	if (n1 < n2)
  42		return false;
  43
  44	for (i = 0; i < n2; i++) {
  45		if (str[n1 - i - 1] != suffix[n2 - i - 1])
  46			return false;
  47	}
  48
  49	return true;
  50}
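      /* Example: str_has_suffix("prog.bpf.o", ".o") is true,
       * str_has_suffix("prog.bpf", ".o") is false.
       */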
  51
  52static void get_obj_name(char *name, const char *file)
  53{
  54	/* Using basename() GNU version which doesn't modify arg. */
  55	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
  56	name[MAX_OBJ_NAME_LEN - 1] = '\0';
  57	if (str_has_suffix(name, ".o"))
  58		name[strlen(name) - 2] = '\0';
  59	sanitize_identifier(name);
  60}
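      /* Example: "path/to/my-prog.bpf.o" becomes object name "my_prog_bpf"
       * (basename taken, ".o" suffix dropped, remaining non-identifier
       * characters replaced with '_').
       */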
  61
  62static void get_header_guard(char *guard, const char *obj_name)
  63{
  64	int i;
  65
  66	sprintf(guard, "__%s_SKEL_H__", obj_name);
  67	for (i = 0; guard[i]; i++)
  68		guard[i] = toupper(guard[i]);
  69}
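      /* Example: obj_name "my_prog" yields the guard "__MY_PROG_SKEL_H__". */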
  70
  71static const char *get_map_ident(const struct bpf_map *map)
  72{
  73	const char *name = bpf_map__name(map);
  74
  75	if (!bpf_map__is_internal(map))
  76		return name;
  77
  78	if (str_has_suffix(name, ".data"))
  79		return "data";
  80	else if (str_has_suffix(name, ".rodata"))
  81		return "rodata";
  82	else if (str_has_suffix(name, ".bss"))
  83		return "bss";
  84	else if (str_has_suffix(name, ".kconfig"))
  85		return "kconfig";
  86	else
  87		return NULL;
  88}
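      /* Example: an internal map whose name ends in ".rodata" is exposed in
       * the skeleton as "rodata"; non-internal maps keep their own name, and
       * unrecognized internal maps yield NULL and are skipped by callers.
       */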
  89
  90static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
  91{
  92	vprintf(fmt, args);
  93}
  94
  95static int codegen_datasec_def(struct bpf_object *obj,
  96			       struct btf *btf,
  97			       struct btf_dump *d,
  98			       const struct btf_type *sec,
  99			       const char *obj_name)
 100{
 101	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
 102	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
 103	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
 104	const char *sec_ident;
 105	char var_ident[256];
 106	bool strip_mods = false;
 107
 108	if (strcmp(sec_name, ".data") == 0) {
 109		sec_ident = "data";
 110		strip_mods = true;
 111	} else if (strcmp(sec_name, ".bss") == 0) {
 112		sec_ident = "bss";
 113		strip_mods = true;
 114	} else if (strcmp(sec_name, ".rodata") == 0) {
 115		sec_ident = "rodata";
 116		strip_mods = true;
 117	} else if (strcmp(sec_name, ".kconfig") == 0) {
 118		sec_ident = "kconfig";
 119	} else {
 120		return 0;
 121	}
 122
 123	printf("	struct %s__%s {\n", obj_name, sec_ident);
 124	for (i = 0; i < vlen; i++, sec_var++) {
 125		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
 126		const char *var_name = btf__name_by_offset(btf, var->name_off);
 127		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
 128			.field_name = var_ident,
 129			.indent_level = 2,
 130			.strip_mods = strip_mods,
 131		);
 132		int need_off = sec_var->offset, align_off, align;
 133		__u32 var_type_id = var->type;
 134
 135		/* static variables are not exposed through BPF skeleton */
 136		if (btf_var(var)->linkage == BTF_VAR_STATIC)
 137			continue;
 138
 139		if (off > need_off) {
 140			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
 141			      sec_name, i, need_off, off);
 142			return -EINVAL;
 143		}
 144
 145		align = btf__align_of(btf, var->type);
 146		if (align <= 0) {
 147			p_err("Failed to determine alignment of variable '%s': %d",
 148			      var_name, align);
 149			return -EINVAL;
 150		}
 151		/* Assume 32-bit architectures when generating data section
 152		 * struct memory layout. Given bpftool can't know which target
 153		 * host architecture it's emitting skeleton for, we need to be
 154		 * conservative and assume 32-bit one to ensure enough padding
 155		 * bytes are generated for pointer and long types. This will
 156		 * still work correctly for 64-bit architectures, because in
 157		 * the worst case we'll generate unnecessary padding field,
 158		 * which on 64-bit architectures is not strictly necessary and
 159		 * would be handled by natural 8-byte alignment. But it still
 160		 * will be a correct memory layout, based on recorded offsets
 161		 * in BTF.
 162		 */
 163		if (align > 4)
 164			align = 4;
 165
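		/* Worked example: with off = 4, need_off = 16 and align clamped
		 * to 4, align_off = 4 != 16, so a 12-byte __pad field is
		 * emitted before this variable.
		 */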
 166		align_off = (off + align - 1) / align * align;
 167		if (align_off != need_off) {
 168			printf("\t\tchar __pad%d[%d];\n",
 169			       pad_cnt, need_off - off);
 170			pad_cnt++;
 171		}
 172
 173		/* sanitize variable name, e.g., for static vars inside
  174		 * a function, its name is '<function name>.<variable name>',
 175		 * which we'll turn into a '<function name>_<variable name>'
 176		 */
 177		var_ident[0] = '\0';
 178		strncat(var_ident, var_name, sizeof(var_ident) - 1);
 179		sanitize_identifier(var_ident);
 180
 181		printf("\t\t");
 182		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
 183		if (err)
 184			return err;
 185		printf(";\n");
 186
 187		off = sec_var->offset + sec_var->size;
 188	}
 189	printf("	} *%s;\n", sec_ident);
 190	return 0;
 191}
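      /* A sketch of the emitted definition, assuming an object named "example"
       * whose .bss section holds a single global 'int my_counter':
       *
       *	struct example__bss {
       *		int my_counter;
       *	} *bss;
       */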
 192
 193static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
 194{
 195	struct btf *btf = bpf_object__btf(obj);
 196	int n = btf__get_nr_types(btf);
 197	struct btf_dump *d;
 198	int i, err = 0;
 199
 200	d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf);
 201	if (IS_ERR(d))
 202		return PTR_ERR(d);
 203
 204	for (i = 1; i <= n; i++) {
 205		const struct btf_type *t = btf__type_by_id(btf, i);
 206
 207		if (!btf_is_datasec(t))
 208			continue;
 209
 210		err = codegen_datasec_def(obj, btf, d, t, obj_name);
 211		if (err)
 212			goto out;
 213	}
 214out:
 215	btf_dump__free(d);
 216	return err;
 217}
 218
 219static void codegen(const char *template, ...)
 220{
 221	const char *src, *end;
 222	int skip_tabs = 0, n;
 223	char *s, *dst;
 224	va_list args;
 225	char c;
 226
 227	n = strlen(template);
 228	s = malloc(n + 1);
 229	if (!s)
 230		exit(-1);
 231	src = template;
 232	dst = s;
 233
 234	/* find out "baseline" indentation to skip */
 235	while ((c = *src++)) {
 236		if (c == '\t') {
 237			skip_tabs++;
 238		} else if (c == '\n') {
 239			break;
 240		} else {
 241			p_err("unrecognized character at pos %td in template '%s'",
 242			      src - template - 1, template);
 243			free(s);
 244			exit(-1);
 245		}
 246	}
 247
 248	while (*src) {
 249		/* skip baseline indentation tabs */
 250		for (n = skip_tabs; n > 0; n--, src++) {
 251			if (*src != '\t') {
 252				p_err("not enough tabs at pos %td in template '%s'",
 253				      src - template - 1, template);
 254				free(s);
 255				exit(-1);
 256			}
 257		}
 258		/* trim trailing whitespace */
 259		end = strchrnul(src, '\n');
 260		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
 261			;
 262		memcpy(dst, src, n);
 263		dst += n;
 264		if (*end)
 265			*dst++ = '\n';
 266		src = *end ? end + 1 : end;
 267	}
 268	*dst++ = '\0';
 269
 270	/* print out using adjusted template */
 271	va_start(args, template);
 272	n = vprintf(s, args);
 273	va_end(args);
 274
 275	free(s);
 276}
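      /* Example: the first template line contains only tabs and sets the
       * "baseline" indentation. With skip_tabs == 1, a template line
       * "\t\tint x;   \n" is emitted as "\tint x;\n" (one tab of source
       * indentation dropped, trailing whitespace trimmed); the adjusted
       * template is then passed to vprintf() with the variadic arguments.
       */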
 277
 278static void print_hex(const char *data, int data_sz)
 279{
 280	int i, len;
 281
 282	for (i = 0, len = 0; i < data_sz; i++) {
 283		int w = data[i] ? 4 : 2;
 284
 285		len += w;
 286		if (len > 78) {
 287			printf("\\\n");
 288			len = w;
 289		}
 290		if (!data[i])
 291			printf("\\0");
 292		else
 293			printf("\\x%02x", (unsigned char)data[i]);
 294	}
 295}
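      /* Example: print_hex("ab\0", 3) prints "\x61\x62\0"; a '\' line
       * continuation plus newline is emitted before a byte that would push
       * the current output line past 78 columns.
       */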
 296
 297static size_t bpf_map_mmap_sz(const struct bpf_map *map)
 298{
 299	long page_sz = sysconf(_SC_PAGE_SIZE);
 300	size_t map_sz;
 301
 302	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
 303	map_sz = roundup(map_sz, page_sz);
 304	return map_sz;
 305}
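      /* Example: value_size 16, max_entries 100 -> 16 * 100 = 1600 bytes,
       * rounded up to a single 4096-byte page on typical systems.
       */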
 306
 307static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
 308{
 309	struct bpf_program *prog;
 310
 311	bpf_object__for_each_program(prog, obj) {
 312		const char *tp_name;
 313
 314		codegen("\
 315			\n\
 316			\n\
 317			static inline int					    \n\
 318			%1$s__%2$s__attach(struct %1$s *skel)			    \n\
 319			{							    \n\
 320				int prog_fd = skel->progs.%2$s.prog_fd;		    \n\
 321			", obj_name, bpf_program__name(prog));
 322
 323		switch (bpf_program__get_type(prog)) {
 324		case BPF_PROG_TYPE_RAW_TRACEPOINT:
 325			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
 326			printf("\tint fd = bpf_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
 327			break;
 328		case BPF_PROG_TYPE_TRACING:
 329			printf("\tint fd = bpf_raw_tracepoint_open(NULL, prog_fd);\n");
 330			break;
 331		default:
 332			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
 333			break;
 334		}
 335		codegen("\
 336			\n\
 337										    \n\
 338				if (fd > 0)					    \n\
 339					skel->links.%1$s_fd = fd;		    \n\
 340				return fd;					    \n\
 341			}							    \n\
 342			", bpf_program__name(prog));
 343	}
 344
 345	codegen("\
 346		\n\
 347									    \n\
 348		static inline int					    \n\
 349		%1$s__attach(struct %1$s *skel)				    \n\
 350		{							    \n\
 351			int ret = 0;					    \n\
 352									    \n\
 353		", obj_name);
 354
 355	bpf_object__for_each_program(prog, obj) {
 356		codegen("\
 357			\n\
 358				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel);   \n\
 359			", obj_name, bpf_program__name(prog));
 360	}
 361
 362	codegen("\
 363		\n\
 364			return ret < 0 ? ret : 0;			    \n\
 365		}							    \n\
 366									    \n\
 367		static inline void					    \n\
 368		%1$s__detach(struct %1$s *skel)				    \n\
 369		{							    \n\
 370		", obj_name);
 371
 372	bpf_object__for_each_program(prog, obj) {
 373		codegen("\
 374			\n\
 375				skel_closenz(skel->links.%1$s_fd);	    \n\
 376			", bpf_program__name(prog));
 377	}
 378
 379	codegen("\
 380		\n\
 381		}							    \n\
 382		");
 383}
 384
 385static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
 386{
 387	struct bpf_program *prog;
 388	struct bpf_map *map;
 389
 390	codegen("\
 391		\n\
 392		static void						    \n\
 393		%1$s__destroy(struct %1$s *skel)			    \n\
 394		{							    \n\
 395			if (!skel)					    \n\
 396				return;					    \n\
 397			%1$s__detach(skel);				    \n\
 398		",
 399		obj_name);
 400
 401	bpf_object__for_each_program(prog, obj) {
 402		codegen("\
 403			\n\
 404				skel_closenz(skel->progs.%1$s.prog_fd);	    \n\
 405			", bpf_program__name(prog));
 406	}
 407
 408	bpf_object__for_each_map(map, obj) {
  409		const char *ident;
 410
 411		ident = get_map_ident(map);
 412		if (!ident)
 413			continue;
 414		if (bpf_map__is_internal(map) &&
 415		    (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
 416			printf("\tmunmap(skel->%1$s, %2$zd);\n",
 417			       ident, bpf_map_mmap_sz(map));
 418		codegen("\
 419			\n\
 420				skel_closenz(skel->maps.%1$s.map_fd);	    \n\
 421			", ident);
 422	}
 423	codegen("\
 424		\n\
 425			free(skel);					    \n\
 426		}							    \n\
 427		",
 428		obj_name);
 429}
 430
 431static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
 432{
 433	struct bpf_object_load_attr load_attr = {};
 434	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
 435	struct bpf_map *map;
 436	int err = 0;
 437
 438	err = bpf_object__gen_loader(obj, &opts);
 439	if (err)
 440		return err;
 441
 442	load_attr.obj = obj;
 443	if (verifier_logs)
 444		/* log_level1 + log_level2 + stats, but not stable UAPI */
 445		load_attr.log_level = 1 + 2 + 4;
 446
 447	err = bpf_object__load_xattr(&load_attr);
 448	if (err) {
 449		p_err("failed to load object file");
 450		goto out;
 451	}
 452	/* If there was no error during load then gen_loader_opts
 453	 * are populated with the loader program.
 454	 */
 455
 456	/* finish generating 'struct skel' */
 457	codegen("\
 458		\n\
 459		};							    \n\
 460		", obj_name);
 461
 462
 463	codegen_attach_detach(obj, obj_name);
 464
 465	codegen_destroy(obj, obj_name);
 466
 467	codegen("\
 468		\n\
 469		static inline struct %1$s *				    \n\
 470		%1$s__open(void)					    \n\
 471		{							    \n\
 472			struct %1$s *skel;				    \n\
 473									    \n\
 474			skel = calloc(sizeof(*skel), 1);		    \n\
 475			if (!skel)					    \n\
 476				goto cleanup;				    \n\
 477			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
 478		",
 479		obj_name, opts.data_sz);
 480	bpf_object__for_each_map(map, obj) {
 481		const char *ident;
 482		const void *mmap_data = NULL;
 483		size_t mmap_size = 0;
 484
 485		ident = get_map_ident(map);
 486		if (!ident)
 487			continue;
 488
 489		if (!bpf_map__is_internal(map) ||
 490		    !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
 491			continue;
 492
 493		codegen("\
 494			\n\
 495				skel->%1$s =					 \n\
 496					mmap(NULL, %2$zd, PROT_READ | PROT_WRITE,\n\
 497					     MAP_SHARED | MAP_ANONYMOUS, -1, 0); \n\
 498				if (skel->%1$s == (void *) -1)			 \n\
 499					goto cleanup;				 \n\
 500				memcpy(skel->%1$s, (void *)\"\\			 \n\
 501			", ident, bpf_map_mmap_sz(map));
 502		mmap_data = bpf_map__initial_value(map, &mmap_size);
 503		print_hex(mmap_data, mmap_size);
 504		printf("\", %2$zd);\n"
 505		       "\tskel->maps.%1$s.initial_value = (__u64)(long)skel->%1$s;\n",
 506		       ident, mmap_size);
 507	}
 508	codegen("\
 509		\n\
 510			return skel;					    \n\
 511		cleanup:						    \n\
 512			%1$s__destroy(skel);				    \n\
 513			return NULL;					    \n\
 514		}							    \n\
 515									    \n\
 516		static inline int					    \n\
 517		%1$s__load(struct %1$s *skel)				    \n\
 518		{							    \n\
 519			struct bpf_load_and_run_opts opts = {};		    \n\
 520			int err;					    \n\
 521									    \n\
 522			opts.ctx = (struct bpf_loader_ctx *)skel;	    \n\
 523			opts.data_sz = %2$d;				    \n\
 524			opts.data = (void *)\"\\			    \n\
 525		",
 526		obj_name, opts.data_sz);
 527	print_hex(opts.data, opts.data_sz);
 528	codegen("\
 529		\n\
 530		\";							    \n\
 531		");
 532
 533	codegen("\
 534		\n\
 535			opts.insns_sz = %d;				    \n\
 536			opts.insns = (void *)\"\\			    \n\
 537		",
 538		opts.insns_sz);
 539	print_hex(opts.insns, opts.insns_sz);
 540	codegen("\
 541		\n\
 542		\";							    \n\
 543			err = bpf_load_and_run(&opts);			    \n\
 544			if (err < 0)					    \n\
 545				return err;				    \n\
 546		", obj_name);
 547	bpf_object__for_each_map(map, obj) {
 548		const char *ident, *mmap_flags;
 549
 550		ident = get_map_ident(map);
 551		if (!ident)
 552			continue;
 553
 554		if (!bpf_map__is_internal(map) ||
 555		    !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
 556			continue;
 557		if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG)
 558			mmap_flags = "PROT_READ";
 559		else
 560			mmap_flags = "PROT_READ | PROT_WRITE";
 561
 562		printf("\tskel->%1$s =\n"
 563		       "\t\tmmap(skel->%1$s, %2$zd, %3$s, MAP_SHARED | MAP_FIXED,\n"
 564		       "\t\t\tskel->maps.%1$s.map_fd, 0);\n",
 565		       ident, bpf_map_mmap_sz(map), mmap_flags);
 566	}
 567	codegen("\
 568		\n\
 569			return 0;					    \n\
 570		}							    \n\
 571									    \n\
 572		static inline struct %1$s *				    \n\
 573		%1$s__open_and_load(void)				    \n\
 574		{							    \n\
 575			struct %1$s *skel;				    \n\
 576									    \n\
 577			skel = %1$s__open();				    \n\
 578			if (!skel)					    \n\
 579				return NULL;				    \n\
 580			if (%1$s__load(skel)) {				    \n\
 581				%1$s__destroy(skel);			    \n\
 582				return NULL;				    \n\
 583			}						    \n\
 584			return skel;					    \n\
 585		}							    \n\
 586		", obj_name);
 587
 588	codegen("\
 589		\n\
 590									    \n\
 591		#endif /* %s */						    \n\
 592		",
 593		header_guard);
 594	err = 0;
 595out:
 596	return err;
 597}
 598
 599static int do_skeleton(int argc, char **argv)
 600{
 601	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
 602	size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
 603	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
 604	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
 605	struct bpf_object *obj = NULL;
 606	const char *file, *ident;
 607	struct bpf_program *prog;
 608	int fd, err = -1;
 609	struct bpf_map *map;
 610	struct btf *btf;
 611	struct stat st;
 612
 613	if (!REQ_ARGS(1)) {
 614		usage();
 615		return -1;
 616	}
 617	file = GET_ARG();
 618
 619	while (argc) {
 620		if (!REQ_ARGS(2))
 621			return -1;
 622
 623		if (is_prefix(*argv, "name")) {
 624			NEXT_ARG();
 625
 626			if (obj_name[0] != '\0') {
 627				p_err("object name already specified");
 628				return -1;
 629			}
 630
 631			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
 632			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
 633		} else {
 634			p_err("unknown arg %s", *argv);
 635			return -1;
 636		}
 637
 638		NEXT_ARG();
 639	}
 640
 641	if (argc) {
 642		p_err("extra unknown arguments");
 643		return -1;
 644	}
 645
 646	if (stat(file, &st)) {
 647		p_err("failed to stat() %s: %s", file, strerror(errno));
 648		return -1;
 649	}
 650	file_sz = st.st_size;
 651	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
 652	fd = open(file, O_RDONLY);
 653	if (fd < 0) {
 654		p_err("failed to open() %s: %s", file, strerror(errno));
 655		return -1;
 656	}
 657	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
 658	if (obj_data == MAP_FAILED) {
 659		obj_data = NULL;
 660		p_err("failed to mmap() %s: %s", file, strerror(errno));
 661		goto out;
 662	}
 663	if (obj_name[0] == '\0')
 664		get_obj_name(obj_name, file);
 665	opts.object_name = obj_name;
 666	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
 667	if (IS_ERR(obj)) {
 668		char err_buf[256];
 669
 670		libbpf_strerror(PTR_ERR(obj), err_buf, sizeof(err_buf));
 671		p_err("failed to open BPF object file: %s", err_buf);
 672		obj = NULL;
 673		goto out;
 674	}
 675
 676	bpf_object__for_each_map(map, obj) {
 677		ident = get_map_ident(map);
 678		if (!ident) {
 679			p_err("ignoring unrecognized internal map '%s'...",
 680			      bpf_map__name(map));
 681			continue;
 682		}
 683		map_cnt++;
 684	}
 685	bpf_object__for_each_program(prog, obj) {
 686		prog_cnt++;
 687	}
 688
 689	get_header_guard(header_guard, obj_name);
 690	if (use_loader) {
 691		codegen("\
 692		\n\
 693		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
 694		/* THIS FILE IS AUTOGENERATED! */			    \n\
 695		#ifndef %2$s						    \n\
 696		#define %2$s						    \n\
 697									    \n\
 698		#include <stdlib.h>					    \n\
 699		#include <bpf/bpf.h>					    \n\
 700		#include <bpf/skel_internal.h>				    \n\
 701									    \n\
 702		struct %1$s {						    \n\
 703			struct bpf_loader_ctx ctx;			    \n\
 704		",
 705		obj_name, header_guard
 706		);
 707	} else {
 708		codegen("\
 709		\n\
 710		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
 711									    \n\
 712		/* THIS FILE IS AUTOGENERATED! */			    \n\
 713		#ifndef %2$s						    \n\
 714		#define %2$s						    \n\
 715									    \n\
 716		#include <errno.h>					    \n\
 717		#include <stdlib.h>					    \n\
 718		#include <bpf/libbpf.h>					    \n\
 719									    \n\
 720		struct %1$s {						    \n\
 721			struct bpf_object_skeleton *skeleton;		    \n\
 722			struct bpf_object *obj;				    \n\
 723		",
 724		obj_name, header_guard
 725		);
 726	}
 727
 728	if (map_cnt) {
 729		printf("\tstruct {\n");
 730		bpf_object__for_each_map(map, obj) {
 731			ident = get_map_ident(map);
 732			if (!ident)
 733				continue;
 734			if (use_loader)
 735				printf("\t\tstruct bpf_map_desc %s;\n", ident);
 736			else
 737				printf("\t\tstruct bpf_map *%s;\n", ident);
 738		}
 739		printf("\t} maps;\n");
 740	}
 741
 742	if (prog_cnt) {
 743		printf("\tstruct {\n");
 744		bpf_object__for_each_program(prog, obj) {
 745			if (use_loader)
 746				printf("\t\tstruct bpf_prog_desc %s;\n",
 747				       bpf_program__name(prog));
 748			else
 749				printf("\t\tstruct bpf_program *%s;\n",
 750				       bpf_program__name(prog));
 751		}
 752		printf("\t} progs;\n");
 753		printf("\tstruct {\n");
 754		bpf_object__for_each_program(prog, obj) {
 755			if (use_loader)
 756				printf("\t\tint %s_fd;\n",
 757				       bpf_program__name(prog));
 758			else
 759				printf("\t\tstruct bpf_link *%s;\n",
 760				       bpf_program__name(prog));
 761		}
 762		printf("\t} links;\n");
 763	}
 764
 765	btf = bpf_object__btf(obj);
 766	if (btf) {
 767		err = codegen_datasecs(obj, obj_name);
 768		if (err)
 769			goto out;
 770	}
 771	if (use_loader) {
 772		err = gen_trace(obj, obj_name, header_guard);
 773		goto out;
 774	}
 775
 776	codegen("\
 777		\n\
 778		};							    \n\
 779									    \n\
 780		static void						    \n\
 781		%1$s__destroy(struct %1$s *obj)				    \n\
 782		{							    \n\
 783			if (!obj)					    \n\
 784				return;					    \n\
 785			if (obj->skeleton)				    \n\
 786				bpf_object__destroy_skeleton(obj->skeleton);\n\
 787			free(obj);					    \n\
 788		}							    \n\
 789									    \n\
 790		static inline int					    \n\
 791		%1$s__create_skeleton(struct %1$s *obj);		    \n\
 792									    \n\
 793		static inline struct %1$s *				    \n\
 794		%1$s__open_opts(const struct bpf_object_open_opts *opts)    \n\
 795		{							    \n\
 796			struct %1$s *obj;				    \n\
 797			int err;					    \n\
 798									    \n\
 799			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
 800			if (!obj) {					    \n\
 801				errno = ENOMEM;				    \n\
 802				return NULL;				    \n\
 803			}						    \n\
 804									    \n\
 805			err = %1$s__create_skeleton(obj);		    \n\
 806			err = err ?: bpf_object__open_skeleton(obj->skeleton, opts);\n\
 807			if (err)					    \n\
 808				goto err_out;				    \n\
 809									    \n\
 810			return obj;					    \n\
 811		err_out:						    \n\
 812			%1$s__destroy(obj);				    \n\
 813			errno = -err;					    \n\
 814			return NULL;					    \n\
 815		}							    \n\
 816									    \n\
 817		static inline struct %1$s *				    \n\
 818		%1$s__open(void)					    \n\
 819		{							    \n\
 820			return %1$s__open_opts(NULL);			    \n\
 821		}							    \n\
 822									    \n\
 823		static inline int					    \n\
 824		%1$s__load(struct %1$s *obj)				    \n\
 825		{							    \n\
 826			return bpf_object__load_skeleton(obj->skeleton);    \n\
 827		}							    \n\
 828									    \n\
 829		static inline struct %1$s *				    \n\
 830		%1$s__open_and_load(void)				    \n\
 831		{							    \n\
 832			struct %1$s *obj;				    \n\
 833			int err;					    \n\
 834									    \n\
 835			obj = %1$s__open();				    \n\
 836			if (!obj)					    \n\
 837				return NULL;				    \n\
 838			err = %1$s__load(obj);				    \n\
 839			if (err) {					    \n\
 840				%1$s__destroy(obj);			    \n\
 841				errno = -err;				    \n\
 842				return NULL;				    \n\
 843			}						    \n\
 844			return obj;					    \n\
 845		}							    \n\
 846									    \n\
 847		static inline int					    \n\
 848		%1$s__attach(struct %1$s *obj)				    \n\
 849		{							    \n\
 850			return bpf_object__attach_skeleton(obj->skeleton);  \n\
 851		}							    \n\
 852									    \n\
 853		static inline void					    \n\
 854		%1$s__detach(struct %1$s *obj)				    \n\
 855		{							    \n\
 856			return bpf_object__detach_skeleton(obj->skeleton);  \n\
 857		}							    \n\
 858		",
 859		obj_name
 860	);
 861
 862	codegen("\
 863		\n\
 864									    \n\
 865		static inline int					    \n\
 866		%1$s__create_skeleton(struct %1$s *obj)			    \n\
 867		{							    \n\
 868			struct bpf_object_skeleton *s;			    \n\
 869									    \n\
 870			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
 871			if (!s)						    \n\
 872				goto err;				    \n\
 873			obj->skeleton = s;				    \n\
 874									    \n\
 875			s->sz = sizeof(*s);				    \n\
 876			s->name = \"%1$s\";				    \n\
 877			s->obj = &obj->obj;				    \n\
 878		",
 879		obj_name
 880	);
 881	if (map_cnt) {
 882		codegen("\
 883			\n\
 884									    \n\
 885				/* maps */				    \n\
 886				s->map_cnt = %zu;			    \n\
 887				s->map_skel_sz = sizeof(*s->maps);	    \n\
 888				s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
 889				if (!s->maps)				    \n\
 890					goto err;			    \n\
 891			",
 892			map_cnt
 893		);
 894		i = 0;
 895		bpf_object__for_each_map(map, obj) {
 896			ident = get_map_ident(map);
 897
 898			if (!ident)
 899				continue;
 900
 901			codegen("\
 902				\n\
 903									    \n\
 904					s->maps[%zu].name = \"%s\";	    \n\
 905					s->maps[%zu].map = &obj->maps.%s;   \n\
 906				",
 907				i, bpf_map__name(map), i, ident);
 908			/* memory-mapped internal maps */
 909			if (bpf_map__is_internal(map) &&
 910			    (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) {
 911				printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
 912				       i, ident);
 913			}
 914			i++;
 915		}
 916	}
 917	if (prog_cnt) {
 918		codegen("\
 919			\n\
 920									    \n\
 921				/* programs */				    \n\
 922				s->prog_cnt = %zu;			    \n\
 923				s->prog_skel_sz = sizeof(*s->progs);	    \n\
 924				s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
 925				if (!s->progs)				    \n\
 926					goto err;			    \n\
 927			",
 928			prog_cnt
 929		);
 930		i = 0;
 931		bpf_object__for_each_program(prog, obj) {
 932			codegen("\
 933				\n\
 934									    \n\
 935					s->progs[%1$zu].name = \"%2$s\";    \n\
 936					s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
 937					s->progs[%1$zu].link = &obj->links.%2$s;\n\
 938				",
 939				i, bpf_program__name(prog));
 940			i++;
 941		}
 942	}
 943	codegen("\
 944		\n\
 945									    \n\
 946			s->data_sz = %d;				    \n\
 947			s->data = (void *)\"\\				    \n\
 948		",
 949		file_sz);
 950
 951	/* embed contents of BPF object file */
 952	print_hex(obj_data, file_sz);
 953
 954	codegen("\
 955		\n\
 956		\";							    \n\
 957									    \n\
 958			return 0;					    \n\
 959		err:							    \n\
 960			bpf_object__destroy_skeleton(s);		    \n\
 961			return -ENOMEM;					    \n\
 962		}							    \n\
 963									    \n\
 964		#endif /* %s */						    \n\
 965		",
 966		header_guard);
 967	err = 0;
 968out:
 969	bpf_object__close(obj);
 970	if (obj_data)
 971		munmap(obj_data, mmap_sz);
 972	close(fd);
 973	return err;
 974}
 975
 976static int do_object(int argc, char **argv)
 977{
 978	struct bpf_linker *linker;
 979	const char *output_file, *file;
 980	int err = 0;
 981
 982	if (!REQ_ARGS(2)) {
 983		usage();
 984		return -1;
 985	}
 986
 987	output_file = GET_ARG();
 988
 989	linker = bpf_linker__new(output_file, NULL);
 990	if (!linker) {
 991		p_err("failed to create BPF linker instance");
 992		return -1;
 993	}
 994
 995	while (argc) {
 996		file = GET_ARG();
 997
 998		err = bpf_linker__add_file(linker, file, NULL);
 999		if (err) {
1000			p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
1001			goto out;
1002		}
1003	}
1004
1005	err = bpf_linker__finalize(linker);
1006	if (err) {
1007		p_err("failed to finalize ELF file: %s (%d)", strerror(err), err);
1008		goto out;
1009	}
1010
1011	err = 0;
1012out:
1013	bpf_linker__free(linker);
1014	return err;
1015}
1016
1017static int do_help(int argc, char **argv)
1018{
1019	if (json_output) {
1020		jsonw_null(json_wtr);
1021		return 0;
1022	}
1023
1024	fprintf(stderr,
1025		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
1026		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
1027		"       %1$s %2$s help\n"
1028		"\n"
1029		"       " HELP_SPEC_OPTIONS "\n"
1030		"",
1031		bin_name, "gen");
1032
1033	return 0;
1034}
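      /* Example invocations corresponding to the usage string above
       * (file names are illustrative):
       *
       *	bpftool gen object out.bpf.o in1.bpf.o in2.bpf.o
       *	bpftool gen skeleton prog.bpf.o name my_prog > prog.skel.h
       */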
1035
1036static const struct cmd cmds[] = {
1037	{ "object",	do_object },
1038	{ "skeleton",	do_skeleton },
1039	{ "help",	do_help },
1040	{ 0 }
1041};
1042
1043int do_gen(int argc, char **argv)
1044{
1045	return cmd_select(cmds, argc, argv, do_help);
1046}
v6.9.4 (tools/bpf/bpftool/gen.c)
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2019 Facebook */
   3
   4#ifndef _GNU_SOURCE
   5#define _GNU_SOURCE
   6#endif
   7#include <ctype.h>
   8#include <errno.h>
   9#include <fcntl.h>
  10#include <libgen.h>
  11#include <linux/err.h>
  12#include <stdbool.h>
  13#include <stdio.h>
  14#include <string.h>
  15#include <unistd.h>
  16#include <bpf/bpf.h>
  17#include <bpf/libbpf.h>
  18#include <bpf/libbpf_internal.h>
  19#include <sys/types.h>
  20#include <sys/stat.h>
  21#include <sys/mman.h>
  22#include <bpf/btf.h>
  23
  24#include "json_writer.h"
  25#include "main.h"
  26
  27#define MAX_OBJ_NAME_LEN 64
  28
  29static void sanitize_identifier(char *name)
  30{
  31	int i;
  32
  33	for (i = 0; name[i]; i++)
  34		if (!isalnum(name[i]) && name[i] != '_')
  35			name[i] = '_';
  36}
  37
  38static bool str_has_prefix(const char *str, const char *prefix)
  39{
  40	return strncmp(str, prefix, strlen(prefix)) == 0;
  41}
  42
  43static bool str_has_suffix(const char *str, const char *suffix)
  44{
  45	size_t i, n1 = strlen(str), n2 = strlen(suffix);
  46
  47	if (n1 < n2)
  48		return false;
  49
  50	for (i = 0; i < n2; i++) {
  51		if (str[n1 - i - 1] != suffix[n2 - i - 1])
  52			return false;
  53	}
  54
  55	return true;
  56}
  57
  58static const struct btf_type *
  59resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
  60{
  61	const struct btf_type *t;
  62
  63	t = skip_mods_and_typedefs(btf, id, NULL);
  64	if (!btf_is_ptr(t))
  65		return NULL;
  66
  67	t = skip_mods_and_typedefs(btf, t->type, res_id);
  68
  69	return btf_is_func_proto(t) ? t : NULL;
  70}
  71
  72static void get_obj_name(char *name, const char *file)
  73{
  74	char file_copy[PATH_MAX];
  75
  76	/* Using basename() POSIX version to be more portable. */
  77	strncpy(file_copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0';
  78	strncpy(name, basename(file_copy), MAX_OBJ_NAME_LEN - 1)[MAX_OBJ_NAME_LEN - 1] = '\0';
  79	if (str_has_suffix(name, ".o"))
  80		name[strlen(name) - 2] = '\0';
  81	sanitize_identifier(name);
  82}
  83
  84static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
  85{
  86	int i;
  87
  88	sprintf(guard, "__%s_%s__", obj_name, suffix);
  89	for (i = 0; guard[i]; i++)
  90		guard[i] = toupper(guard[i]);
  91}
  92
  93static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
  94{
  95	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
  96	const char *name = bpf_map__name(map);
  97	int i, n;
  98
  99	if (!bpf_map__is_internal(map)) {
 100		snprintf(buf, buf_sz, "%s", name);
 101		return true;
 102	}
 103
 104	for  (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
 105		const char *sfx = sfxs[i], *p;
 106
 107		p = strstr(name, sfx);
 108		if (p) {
 109			snprintf(buf, buf_sz, "%s", p + 1);
 110			sanitize_identifier(buf);
 111			return true;
 112		}
 113	}
 114
 115	return false;
 116}
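      /* Example: an internal map named "my_obj.data.foo" yields the identifier
       * "data_foo"; non-internal maps keep their name verbatim.
       */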
 117
 118static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
 119{
 120	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
 121	int i, n;
 122
 123	/* recognize hard coded LLVM section name */
 124	if (strcmp(sec_name, ".addr_space.1") == 0) {
 125		/* this is the name to use in skeleton */
 126		snprintf(buf, buf_sz, "arena");
 127		return true;
 128	}
 129	for  (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
 130		const char *pfx = pfxs[i];
 131
 132		if (str_has_prefix(sec_name, pfx)) {
 133			snprintf(buf, buf_sz, "%s", sec_name + 1);
 134			sanitize_identifier(buf);
 135			return true;
 136		}
 137	}
 138
 139	return false;
 140}
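      /* Example: section ".rodata.str1.1" yields "rodata_str1_1"; the
       * hard-coded LLVM arena section ".addr_space.1" is exposed as "arena".
       */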
 141
 142static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
 143{
 144	vprintf(fmt, args);
 145}
 146
 147static int codegen_datasec_def(struct bpf_object *obj,
 148			       struct btf *btf,
 149			       struct btf_dump *d,
 150			       const struct btf_type *sec,
 151			       const char *obj_name)
 152{
 153	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
 154	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
 155	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
 156	char var_ident[256], sec_ident[256];
 157	bool strip_mods = false;
 158
 159	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
 160		return 0;
 161
 162	if (strcmp(sec_name, ".kconfig") != 0)
 163		strip_mods = true;
 164
 165	printf("	struct %s__%s {\n", obj_name, sec_ident);
 166	for (i = 0; i < vlen; i++, sec_var++) {
 167		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
 168		const char *var_name = btf__name_by_offset(btf, var->name_off);
 169		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
 170			.field_name = var_ident,
 171			.indent_level = 2,
 172			.strip_mods = strip_mods,
 173		);
 174		int need_off = sec_var->offset, align_off, align;
 175		__u32 var_type_id = var->type;
 176
 177		/* static variables are not exposed through BPF skeleton */
 178		if (btf_var(var)->linkage == BTF_VAR_STATIC)
 179			continue;
 180
 181		if (off > need_off) {
 182			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
 183			      sec_name, i, need_off, off);
 184			return -EINVAL;
 185		}
 186
 187		align = btf__align_of(btf, var->type);
 188		if (align <= 0) {
 189			p_err("Failed to determine alignment of variable '%s': %d",
 190			      var_name, align);
 191			return -EINVAL;
 192		}
 193		/* Assume 32-bit architectures when generating data section
 194		 * struct memory layout. Given bpftool can't know which target
 195		 * host architecture it's emitting skeleton for, we need to be
 196		 * conservative and assume 32-bit one to ensure enough padding
 197		 * bytes are generated for pointer and long types. This will
 198		 * still work correctly for 64-bit architectures, because in
 199		 * the worst case we'll generate unnecessary padding field,
 200		 * which on 64-bit architectures is not strictly necessary and
 201		 * would be handled by natural 8-byte alignment. But it still
 202		 * will be a correct memory layout, based on recorded offsets
 203		 * in BTF.
 204		 */
 205		if (align > 4)
 206			align = 4;
 207
 208		align_off = (off + align - 1) / align * align;
 209		if (align_off != need_off) {
 210			printf("\t\tchar __pad%d[%d];\n",
 211			       pad_cnt, need_off - off);
 212			pad_cnt++;
 213		}
 214
 215		/* sanitize variable name, e.g., for static vars inside
  216		 * a function, its name is '<function name>.<variable name>',
 217		 * which we'll turn into a '<function name>_<variable name>'
 218		 */
 219		var_ident[0] = '\0';
 220		strncat(var_ident, var_name, sizeof(var_ident) - 1);
 221		sanitize_identifier(var_ident);
 222
 223		printf("\t\t");
 224		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
 225		if (err)
 226			return err;
 227		printf(";\n");
 228
 229		off = sec_var->offset + sec_var->size;
 230	}
 231	printf("	} *%s;\n", sec_ident);
 232	return 0;
 233}
 234
 235static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
 236{
 237	int n = btf__type_cnt(btf), i;
 238	char sec_ident[256];
 239
 240	for (i = 1; i < n; i++) {
 241		const struct btf_type *t = btf__type_by_id(btf, i);
 242		const char *name;
 243
 244		if (!btf_is_datasec(t))
 245			continue;
 246
 247		name = btf__str_by_offset(btf, t->name_off);
 248		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
 249			continue;
 250
 251		if (strcmp(sec_ident, map_ident) == 0)
 252			return t;
 253	}
 254	return NULL;
 255}
 256
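      /* True for maps that get a memory-mapped view in the skeleton: arena
       * maps with an initial value, and recognized internal
       * (.data/.rodata/.bss/.kconfig) maps created with BPF_F_MMAPABLE;
       * 'buf' receives the identifier to use for the skeleton member.
       */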
 257static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
 258{
 259	size_t tmp_sz;
 260
 261	if (bpf_map__type(map) == BPF_MAP_TYPE_ARENA && bpf_map__initial_value(map, &tmp_sz)) {
 262		snprintf(buf, sz, "arena");
 263		return true;
 264	}
 265
 266	if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
 267		return false;
 268
 269	if (!get_map_ident(map, buf, sz))
 270		return false;
 271
 272	return true;
 273}
 274
 275static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
 276{
 277	struct btf *btf = bpf_object__btf(obj);
 278	struct btf_dump *d;
 279	struct bpf_map *map;
 280	const struct btf_type *sec;
 281	char map_ident[256];
 282	int err = 0;
 283
 284	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
 285	if (!d)
 286		return -errno;
 287
 288	bpf_object__for_each_map(map, obj) {
 289		/* only generate definitions for memory-mapped internal maps */
 290		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
 291			continue;
 292
 293		sec = find_type_for_map(btf, map_ident);
 294
 295		/* In some cases (e.g., sections like .rodata.cst16 containing
 296		 * compiler allocated string constants only) there will be
 297		 * special internal maps with no corresponding DATASEC BTF
 298		 * type. In such case, generate empty structs for each such
 299		 * map. It will still be memory-mapped and its contents
 300		 * accessible from user-space through BPF skeleton.
 301		 */
 302		if (!sec) {
 303			printf("	struct %s__%s {\n", obj_name, map_ident);
 304			printf("	} *%s;\n", map_ident);
 305		} else {
 306			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
 307			if (err)
 308				goto out;
 309		}
 310	}
 311
 312
 313out:
 314	btf_dump__free(d);
 315	return err;
 316}
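      /* For a constant-only section like .rodata.str1.1 (no DATASEC BTF), the
       * emitted member is an empty struct, e.g.:
       *
       *	struct example__rodata_str1_1 {
       *	} *rodata_str1_1;
       */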
 317
 318static bool btf_is_ptr_to_func_proto(const struct btf *btf,
 319				     const struct btf_type *v)
 320{
 321	return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
 322}
 323
 324static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
 325{
 326	struct btf *btf = bpf_object__btf(obj);
 327	struct btf_dump *d;
 328	struct bpf_map *map;
 329	const struct btf_type *sec, *var;
 330	const struct btf_var_secinfo *sec_var;
 331	int i, err = 0, vlen;
 332	char map_ident[256], sec_ident[256];
 333	bool strip_mods = false, needs_typeof = false;
 334	const char *sec_name, *var_name;
 335	__u32 var_type_id;
 336
 337	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
 338	if (!d)
 339		return -errno;
 340
 341	bpf_object__for_each_map(map, obj) {
 342		/* only generate definitions for memory-mapped internal maps */
 343		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
 344			continue;
 345
 346		sec = find_type_for_map(btf, map_ident);
 347		if (!sec)
 348			continue;
 349
 350		sec_name = btf__name_by_offset(btf, sec->name_off);
 351		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
 352			continue;
 353
 354		strip_mods = strcmp(sec_name, ".kconfig") != 0;
 355		printf("	struct %s__%s {\n", obj_name, sec_ident);
 356
 357		sec_var = btf_var_secinfos(sec);
 358		vlen = btf_vlen(sec);
 359		for (i = 0; i < vlen; i++, sec_var++) {
 360			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
 361				.indent_level = 2,
 362				.strip_mods = strip_mods,
 363				/* we'll print the name separately */
 364				.field_name = "",
 365			);
 366
 367			var = btf__type_by_id(btf, sec_var->type);
 368			var_name = btf__name_by_offset(btf, var->name_off);
 369			var_type_id = var->type;
 370
 371			/* static variables are not exposed through BPF skeleton */
 372			if (btf_var(var)->linkage == BTF_VAR_STATIC)
 373				continue;
 374
 375			/* The datasec member has KIND_VAR but we want the
 376			 * underlying type of the variable (e.g. KIND_INT).
 377			 */
 378			var = skip_mods_and_typedefs(btf, var->type, NULL);
 379
 380			printf("\t\t");
 381			/* Func and array members require special handling.
 382			 * Instead of producing `typename *var`, they produce
 383			 * `typeof(typename) *var`. This allows us to keep a
 384			 * similar syntax where the identifier is just prefixed
 385			 * by *, allowing us to ignore C declaration minutiae.
 386			 */
 387			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
 388			if (needs_typeof)
 389				printf("typeof(");
 390
 391			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
 392			if (err)
 393				goto out;
 394
 395			if (needs_typeof)
 396				printf(")");
 397
 398			printf(" *%s;\n", var_name);
 399		}
 400		printf("	} %s;\n", sec_ident);
 401	}
 402
 403out:
 404	btf_dump__free(d);
 405	return err;
 406}
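      /* Rough example of emitted subskeleton members: a global 'int x' becomes
       * 'int *x;', while an array or function-pointer variable is wrapped as
       * 'typeof(...) *name;' so the '*' prefix stays uniform.
       */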
 407
 408static void codegen(const char *template, ...)
 409{
 410	const char *src, *end;
 411	int skip_tabs = 0, n;
 412	char *s, *dst;
 413	va_list args;
 414	char c;
 415
 416	n = strlen(template);
 417	s = malloc(n + 1);
 418	if (!s)
 419		exit(-1);
 420	src = template;
 421	dst = s;
 422
 423	/* find out "baseline" indentation to skip */
 424	while ((c = *src++)) {
 425		if (c == '\t') {
 426			skip_tabs++;
 427		} else if (c == '\n') {
 428			break;
 429		} else {
 430			p_err("unrecognized character at pos %td in template '%s': '%c'",
 431			      src - template - 1, template, c);
 432			free(s);
 433			exit(-1);
 434		}
 435	}
 436
 437	while (*src) {
 438		/* skip baseline indentation tabs */
 439		for (n = skip_tabs; n > 0; n--, src++) {
 440			if (*src != '\t') {
 441				p_err("not enough tabs at pos %td in template '%s'",
 442				      src - template - 1, template);
 443				free(s);
 444				exit(-1);
 445			}
 446		}
 447		/* trim trailing whitespace */
 448		end = strchrnul(src, '\n');
 449		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
 450			;
 451		memcpy(dst, src, n);
 452		dst += n;
 453		if (*end)
 454			*dst++ = '\n';
 455		src = *end ? end + 1 : end;
 456	}
 457	*dst++ = '\0';
 458
 459	/* print out using adjusted template */
 460	va_start(args, template);
 461	n = vprintf(s, args);
 462	va_end(args);
 463
 464	free(s);
 465}
 466
 467static void print_hex(const char *data, int data_sz)
 468{
 469	int i, len;
 470
 471	for (i = 0, len = 0; i < data_sz; i++) {
 472		int w = data[i] ? 4 : 2;
 473
 474		len += w;
 475		if (len > 78) {
 476			printf("\\\n");
 477			len = w;
 478		}
 479		if (!data[i])
 480			printf("\\0");
 481		else
 482			printf("\\x%02x", (unsigned char)data[i]);
 483	}
 484}
 485
 486static size_t bpf_map_mmap_sz(const struct bpf_map *map)
 487{
 488	long page_sz = sysconf(_SC_PAGE_SIZE);
 489	size_t map_sz;
 490
 491	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
 492	map_sz = roundup(map_sz, page_sz);
 493	return map_sz;
 494}
 495
 496/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
 497static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
 498{
 499	struct btf *btf = bpf_object__btf(obj);
 500	struct bpf_map *map;
 501	struct btf_var_secinfo *sec_var;
 502	int i, vlen;
 503	const struct btf_type *sec;
 504	char map_ident[256], var_ident[256];
 505
 506	if (!btf)
 507		return;
 508
 509	codegen("\
 510		\n\
 511		__attribute__((unused)) static void			    \n\
 512		%1$s__assert(struct %1$s *s __attribute__((unused)))	    \n\
 513		{							    \n\
 514		#ifdef __cplusplus					    \n\
 515		#define _Static_assert static_assert			    \n\
 516		#endif							    \n\
 517		", obj_name);
 518
 519	bpf_object__for_each_map(map, obj) {
 520		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
 521			continue;
 522
 523		sec = find_type_for_map(btf, map_ident);
 524		if (!sec) {
 525			/* best effort, couldn't find the type for this map */
 526			continue;
 527		}
 528
 529		sec_var = btf_var_secinfos(sec);
 530		vlen =  btf_vlen(sec);
 531
 532		for (i = 0; i < vlen; i++, sec_var++) {
 533			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
 534			const char *var_name = btf__name_by_offset(btf, var->name_off);
 535			long var_size;
 536
 537			/* static variables are not exposed through BPF skeleton */
 538			if (btf_var(var)->linkage == BTF_VAR_STATIC)
 539				continue;
 540
 541			var_size = btf__resolve_size(btf, var->type);
 542			if (var_size < 0)
 543				continue;
 544
 545			var_ident[0] = '\0';
 546			strncat(var_ident, var_name, sizeof(var_ident) - 1);
 547			sanitize_identifier(var_ident);
 548
 549			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
 550			       map_ident, var_ident, var_size, var_ident);
 551		}
 552	}
 553	codegen("\
 554		\n\
 555		#ifdef __cplusplus					    \n\
 556		#undef _Static_assert					    \n\
 557		#endif							    \n\
 558		}							    \n\
 559		");
 560}
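      /* Example of an emitted assert for a global 'int my_counter' in .bss:
       *
       *	_Static_assert(sizeof(s->bss->my_counter) == 4, "unexpected size of 'my_counter'");
       */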
 561
 562static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
 563{
 564	struct bpf_program *prog;
 565
 566	bpf_object__for_each_program(prog, obj) {
 567		const char *tp_name;
 568
 569		codegen("\
 570			\n\
 571			\n\
 572			static inline int					    \n\
 573			%1$s__%2$s__attach(struct %1$s *skel)			    \n\
 574			{							    \n\
 575				int prog_fd = skel->progs.%2$s.prog_fd;		    \n\
 576			", obj_name, bpf_program__name(prog));
 577
 578		switch (bpf_program__type(prog)) {
 579		case BPF_PROG_TYPE_RAW_TRACEPOINT:
 580			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
 581			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
 582			break;
 583		case BPF_PROG_TYPE_TRACING:
 584		case BPF_PROG_TYPE_LSM:
 585			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
 586				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
 587			else
 588				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
 589			break;
 590		default:
 591			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
 592			break;
 593		}
 594		codegen("\
 595			\n\
 596										    \n\
 597				if (fd > 0)					    \n\
 598					skel->links.%1$s_fd = fd;		    \n\
 599				return fd;					    \n\
 600			}							    \n\
 601			", bpf_program__name(prog));
 602	}
 603
 604	codegen("\
 605		\n\
 606									    \n\
 607		static inline int					    \n\
 608		%1$s__attach(struct %1$s *skel)				    \n\
 609		{							    \n\
 610			int ret = 0;					    \n\
 611									    \n\
 612		", obj_name);
 613
 614	bpf_object__for_each_program(prog, obj) {
 615		codegen("\
 616			\n\
 617				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel);   \n\
 618			", obj_name, bpf_program__name(prog));
 619	}
 620
 621	codegen("\
 622		\n\
 623			return ret < 0 ? ret : 0;			    \n\
 624		}							    \n\
 625									    \n\
 626		static inline void					    \n\
 627		%1$s__detach(struct %1$s *skel)				    \n\
 628		{							    \n\
 629		", obj_name);
 630
 631	bpf_object__for_each_program(prog, obj) {
 632		codegen("\
 633			\n\
 634				skel_closenz(skel->links.%1$s_fd);	    \n\
 635			", bpf_program__name(prog));
 636	}
 637
 638	codegen("\
 639		\n\
 640		}							    \n\
 641		");
 642}
 643
 644static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
 645{
 646	struct bpf_program *prog;
 647	struct bpf_map *map;
 648	char ident[256];
 649
 650	codegen("\
 651		\n\
 652		static void						    \n\
 653		%1$s__destroy(struct %1$s *skel)			    \n\
 654		{							    \n\
 655			if (!skel)					    \n\
 656				return;					    \n\
 657			%1$s__detach(skel);				    \n\
 658		",
 659		obj_name);
 660
 661	bpf_object__for_each_program(prog, obj) {
 662		codegen("\
 663			\n\
 664				skel_closenz(skel->progs.%1$s.prog_fd);	    \n\
 665			", bpf_program__name(prog));
 666	}
 667
 668	bpf_object__for_each_map(map, obj) {
 669		if (!get_map_ident(map, ident, sizeof(ident)))
 670			continue;
 671		if (bpf_map__is_internal(map) &&
 672		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
 673			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
 674			       ident, bpf_map_mmap_sz(map));
 675		codegen("\
 676			\n\
 677				skel_closenz(skel->maps.%1$s.map_fd);	    \n\
 678			", ident);
 679	}
 680	codegen("\
 681		\n\
 682			skel_free(skel);				    \n\
 683		}							    \n\
 684		",
 685		obj_name);
 686}
 687
 688static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
 689{
 690	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
 691	struct bpf_map *map;
 692	char ident[256];
 693	int err = 0;
 694
 695	err = bpf_object__gen_loader(obj, &opts);
 696	if (err)
 697		return err;
 698
 699	err = bpf_object__load(obj);
 700	if (err) {
 701		p_err("failed to load object file");
 702		goto out;
 703	}
 704	/* If there was no error during load then gen_loader_opts
 705	 * are populated with the loader program.
 706	 */
 707
 708	/* finish generating 'struct skel' */
 709	codegen("\
 710		\n\
 711		};							    \n\
 712		", obj_name);
 713
 714
 715	codegen_attach_detach(obj, obj_name);
 716
 717	codegen_destroy(obj, obj_name);
 718
 719	codegen("\
 720		\n\
 721		static inline struct %1$s *				    \n\
 722		%1$s__open(void)					    \n\
 723		{							    \n\
 724			struct %1$s *skel;				    \n\
 725									    \n\
 726			skel = skel_alloc(sizeof(*skel));		    \n\
 727			if (!skel)					    \n\
 728				goto cleanup;				    \n\
 729			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
 730		",
 731		obj_name, opts.data_sz);
 732	bpf_object__for_each_map(map, obj) {
 733		const void *mmap_data = NULL;
 734		size_t mmap_size = 0;
 735
 736		if (!is_mmapable_map(map, ident, sizeof(ident)))
 737			continue;
 738
 739		codegen("\
 740		\n\
 741			{						    \n\
 742				static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
 743		");
 744		mmap_data = bpf_map__initial_value(map, &mmap_size);
 745		print_hex(mmap_data, mmap_size);
 746		codegen("\
 747		\n\
 748		\";							    \n\
 749									    \n\
 750				skel->%1$s = skel_prep_map_data((void *)data, %2$zd,\n\
 751								sizeof(data) - 1);\n\
 752				if (!skel->%1$s)			    \n\
 753					goto cleanup;			    \n\
 754				skel->maps.%1$s.initial_value = (__u64) (long) skel->%1$s;\n\
 755			}						    \n\
 756			", ident, bpf_map_mmap_sz(map));
 757	}
 758	codegen("\
 759		\n\
 760			return skel;					    \n\
 761		cleanup:						    \n\
 762			%1$s__destroy(skel);				    \n\
 763			return NULL;					    \n\
 764		}							    \n\
 765									    \n\
 766		static inline int					    \n\
 767		%1$s__load(struct %1$s *skel)				    \n\
 768		{							    \n\
 769			struct bpf_load_and_run_opts opts = {};		    \n\
 770			int err;					    \n\
 771			static const char opts_data[] __attribute__((__aligned__(8))) = \"\\\n\
 772		",
 773		obj_name);
 774	print_hex(opts.data, opts.data_sz);
 775	codegen("\
 776		\n\
 777		\";							    \n\
 778			static const char opts_insn[] __attribute__((__aligned__(8))) = \"\\\n\
 779		");
 780	print_hex(opts.insns, opts.insns_sz);
 781	codegen("\
 782		\n\
 783		\";							    \n\
 784									    \n\
 785			opts.ctx = (struct bpf_loader_ctx *)skel;	    \n\
 786			opts.data_sz = sizeof(opts_data) - 1;		    \n\
 787			opts.data = (void *)opts_data;			    \n\
 788			opts.insns_sz = sizeof(opts_insn) - 1;		    \n\
 789			opts.insns = (void *)opts_insn;			    \n\
 790									    \n\
 791			err = bpf_load_and_run(&opts);			    \n\
 792			if (err < 0)					    \n\
 793				return err;				    \n\
 794		");
 795	bpf_object__for_each_map(map, obj) {
 796		const char *mmap_flags;
 797
 798		if (!is_mmapable_map(map, ident, sizeof(ident)))
 799			continue;
 800
 801		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
 802			mmap_flags = "PROT_READ";
 803		else
 804			mmap_flags = "PROT_READ | PROT_WRITE";
 805
 806		codegen("\
 807		\n\
 808			skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value,  \n\
 809							%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
 810			if (!skel->%1$s)				    \n\
 811				return -ENOMEM;				    \n\
 812			",
 813		       ident, bpf_map_mmap_sz(map), mmap_flags);
 814	}
 815	codegen("\
 816		\n\
 817			return 0;					    \n\
 818		}							    \n\
 819									    \n\
 820		static inline struct %1$s *				    \n\
 821		%1$s__open_and_load(void)				    \n\
 822		{							    \n\
 823			struct %1$s *skel;				    \n\
 824									    \n\
 825			skel = %1$s__open();				    \n\
 826			if (!skel)					    \n\
 827				return NULL;				    \n\
 828			if (%1$s__load(skel)) {				    \n\
 829				%1$s__destroy(skel);			    \n\
 830				return NULL;				    \n\
 831			}						    \n\
 832			return skel;					    \n\
 833		}							    \n\
 834									    \n\
 835		", obj_name);
 836
 837	codegen_asserts(obj, obj_name);
 838
 839	codegen("\
 840		\n\
 841									    \n\
 842		#endif /* %s */						    \n\
 843		",
 844		header_guard);
 845	err = 0;
 846out:
 847	return err;
 848}
 849
 850static void
 851codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
 852{
 853	struct bpf_map *map;
 854	char ident[256];
 855	size_t i;
 856
 857	if (!map_cnt)
 858		return;
 859
 860	codegen("\
 861		\n\
 862									\n\
 863			/* maps */				    \n\
 864			s->map_cnt = %zu;			    \n\
 865			s->map_skel_sz = sizeof(*s->maps);	    \n\
 866			s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
 867			if (!s->maps) {				    \n\
 868				err = -ENOMEM;			    \n\
 869				goto err;			    \n\
 870			}					    \n\
 871		",
 872		map_cnt
 873	);
 874	i = 0;
 875	bpf_object__for_each_map(map, obj) {
 876		if (!get_map_ident(map, ident, sizeof(ident)))
 877			continue;
 878
 879		codegen("\
 880			\n\
 881									\n\
 882				s->maps[%zu].name = \"%s\";	    \n\
 883				s->maps[%zu].map = &obj->maps.%s;   \n\
 884			",
 885			i, bpf_map__name(map), i, ident);
 886		/* memory-mapped internal maps */
 887		if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
 888			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
 889				i, ident);
 890		}
 891		i++;
 892	}
 893}
 894
 895static void
 896codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
 897{
 898	struct bpf_program *prog;
 899	int i;
 900
 901	if (!prog_cnt)
 902		return;
 903
 904	codegen("\
 905		\n\
 906									\n\
 907			/* programs */				    \n\
 908			s->prog_cnt = %zu;			    \n\
 909			s->prog_skel_sz = sizeof(*s->progs);	    \n\
 910			s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
 911			if (!s->progs) {			    \n\
 912				err = -ENOMEM;			    \n\
 913				goto err;			    \n\
 914			}					    \n\
 915		",
 916		prog_cnt
 917	);
 918	i = 0;
 919	bpf_object__for_each_program(prog, obj) {
 920		codegen("\
 921			\n\
 922									\n\
 923				s->progs[%1$zu].name = \"%2$s\";    \n\
 924				s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
 925			",
 926			i, bpf_program__name(prog));
 927
 928		if (populate_links) {
 929			codegen("\
 930				\n\
 931					s->progs[%1$zu].link = &obj->links.%2$s;\n\
 932				",
 933				i, bpf_program__name(prog));
 934		}
 935		i++;
 936	}
 937}
 938
 939static int walk_st_ops_shadow_vars(struct btf *btf, const char *ident,
 940				   const struct btf_type *map_type, __u32 map_type_id)
 941{
 942	LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .indent_level = 3);
 943	const struct btf_type *member_type;
 944	__u32 offset, next_offset = 0;
 945	const struct btf_member *m;
 946	struct btf_dump *d = NULL;
 947	const char *member_name;
 948	__u32 member_type_id;
 949	int i, err = 0, n;
 950	int size;
 951
 952	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
 953	if (!d)
 954		return -errno;
 955
 956	n = btf_vlen(map_type);
 957	for (i = 0, m = btf_members(map_type); i < n; i++, m++) {
 958		member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id);
 959		member_name = btf__name_by_offset(btf, m->name_off);
 960
 961		offset = m->offset / 8;
 962		if (next_offset < offset)
 963			printf("\t\t\tchar __padding_%d[%d];\n", i, offset - next_offset);
 964
 965		switch (btf_kind(member_type)) {
 966		case BTF_KIND_INT:
 967		case BTF_KIND_FLOAT:
 968		case BTF_KIND_ENUM:
 969		case BTF_KIND_ENUM64:
 970			/* scalar type */
 971			printf("\t\t\t");
 972			opts.field_name = member_name;
 973			err = btf_dump__emit_type_decl(d, member_type_id, &opts);
 974			if (err) {
 975				p_err("Failed to emit type declaration for %s: %d", member_name, err);
 976				goto out;
 977			}
 978			printf(";\n");
 979
 980			size = btf__resolve_size(btf, member_type_id);
 981			if (size < 0) {
 982				p_err("Failed to resolve size of %s: %d\n", member_name, size);
 983				err = size;
 984				goto out;
 985			}
 986
 987			next_offset = offset + size;
 988			break;
 989
 990		case BTF_KIND_PTR:
 991			if (resolve_func_ptr(btf, m->type, NULL)) {
 992				/* Function pointer */
 993				printf("\t\t\tstruct bpf_program *%s;\n", member_name);
 994
 995				next_offset = offset + sizeof(void *);
 996				break;
 997			}
 998			/* All pointer types are unsupported except for
 999			 * function pointers.
1000			 */
1001			fallthrough;
1002
1003		default:
1004			/* Unsupported types
1005			 *
1006			 * Types other than scalar types and function
1007			 * pointers are currently not supported in order to
1008			 * prevent conflicts in the generated code caused
1009			 * by multiple definitions. For instance, if the
1010			 * struct type FOO is used in a struct_ops map,
1011			 * bpftool has to generate definitions for FOO,
1012			 * which may result in conflicts if FOO is defined
1013			 * in different skeleton files.
1014			 */
1015			size = btf__resolve_size(btf, member_type_id);
1016			if (size < 0) {
1017				p_err("Failed to resolve size of %s: %d\n", member_name, size);
1018				err = size;
1019				goto out;
1020			}
1021			printf("\t\t\tchar __unsupported_%d[%d];\n", i, size);
1022
1023			next_offset = offset + size;
1024			break;
1025		}
1026	}
1027
1028	/* Cannot fail since it must be a struct type */
1029	size = btf__resolve_size(btf, map_type_id);
1030	if (next_offset < (__u32)size)
1031		printf("\t\t\tchar __padding_end[%d];\n", size - next_offset);
1032
1033out:
1034	btf_dump__free(d);
1035
1036	return err;
1037}
1038
1039/* Generate the pointer to the shadow type for a struct_ops map.
1040 *
1041 * This function emits a pointer to the shadow type for a struct_ops map.
1042 * The members of a struct_ops map can be exported through such a pointer,
1043 * and the user can access these members through it.
1044 *
1045 * A shadow type does not include all members; it includes only members of
1046 * certain types: scalar types and function pointers. Function pointers are
1047 * translated to pointers to struct bpf_program. Scalar types are
1048 * translated to the original type, stripped of any modifiers.
1049 *
1050 * Unsupported types are translated to a char array occupying the same
1051 * space as the original field and renamed to __unsupported_*.  The user
1052 * should treat these fields as opaque data.
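 *
 * As an illustrative sketch (hypothetical names, not taken from any real
 * object): for a struct_ops map "ops" whose BTF value type is
 *
 *	struct my_ops {
 *		int (*handler)(int x);
 *		int flags;
 *	};
 *
 * and an object named "obj", the emitted shadow type looks roughly like
 *
 *	struct obj__ops__my_ops {
 *		struct bpf_program *handler;
 *		int flags;
 *		char __padding_end[4];
 *	} *ops;
 *
 * nested inside the skeleton's "struct_ops" struct, so userspace can access
 * the members as skel->struct_ops.ops->flags.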
1053 */
1054static int gen_st_ops_shadow_type(const char *obj_name, struct btf *btf, const char *ident,
1055				  const struct bpf_map *map)
1056{
1057	const struct btf_type *map_type;
1058	const char *type_name;
1059	__u32 map_type_id;
1060	int err;
1061
1062	map_type_id = bpf_map__btf_value_type_id(map);
1063	if (map_type_id == 0)
1064		return -EINVAL;
1065	map_type = btf__type_by_id(btf, map_type_id);
1066	if (!map_type)
1067		return -EINVAL;
1068
1069	type_name = btf__name_by_offset(btf, map_type->name_off);
1070
1071	printf("\t\tstruct %s__%s__%s {\n", obj_name, ident, type_name);
1072
1073	err = walk_st_ops_shadow_vars(btf, ident, map_type, map_type_id);
1074	if (err)
1075		return err;
1076
1077	printf("\t\t} *%s;\n", ident);
1078
1079	return 0;
1080}
1081
1082static int gen_st_ops_shadow(const char *obj_name, struct btf *btf, struct bpf_object *obj)
1083{
1084	int err, st_ops_cnt = 0;
1085	struct bpf_map *map;
1086	char ident[256];
1087
1088	if (!btf)
1089		return 0;
1090
1091	/* Generate the pointers to shadow types of
1092	 * struct_ops maps.
1093	 */
1094	bpf_object__for_each_map(map, obj) {
1095		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
1096			continue;
1097		if (!get_map_ident(map, ident, sizeof(ident)))
1098			continue;
1099
1100		if (st_ops_cnt == 0) /* first struct_ops map */
1101			printf("\tstruct {\n");
1102		st_ops_cnt++;
1103
1104		err = gen_st_ops_shadow_type(obj_name, btf, ident, map);
1105		if (err)
1106			return err;
1107	}
1108
1109	if (st_ops_cnt)
1110		printf("\t} struct_ops;\n");
1111
1112	return 0;
1113}
1114
1115/* Generate the code to initialize the pointers of shadow types. */
1116static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
1117{
1118	struct bpf_map *map;
1119	char ident[256];
1120
1121	if (!btf)
1122		return;
1123
1124	/* Initialize the pointers to shadow types of
1125	 * struct_ops maps.
1126	 */
1127	bpf_object__for_each_map(map, obj) {
1128		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
1129			continue;
1130		if (!get_map_ident(map, ident, sizeof(ident)))
1131			continue;
1132		codegen("\
1133			\n\
1134				obj->struct_ops.%1$s = bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
1135			\n\
1136			", ident);
1137	}
1138}
1139
1140static int do_skeleton(int argc, char **argv)
1141{
1142	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
1143	size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
1144	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
1145	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
1146	struct bpf_object *obj = NULL;
1147	const char *file;
1148	char ident[256];
1149	struct bpf_program *prog;
1150	int fd, err = -1;
1151	struct bpf_map *map;
1152	struct btf *btf;
1153	struct stat st;
1154
1155	if (!REQ_ARGS(1)) {
1156		usage();
1157		return -1;
1158	}
1159	file = GET_ARG();
1160
1161	while (argc) {
1162		if (!REQ_ARGS(2))
1163			return -1;
1164
1165		if (is_prefix(*argv, "name")) {
1166			NEXT_ARG();
1167
1168			if (obj_name[0] != '\0') {
1169				p_err("object name already specified");
1170				return -1;
1171			}
1172
1173			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
1174			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
1175		} else {
1176			p_err("unknown arg %s", *argv);
1177			return -1;
1178		}
1179
1180		NEXT_ARG();
1181	}
1182
1183	if (argc) {
1184		p_err("extra unknown arguments");
1185		return -1;
1186	}
1187
1188	if (stat(file, &st)) {
1189		p_err("failed to stat() %s: %s", file, strerror(errno));
1190		return -1;
1191	}
1192	file_sz = st.st_size;
1193	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
1194	fd = open(file, O_RDONLY);
1195	if (fd < 0) {
1196		p_err("failed to open() %s: %s", file, strerror(errno));
1197		return -1;
1198	}
1199	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
1200	if (obj_data == MAP_FAILED) {
1201		obj_data = NULL;
1202		p_err("failed to mmap() %s: %s", file, strerror(errno));
1203		goto out;
1204	}
1205	if (obj_name[0] == '\0')
1206		get_obj_name(obj_name, file);
1207	opts.object_name = obj_name;
1208	if (verifier_logs)
1209		/* log_level1 + log_level2 + stats, but not stable UAPI */
1210		opts.kernel_log_level = 1 + 2 + 4;
1211	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
1212	if (!obj) {
1213		char err_buf[256];
1214
1215		err = -errno;
1216		libbpf_strerror(err, err_buf, sizeof(err_buf));
1217		p_err("failed to open BPF object file: %s", err_buf);
1218		goto out;
1219	}
1220
1221	bpf_object__for_each_map(map, obj) {
1222		if (!get_map_ident(map, ident, sizeof(ident))) {
1223			p_err("ignoring unrecognized internal map '%s'...",
1224			      bpf_map__name(map));
1225			continue;
1226		}
1227		map_cnt++;
1228	}
1229	bpf_object__for_each_program(prog, obj) {
1230		prog_cnt++;
1231	}
1232
1233	get_header_guard(header_guard, obj_name, "SKEL_H");
1234	if (use_loader) {
1235		codegen("\
1236		\n\
1237		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
1238		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */		    \n\
1239		#ifndef %2$s						    \n\
1240		#define %2$s						    \n\
1241									    \n\
1242		#include <bpf/skel_internal.h>				    \n\
1243									    \n\
1244		struct %1$s {						    \n\
1245			struct bpf_loader_ctx ctx;			    \n\
1246		",
1247		obj_name, header_guard
1248		);
1249	} else {
1250		codegen("\
1251		\n\
1252		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */   \n\
1253									    \n\
1254		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */		    \n\
1255		#ifndef %2$s						    \n\
1256		#define %2$s						    \n\
1257									    \n\
1258		#include <errno.h>					    \n\
1259		#include <stdlib.h>					    \n\
1260		#include <bpf/libbpf.h>					    \n\
1261									    \n\
1262		struct %1$s {						    \n\
1263			struct bpf_object_skeleton *skeleton;		    \n\
1264			struct bpf_object *obj;				    \n\
1265		",
1266		obj_name, header_guard
1267		);
1268	}
1269
1270	if (map_cnt) {
1271		printf("\tstruct {\n");
1272		bpf_object__for_each_map(map, obj) {
1273			if (!get_map_ident(map, ident, sizeof(ident)))
1274				continue;
1275			if (use_loader)
1276				printf("\t\tstruct bpf_map_desc %s;\n", ident);
1277			else
1278				printf("\t\tstruct bpf_map *%s;\n", ident);
1279		}
1280		printf("\t} maps;\n");
1281	}
1282
1283	btf = bpf_object__btf(obj);
1284	err = gen_st_ops_shadow(obj_name, btf, obj);
1285	if (err)
1286		goto out;
1287
1288	if (prog_cnt) {
1289		printf("\tstruct {\n");
1290		bpf_object__for_each_program(prog, obj) {
1291			if (use_loader)
1292				printf("\t\tstruct bpf_prog_desc %s;\n",
1293				       bpf_program__name(prog));
1294			else
1295				printf("\t\tstruct bpf_program *%s;\n",
1296				       bpf_program__name(prog));
1297		}
1298		printf("\t} progs;\n");
1299		printf("\tstruct {\n");
1300		bpf_object__for_each_program(prog, obj) {
1301			if (use_loader)
1302				printf("\t\tint %s_fd;\n",
1303				       bpf_program__name(prog));
1304			else
1305				printf("\t\tstruct bpf_link *%s;\n",
1306				       bpf_program__name(prog));
1307		}
1308		printf("\t} links;\n");
1309	}
1310
1311	if (btf) {
1312		err = codegen_datasecs(obj, obj_name);
1313		if (err)
1314			goto out;
1315	}
1316	if (use_loader) {
1317		err = gen_trace(obj, obj_name, header_guard);
1318		goto out;
1319	}
1320
1321	codegen("\
1322		\n\
1323									    \n\
1324		#ifdef __cplusplus					    \n\
1325			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
1326			static inline struct %1$s *open_and_load();	    \n\
1327			static inline int load(struct %1$s *skel);	    \n\
1328			static inline int attach(struct %1$s *skel);	    \n\
1329			static inline void detach(struct %1$s *skel);	    \n\
1330			static inline void destroy(struct %1$s *skel);	    \n\
1331			static inline const void *elf_bytes(size_t *sz);    \n\
1332		#endif /* __cplusplus */				    \n\
1333		};							    \n\
1334									    \n\
1335		static void						    \n\
1336		%1$s__destroy(struct %1$s *obj)				    \n\
1337		{							    \n\
1338			if (!obj)					    \n\
1339				return;					    \n\
1340			if (obj->skeleton)				    \n\
1341				bpf_object__destroy_skeleton(obj->skeleton);\n\
1342			free(obj);					    \n\
1343		}							    \n\
1344									    \n\
1345		static inline int					    \n\
1346		%1$s__create_skeleton(struct %1$s *obj);		    \n\
1347									    \n\
1348		static inline struct %1$s *				    \n\
1349		%1$s__open_opts(const struct bpf_object_open_opts *opts)    \n\
1350		{							    \n\
1351			struct %1$s *obj;				    \n\
1352			int err;					    \n\
1353									    \n\
1354			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
1355			if (!obj) {					    \n\
1356				errno = ENOMEM;				    \n\
1357				return NULL;				    \n\
1358			}						    \n\
1359									    \n\
1360			err = %1$s__create_skeleton(obj);		    \n\
1361			if (err)					    \n\
1362				goto err_out;				    \n\
1363									    \n\
1364			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
1365			if (err)					    \n\
1366				goto err_out;				    \n\
1367									    \n\
1368		", obj_name);
1369
1370	gen_st_ops_shadow_init(btf, obj);
1371
1372	codegen("\
1373		\n\
1374			return obj;					    \n\
1375		err_out:						    \n\
1376			%1$s__destroy(obj);				    \n\
1377			errno = -err;					    \n\
1378			return NULL;					    \n\
1379		}							    \n\
1380									    \n\
1381		static inline struct %1$s *				    \n\
1382		%1$s__open(void)					    \n\
1383		{							    \n\
1384			return %1$s__open_opts(NULL);			    \n\
1385		}							    \n\
1386									    \n\
1387		static inline int					    \n\
1388		%1$s__load(struct %1$s *obj)				    \n\
1389		{							    \n\
1390			return bpf_object__load_skeleton(obj->skeleton);    \n\
1391		}							    \n\
1392									    \n\
1393		static inline struct %1$s *				    \n\
1394		%1$s__open_and_load(void)				    \n\
1395		{							    \n\
1396			struct %1$s *obj;				    \n\
1397			int err;					    \n\
1398									    \n\
1399			obj = %1$s__open();				    \n\
1400			if (!obj)					    \n\
1401				return NULL;				    \n\
1402			err = %1$s__load(obj);				    \n\
1403			if (err) {					    \n\
1404				%1$s__destroy(obj);			    \n\
1405				errno = -err;				    \n\
1406				return NULL;				    \n\
1407			}						    \n\
1408			return obj;					    \n\
1409		}							    \n\
1410									    \n\
1411		static inline int					    \n\
1412		%1$s__attach(struct %1$s *obj)				    \n\
1413		{							    \n\
1414			return bpf_object__attach_skeleton(obj->skeleton);  \n\
1415		}							    \n\
1416									    \n\
1417		static inline void					    \n\
1418		%1$s__detach(struct %1$s *obj)				    \n\
1419		{							    \n\
1420			bpf_object__detach_skeleton(obj->skeleton);	    \n\
1421		}							    \n\
1422		",
1423		obj_name
1424	);
1425
1426	codegen("\
1427		\n\
1428									    \n\
1429		static inline const void *%1$s__elf_bytes(size_t *sz);	    \n\
1430									    \n\
1431		static inline int					    \n\
1432		%1$s__create_skeleton(struct %1$s *obj)			    \n\
1433		{							    \n\
1434			struct bpf_object_skeleton *s;			    \n\
1435			int err;					    \n\
1436									    \n\
1437			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
1438			if (!s)	{					    \n\
1439				err = -ENOMEM;				    \n\
1440				goto err;				    \n\
1441			}						    \n\
1442									    \n\
1443			s->sz = sizeof(*s);				    \n\
1444			s->name = \"%1$s\";				    \n\
1445			s->obj = &obj->obj;				    \n\
1446		",
1447		obj_name
1448	);
1449
1450	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
1451	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
1452
1453	codegen("\
1454		\n\
1455									    \n\
1456			s->data = %1$s__elf_bytes(&s->data_sz);		    \n\
1457									    \n\
1458			obj->skeleton = s;				    \n\
1459			return 0;					    \n\
1460		err:							    \n\
1461			bpf_object__destroy_skeleton(s);		    \n\
1462			return err;					    \n\
1463		}							    \n\
1464									    \n\
1465		static inline const void *%1$s__elf_bytes(size_t *sz)	    \n\
1466		{							    \n\
1467			static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
1468		",
1469		obj_name
1470	);
1471
1472	/* embed contents of BPF object file */
1473	print_hex(obj_data, file_sz);
1474
1475	codegen("\
1476		\n\
1477		\";							    \n\
1478									    \n\
1479			*sz = sizeof(data) - 1;				    \n\
1480			return (const void *)data;			    \n\
1481		}							    \n\
1482									    \n\
1483		#ifdef __cplusplus					    \n\
1484		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
1485		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); }	\n\
1486		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); }		\n\
1487		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); }	\n\
1488		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); }		\n\
1489		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }		\n\
1490		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
1491		#endif /* __cplusplus */				    \n\
1492									    \n\
1493		",
1494		obj_name);
1495
1496	codegen_asserts(obj, obj_name);
1497
1498	codegen("\
1499		\n\
1500									    \n\
1501		#endif /* %1$s */					    \n\
1502		",
1503		header_guard);
1504	err = 0;
1505out:
1506	bpf_object__close(obj);
1507	if (obj_data)
1508		munmap(obj_data, mmap_sz);
1509	close(fd);
1510	return err;
1511}
1512
1513/* Subskeletons are like skeletons, except they don't own the bpf_object,
1514 * associated maps, links, etc. Instead, they know about the existence of
1515 * variables, maps, programs and are able to find their locations
1516 * _at runtime_ from an already loaded bpf_object.
1517 *
1518 * This allows for library-like BPF objects to have userspace counterparts
1519 * with access to their own items without having to know anything about the
1520 * final BPF object that the library was linked into.
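 *
 * As an illustrative sketch (hypothetical names): if a library object
 * lib.bpf.o is linked into a larger final object, then
 * "bpftool gen subskeleton lib.bpf.o name lib > lib.subskel.h" produces a
 * header whose API can be used roughly like
 *
 *	struct lib *sub = lib__open(final_skel->obj);
 *	if (!sub)
 *		return -errno;
 *	... use sub->progs.* and sub->maps.*, plus the variable pointers ...
 *	... that bpf_object__open_subskeleton() resolved at open time    ...
 *	lib__destroy(sub);
 *
 * where final_skel is the regular skeleton of the final object that the
 * library was linked into.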
1521 */
1522static int do_subskeleton(int argc, char **argv)
1523{
1524	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
1525	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
1526	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
1527	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
1528	struct bpf_object *obj = NULL;
1529	const char *file, *var_name;
1530	char ident[256];
1531	int fd, err = -1, map_type_id;
1532	const struct bpf_map *map;
1533	struct bpf_program *prog;
1534	struct btf *btf;
1535	const struct btf_type *map_type, *var_type;
1536	const struct btf_var_secinfo *var;
1537	struct stat st;
1538
1539	if (!REQ_ARGS(1)) {
1540		usage();
1541		return -1;
1542	}
1543	file = GET_ARG();
1544
1545	while (argc) {
1546		if (!REQ_ARGS(2))
1547			return -1;
1548
1549		if (is_prefix(*argv, "name")) {
1550			NEXT_ARG();
1551
1552			if (obj_name[0] != '\0') {
1553				p_err("object name already specified");
1554				return -1;
1555			}
1556
1557			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
1558			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
1559		} else {
1560			p_err("unknown arg %s", *argv);
1561			return -1;
1562		}
1563
1564		NEXT_ARG();
1565	}
1566
1567	if (argc) {
1568		p_err("extra unknown arguments");
1569		return -1;
1570	}
1571
1572	if (use_loader) {
1573		p_err("cannot use loader for subskeletons");
1574		return -1;
1575	}
1576
1577	if (stat(file, &st)) {
1578		p_err("failed to stat() %s: %s", file, strerror(errno));
1579		return -1;
1580	}
1581	file_sz = st.st_size;
1582	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
1583	fd = open(file, O_RDONLY);
1584	if (fd < 0) {
1585		p_err("failed to open() %s: %s", file, strerror(errno));
1586		return -1;
1587	}
1588	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
1589	if (obj_data == MAP_FAILED) {
1590		obj_data = NULL;
1591		p_err("failed to mmap() %s: %s", file, strerror(errno));
1592		goto out;
1593	}
1594	if (obj_name[0] == '\0')
1595		get_obj_name(obj_name, file);
1596
1597	/* The empty object name allows us to use bpf_map__name and produce
1598	 * ELF section names out of it. (".data" instead of "obj.data")
1599	 */
1600	opts.object_name = "";
1601	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
1602	if (!obj) {
1603		char err_buf[256];
1604
1605		libbpf_strerror(errno, err_buf, sizeof(err_buf));
1606		p_err("failed to open BPF object file: %s", err_buf);
1607		obj = NULL;
1608		goto out;
1609	}
1610
1611	btf = bpf_object__btf(obj);
1612	if (!btf) {
1613		err = -1;
1614		p_err("need btf type information for %s", obj_name);
1615		goto out;
1616	}
1617
1618	bpf_object__for_each_program(prog, obj) {
1619		prog_cnt++;
1620	}
1621
1622	/* First, count how many variables we have to find.
1623	 * We need this in advance so the subskel can allocate the right
1624	 * amount of storage.
1625	 */
1626	bpf_object__for_each_map(map, obj) {
1627		if (!get_map_ident(map, ident, sizeof(ident)))
1628			continue;
1629
1630		/* Also count all maps that have a name */
1631		map_cnt++;
1632
1633		if (!is_mmapable_map(map, ident, sizeof(ident)))
1634			continue;
1635
1636		map_type_id = bpf_map__btf_value_type_id(map);
1637		if (map_type_id <= 0) {
1638			err = map_type_id;
1639			goto out;
1640		}
1641		map_type = btf__type_by_id(btf, map_type_id);
1642
1643		var = btf_var_secinfos(map_type);
1644		len = btf_vlen(map_type);
1645		for (i = 0; i < len; i++, var++) {
1646			var_type = btf__type_by_id(btf, var->type);
1647
1648			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
1649				continue;
1650
1651			var_cnt++;
1652		}
1653	}
1654
1655	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
1656	codegen("\
1657	\n\
1658	/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */	    \n\
1659									    \n\
1660	/* THIS FILE IS AUTOGENERATED! */				    \n\
1661	#ifndef %2$s							    \n\
1662	#define %2$s							    \n\
1663									    \n\
1664	#include <errno.h>						    \n\
1665	#include <stdlib.h>						    \n\
1666	#include <bpf/libbpf.h>						    \n\
1667									    \n\
1668	struct %1$s {							    \n\
1669		struct bpf_object *obj;					    \n\
1670		struct bpf_object_subskeleton *subskel;			    \n\
1671	", obj_name, header_guard);
1672
1673	if (map_cnt) {
1674		printf("\tstruct {\n");
1675		bpf_object__for_each_map(map, obj) {
1676			if (!get_map_ident(map, ident, sizeof(ident)))
1677				continue;
1678			printf("\t\tstruct bpf_map *%s;\n", ident);
1679		}
1680		printf("\t} maps;\n");
1681	}
1682
1683	err = gen_st_ops_shadow(obj_name, btf, obj);
1684	if (err)
1685		goto out;
1686
1687	if (prog_cnt) {
1688		printf("\tstruct {\n");
1689		bpf_object__for_each_program(prog, obj) {
1690			printf("\t\tstruct bpf_program *%s;\n",
1691				bpf_program__name(prog));
1692		}
1693		printf("\t} progs;\n");
1694	}
1695
1696	err = codegen_subskel_datasecs(obj, obj_name);
1697	if (err)
1698		goto out;
1699
1700	/* emit code that will allocate enough storage for all symbols */
1701	codegen("\
1702		\n\
1703									    \n\
1704		#ifdef __cplusplus					    \n\
1705			static inline struct %1$s *open(const struct bpf_object *src);\n\
1706			static inline void destroy(struct %1$s *skel);	    \n\
1707		#endif /* __cplusplus */				    \n\
1708		};							    \n\
1709									    \n\
1710		static inline void					    \n\
1711		%1$s__destroy(struct %1$s *skel)			    \n\
1712		{							    \n\
1713			if (!skel)					    \n\
1714				return;					    \n\
1715			if (skel->subskel)				    \n\
1716				bpf_object__destroy_subskeleton(skel->subskel);\n\
1717			free(skel);					    \n\
1718		}							    \n\
1719									    \n\
1720		static inline struct %1$s *				    \n\
1721		%1$s__open(const struct bpf_object *src)		    \n\
1722		{							    \n\
1723			struct %1$s *obj;				    \n\
1724			struct bpf_object_subskeleton *s;		    \n\
1725			int err;					    \n\
1726									    \n\
1727			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\
1728			if (!obj) {					    \n\
1729				err = -ENOMEM;				    \n\
1730				goto err;				    \n\
1731			}						    \n\
1732			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
1733			if (!s) {					    \n\
1734				err = -ENOMEM;				    \n\
1735				goto err;				    \n\
1736			}						    \n\
1737			s->sz = sizeof(*s);				    \n\
1738			s->obj = src;					    \n\
1739			s->var_skel_sz = sizeof(*s->vars);		    \n\
1740			obj->subskel = s;				    \n\
1741									    \n\
1742			/* vars */					    \n\
1743			s->var_cnt = %2$d;				    \n\
1744			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
1745			if (!s->vars) {					    \n\
1746				err = -ENOMEM;				    \n\
1747				goto err;				    \n\
1748			}						    \n\
1749		",
1750		obj_name, var_cnt
1751	);
1752
1753	/* walk through each symbol and emit the runtime representation */
1754	bpf_object__for_each_map(map, obj) {
1755		if (!is_mmapable_map(map, ident, sizeof(ident)))
1756			continue;
1757
1758		map_type_id = bpf_map__btf_value_type_id(map);
1759		if (map_type_id <= 0)
1760			/* skip over internal maps with no type */
1761			continue;
1762
1763		map_type = btf__type_by_id(btf, map_type_id);
1764		var = btf_var_secinfos(map_type);
1765		len = btf_vlen(map_type);
1766		for (i = 0; i < len; i++, var++) {
1767			var_type = btf__type_by_id(btf, var->type);
1768			var_name = btf__name_by_offset(btf, var_type->name_off);
1769
1770			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
1771				continue;
1772
1773			/* Note that we use the dot prefix in .data as the
1774			 * field access operator i.e. maps%s becomes maps.data
1775			 */
1776			codegen("\
1777			\n\
1778									    \n\
1779				s->vars[%3$d].name = \"%1$s\";		    \n\
1780				s->vars[%3$d].map = &obj->maps.%2$s;	    \n\
1781				s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
1782			", var_name, ident, var_idx);
1783
1784			var_idx++;
1785		}
1786	}
1787
1788	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
1789	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
1790
1791	codegen("\
1792		\n\
1793									    \n\
1794			err = bpf_object__open_subskeleton(s);		    \n\
1795			if (err)					    \n\
1796				goto err;				    \n\
1797									    \n\
1798		");
1799
1800	gen_st_ops_shadow_init(btf, obj);
1801
1802	codegen("\
1803		\n\
1804			return obj;					    \n\
1805		err:							    \n\
1806			%1$s__destroy(obj);				    \n\
1807			errno = -err;					    \n\
1808			return NULL;					    \n\
1809		}							    \n\
1810									    \n\
1811		#ifdef __cplusplus					    \n\
1812		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
1813		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
1814		#endif /* __cplusplus */				    \n\
1815									    \n\
1816		#endif /* %2$s */					    \n\
1817		",
1818		obj_name, header_guard);
1819	err = 0;
1820out:
1821	bpf_object__close(obj);
1822	if (obj_data)
1823		munmap(obj_data, mmap_sz);
1824	close(fd);
1825	return err;
1826}
1827
1828static int do_object(int argc, char **argv)
1829{
1830	struct bpf_linker *linker;
1831	const char *output_file, *file;
1832	int err = 0;
1833
1834	if (!REQ_ARGS(2)) {
1835		usage();
1836		return -1;
1837	}
1838
1839	output_file = GET_ARG();
1840
1841	linker = bpf_linker__new(output_file, NULL);
1842	if (!linker) {
1843		p_err("failed to create BPF linker instance");
1844		return -1;
1845	}
1846
1847	while (argc) {
1848		file = GET_ARG();
1849
1850		err = bpf_linker__add_file(linker, file, NULL);
1851		if (err) {
1852			p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
1853			goto out;
1854		}
1855	}
1856
1857	err = bpf_linker__finalize(linker);
1858	if (err) {
1859		p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
1860		goto out;
1861	}
1862
1863	err = 0;
1864out:
1865	bpf_linker__free(linker);
1866	return err;
1867}
1868
1869static int do_help(int argc, char **argv)
1870{
1871	if (json_output) {
1872		jsonw_null(json_wtr);
1873		return 0;
1874	}
1875
1876	fprintf(stderr,
1877		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
1878		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
1879		"       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
1880		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
1881		"       %1$s %2$s help\n"
1882		"\n"
1883		"       " HELP_SPEC_OPTIONS " |\n"
1884		"                    {-L|--use-loader} }\n"
1885		"",
1886		bin_name, "gen");
1887
1888	return 0;
1889}
1890
1891static int btf_save_raw(const struct btf *btf, const char *path)
1892{
1893	const void *data;
1894	FILE *f = NULL;
1895	__u32 data_sz;
1896	int err = 0;
1897
1898	data = btf__raw_data(btf, &data_sz);
1899	if (!data)
1900		return -ENOMEM;
1901
1902	f = fopen(path, "wb");
1903	if (!f)
1904		return -errno;
1905
1906	if (fwrite(data, 1, data_sz, f) != data_sz)
1907		err = -errno;
1908
1909	fclose(f);
1910	return err;
1911}
1912
1913struct btfgen_info {
1914	struct btf *src_btf;
1915	struct btf *marked_btf; /* btf structure used to mark used types */
1916};
1917
1918static size_t btfgen_hash_fn(long key, void *ctx)
1919{
1920	return key;
1921}
1922
1923static bool btfgen_equal_fn(long k1, long k2, void *ctx)
1924{
1925	return k1 == k2;
1926}
1927
1928static void btfgen_free_info(struct btfgen_info *info)
1929{
1930	if (!info)
1931		return;
1932
1933	btf__free(info->src_btf);
1934	btf__free(info->marked_btf);
1935
1936	free(info);
1937}
1938
1939static struct btfgen_info *
1940btfgen_new_info(const char *targ_btf_path)
1941{
1942	struct btfgen_info *info;
1943	int err;
1944
1945	info = calloc(1, sizeof(*info));
1946	if (!info)
1947		return NULL;
1948
1949	info->src_btf = btf__parse(targ_btf_path, NULL);
1950	if (!info->src_btf) {
1951		err = -errno;
1952		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
1953		goto err_out;
1954	}
1955
1956	info->marked_btf = btf__parse(targ_btf_path, NULL);
1957	if (!info->marked_btf) {
1958		err = -errno;
1959		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
1960		goto err_out;
1961	}
1962
1963	return info;
1964
1965err_out:
1966	btfgen_free_info(info);
1967	errno = -err;
1968	return NULL;
1969}
1970
1971#define MARKED UINT32_MAX
1972
1973static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
1974{
1975	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
1976	struct btf_member *m = btf_members(t) + idx;
1977
1978	m->name_off = MARKED;
1979}
1980
1981static int
1982btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
1983{
1984	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
1985	struct btf_type *cloned_type;
1986	struct btf_param *param;
1987	struct btf_array *array;
1988	int err, i;
1989
1990	if (type_id == 0)
1991		return 0;
1992
1993	/* mark type on cloned BTF as used */
1994	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
1995	cloned_type->name_off = MARKED;
1996
1997	/* recursively mark other types needed by it */
1998	switch (btf_kind(btf_type)) {
1999	case BTF_KIND_UNKN:
2000	case BTF_KIND_INT:
2001	case BTF_KIND_FLOAT:
2002	case BTF_KIND_ENUM:
2003	case BTF_KIND_ENUM64:
2004	case BTF_KIND_STRUCT:
2005	case BTF_KIND_UNION:
2006		break;
2007	case BTF_KIND_PTR:
2008		if (follow_pointers) {
2009			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
2010			if (err)
2011				return err;
2012		}
2013		break;
2014	case BTF_KIND_CONST:
2015	case BTF_KIND_RESTRICT:
2016	case BTF_KIND_VOLATILE:
2017	case BTF_KIND_TYPEDEF:
2018		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
2019		if (err)
2020			return err;
2021		break;
2022	case BTF_KIND_ARRAY:
2023		array = btf_array(btf_type);
2024
2025		/* mark array type */
2026		err = btfgen_mark_type(info, array->type, follow_pointers);
2027		/* mark array's index type */
2028		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
2029		if (err)
2030			return err;
2031		break;
2032	case BTF_KIND_FUNC_PROTO:
2033		/* mark ret type */
2034		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
2035		if (err)
2036			return err;
2037
2038		/* mark parameters types */
2039		param = btf_params(btf_type);
2040		for (i = 0; i < btf_vlen(btf_type); i++) {
2041			err = btfgen_mark_type(info, param->type, follow_pointers);
2042			if (err)
2043				return err;
2044			param++;
2045		}
2046		break;
2047	/* report any other kind that still needs to be handled */
2048	default:
2049		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
2050		return -EINVAL;
2051	}
2052
2053	return 0;
2054}
2055
2056static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
2057{
2058	struct btf *btf = info->src_btf;
2059	const struct btf_type *btf_type;
2060	struct btf_member *btf_member;
2061	struct btf_array *array;
2062	unsigned int type_id = targ_spec->root_type_id;
2063	int idx, err;
2064
2065	/* mark root type */
2066	btf_type = btf__type_by_id(btf, type_id);
2067	err = btfgen_mark_type(info, type_id, false);
2068	if (err)
2069		return err;
2070
2071	/* mark types for complex types (arrays, unions, structures) */
2072	for (int i = 1; i < targ_spec->raw_len; i++) {
2073		/* skip typedefs and mods */
2074		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
2075			type_id = btf_type->type;
2076			btf_type = btf__type_by_id(btf, type_id);
2077		}
2078
2079		switch (btf_kind(btf_type)) {
2080		case BTF_KIND_STRUCT:
2081		case BTF_KIND_UNION:
2082			idx = targ_spec->raw_spec[i];
2083			btf_member = btf_members(btf_type) + idx;
2084
2085			/* mark member */
2086			btfgen_mark_member(info, type_id, idx);
2087
2088			/* mark member's type */
2089			type_id = btf_member->type;
2090			btf_type = btf__type_by_id(btf, type_id);
2091			err = btfgen_mark_type(info, type_id, false);
2092			if (err)
2093				return err;
2094			break;
2095		case BTF_KIND_ARRAY:
2096			array = btf_array(btf_type);
2097			type_id = array->type;
2098			btf_type = btf__type_by_id(btf, type_id);
2099			break;
2100		default:
2101			p_err("unsupported kind: %s (%d)",
2102			      btf_kind_str(btf_type), btf_type->type);
2103			return -EINVAL;
2104		}
2105	}
2106
2107	return 0;
2108}
2109
2110/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
2111 * this function does not rely on the target spec for inferring members, but
2112 * uses the associated BTF.
2113 *
2114 * The `behind_ptr` argument is used to stop marking of composite types reached
2115 * through a pointer. This way, we can keep BTF size in check while providing
2116 * reasonable match semantics.
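 *
 * For example, given a hypothetical target type
 *
 *	struct foo {
 *		struct bar *next;
 *		int x;
 *	};
 *
 * marking "struct foo" marks its members and their types, while
 * "struct bar", reached only behind a pointer, is marked without its
 * members and therefore ends up as an empty struct in the generated BTF.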
2117 */
2118static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
2119{
2120	const struct btf_type *btf_type;
2121	struct btf *btf = info->src_btf;
2122	struct btf_type *cloned_type;
2123	int i, err;
2124
2125	if (type_id == 0)
2126		return 0;
2127
2128	btf_type = btf__type_by_id(btf, type_id);
2129	/* mark type on cloned BTF as used */
2130	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
2131	cloned_type->name_off = MARKED;
2132
2133	switch (btf_kind(btf_type)) {
2134	case BTF_KIND_UNKN:
2135	case BTF_KIND_INT:
2136	case BTF_KIND_FLOAT:
2137	case BTF_KIND_ENUM:
2138	case BTF_KIND_ENUM64:
2139		break;
2140	case BTF_KIND_STRUCT:
2141	case BTF_KIND_UNION: {
2142		struct btf_member *m = btf_members(btf_type);
2143		__u16 vlen = btf_vlen(btf_type);
2144
2145		if (behind_ptr)
2146			break;
2147
2148		for (i = 0; i < vlen; i++, m++) {
2149			/* mark member */
2150			btfgen_mark_member(info, type_id, i);
2151
2152			/* mark member's type */
2153			err = btfgen_mark_type_match(info, m->type, false);
2154			if (err)
2155				return err;
2156		}
2157		break;
2158	}
2159	case BTF_KIND_CONST:
2160	case BTF_KIND_FWD:
2161	case BTF_KIND_RESTRICT:
2162	case BTF_KIND_TYPEDEF:
2163	case BTF_KIND_VOLATILE:
2164		return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
2165	case BTF_KIND_PTR:
2166		return btfgen_mark_type_match(info, btf_type->type, true);
2167	case BTF_KIND_ARRAY: {
2168		struct btf_array *array;
2169
2170		array = btf_array(btf_type);
2171		/* mark array type */
2172		err = btfgen_mark_type_match(info, array->type, false);
2173		/* mark array's index type */
2174		err = err ? : btfgen_mark_type_match(info, array->index_type, false);
2175		if (err)
2176			return err;
2177		break;
2178	}
2179	case BTF_KIND_FUNC_PROTO: {
2180		__u16 vlen = btf_vlen(btf_type);
2181		struct btf_param *param;
2182
2183		/* mark ret type */
2184		err = btfgen_mark_type_match(info, btf_type->type, false);
2185		if (err)
2186			return err;
2187
2188		/* mark parameters types */
2189		param = btf_params(btf_type);
2190		for (i = 0; i < vlen; i++) {
2191			err = btfgen_mark_type_match(info, param->type, false);
2192			if (err)
2193				return err;
2194			param++;
2195		}
2196		break;
2197	}
2198	/* report any other kind that still needs to be handled */
2199	default:
2200		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
2201		return -EINVAL;
2202	}
2203
2204	return 0;
2205}
2206
2207/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
2208 * this function does not rely on the target spec for inferring members, but
2209 * uses the associated BTF.
2210 */
2211static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
2212{
2213	return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
2214}
2215
2216static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
2217{
2218	return btfgen_mark_type(info, targ_spec->root_type_id, true);
2219}
2220
2221static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
2222{
2223	return btfgen_mark_type(info, targ_spec->root_type_id, false);
2224}
2225
2226static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
2227{
2228	switch (res->relo_kind) {
2229	case BPF_CORE_FIELD_BYTE_OFFSET:
2230	case BPF_CORE_FIELD_BYTE_SIZE:
2231	case BPF_CORE_FIELD_EXISTS:
2232	case BPF_CORE_FIELD_SIGNED:
2233	case BPF_CORE_FIELD_LSHIFT_U64:
2234	case BPF_CORE_FIELD_RSHIFT_U64:
2235		return btfgen_record_field_relo(info, res);
2236	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
2237		return 0;
2238	case BPF_CORE_TYPE_ID_TARGET:
2239	case BPF_CORE_TYPE_EXISTS:
2240	case BPF_CORE_TYPE_SIZE:
2241		return btfgen_record_type_relo(info, res);
2242	case BPF_CORE_TYPE_MATCHES:
2243		return btfgen_record_type_match_relo(info, res);
2244	case BPF_CORE_ENUMVAL_EXISTS:
2245	case BPF_CORE_ENUMVAL_VALUE:
2246		return btfgen_record_enumval_relo(info, res);
2247	default:
2248		return -EINVAL;
2249	}
2250}
2251
2252static struct bpf_core_cand_list *
2253btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
2254{
2255	const struct btf_type *local_type;
2256	struct bpf_core_cand_list *cands = NULL;
2257	struct bpf_core_cand local_cand = {};
2258	size_t local_essent_len;
2259	const char *local_name;
2260	int err;
2261
2262	local_cand.btf = local_btf;
2263	local_cand.id = local_id;
2264
2265	local_type = btf__type_by_id(local_btf, local_id);
2266	if (!local_type) {
2267		err = -EINVAL;
2268		goto err_out;
2269	}
2270
2271	local_name = btf__name_by_offset(local_btf, local_type->name_off);
2272	if (!local_name) {
2273		err = -EINVAL;
2274		goto err_out;
2275	}
2276	local_essent_len = bpf_core_essential_name_len(local_name);
2277
2278	cands = calloc(1, sizeof(*cands));
2279	if (!cands)
2280		return NULL;
2281
2282	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
2283	if (err)
2284		goto err_out;
2285
2286	return cands;
2287
2288err_out:
2289	bpf_core_free_cands(cands);
2290	errno = -err;
2291	return NULL;
2292}
2293
2294/* Record relocation information for a single BPF object */
2295static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
2296{
2297	const struct btf_ext_info_sec *sec;
2298	const struct bpf_core_relo *relo;
2299	const struct btf_ext_info *seg;
2300	struct hashmap_entry *entry;
2301	struct hashmap *cand_cache = NULL;
2302	struct btf_ext *btf_ext = NULL;
2303	unsigned int relo_idx;
2304	struct btf *btf = NULL;
2305	size_t i;
2306	int err;
2307
2308	btf = btf__parse(obj_path, &btf_ext);
2309	if (!btf) {
2310		err = -errno;
2311		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
2312		return err;
2313	}
2314
2315	if (!btf_ext) {
2316		p_err("failed to parse BPF object '%s': section %s not found",
2317		      obj_path, BTF_EXT_ELF_SEC);
2318		err = -EINVAL;
2319		goto out;
2320	}
2321
2322	if (btf_ext->core_relo_info.len == 0) {
2323		err = 0;
2324		goto out;
2325	}
2326
2327	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
2328	if (IS_ERR(cand_cache)) {
2329		err = PTR_ERR(cand_cache);
2330		goto out;
2331	}
2332
2333	seg = &btf_ext->core_relo_info;
2334	for_each_btf_ext_sec(seg, sec) {
2335		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
2336			struct bpf_core_spec specs_scratch[3] = {};
2337			struct bpf_core_relo_res targ_res = {};
2338			struct bpf_core_cand_list *cands = NULL;
2339			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);
2340
2341			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
2342			    !hashmap__find(cand_cache, relo->type_id, &cands)) {
2343				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
2344				if (!cands) {
2345					err = -errno;
2346					goto out;
2347				}
2348
2349				err = hashmap__set(cand_cache, relo->type_id, cands,
2350						   NULL, NULL);
2351				if (err)
2352					goto out;
2353			}
2354
2355			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
2356						      specs_scratch, &targ_res);
2357			if (err)
2358				goto out;
2359
2360			/* specs_scratch[2] is the target spec */
2361			err = btfgen_record_reloc(info, &specs_scratch[2]);
2362			if (err)
2363				goto out;
2364		}
2365	}
2366
2367out:
2368	btf__free(btf);
2369	btf_ext__free(btf_ext);
2370
2371	if (!IS_ERR_OR_NULL(cand_cache)) {
2372		hashmap__for_each_entry(cand_cache, entry, i) {
2373			bpf_core_free_cands(entry->pvalue);
2374		}
2375		hashmap__free(cand_cache);
2376	}
2377
2378	return err;
2379}
2380
2381static int btfgen_remap_id(__u32 *type_id, void *ctx)
2382{
2383	unsigned int *ids = ctx;
2384
2385	*type_id = ids[*type_id];
2386
2387	return 0;
2388}
2389
2390/* Generate BTF from relocation information previously recorded */
2391static struct btf *btfgen_get_btf(struct btfgen_info *info)
2392{
2393	struct btf *btf_new = NULL;
2394	unsigned int *ids = NULL;
2395	unsigned int i, n = btf__type_cnt(info->marked_btf);
2396	int err = 0;
2397
2398	btf_new = btf__new_empty();
2399	if (!btf_new) {
2400		err = -errno;
2401		goto err_out;
2402	}
2403
2404	ids = calloc(n, sizeof(*ids));
2405	if (!ids) {
2406		err = -errno;
2407		goto err_out;
2408	}
2409
2410	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
2411	for (i = 1; i < n; i++) {
2412		const struct btf_type *cloned_type, *type;
2413		const char *name;
2414		int new_id;
2415
2416		cloned_type = btf__type_by_id(info->marked_btf, i);
2417
2418		if (cloned_type->name_off != MARKED)
2419			continue;
2420
2421		type = btf__type_by_id(info->src_btf, i);
2422
2423		/* add members for struct and union */
2424		if (btf_is_composite(type)) {
2425			struct btf_member *cloned_m, *m;
2426			unsigned short vlen;
2427			int idx_src;
2428
2429			name = btf__str_by_offset(info->src_btf, type->name_off);
2430
2431			if (btf_is_struct(type))
2432				err = btf__add_struct(btf_new, name, type->size);
2433			else
2434				err = btf__add_union(btf_new, name, type->size);
2435
2436			if (err < 0)
2437				goto err_out;
2438			new_id = err;
2439
2440			cloned_m = btf_members(cloned_type);
2441			m = btf_members(type);
2442			vlen = btf_vlen(cloned_type);
2443			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
2444				/* add only members that are marked as used */
2445				if (cloned_m->name_off != MARKED)
2446					continue;
2447
2448				name = btf__str_by_offset(info->src_btf, m->name_off);
2449				err = btf__add_field(btf_new, name, m->type,
2450						     btf_member_bit_offset(cloned_type, idx_src),
2451						     btf_member_bitfield_size(cloned_type, idx_src));
2452				if (err < 0)
2453					goto err_out;
2454			}
2455		} else {
2456			err = btf__add_type(btf_new, info->src_btf, type);
2457			if (err < 0)
2458				goto err_out;
2459			new_id = err;
2460		}
2461
2462		/* add ID mapping */
2463		ids[i] = new_id;
2464	}
2465
2466	/* second pass: fix up type ids */
2467	for (i = 1; i < btf__type_cnt(btf_new); i++) {
2468		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
2469
2470		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
2471		if (err)
2472			goto err_out;
2473	}
2474
2475	free(ids);
2476	return btf_new;
2477
2478err_out:
2479	btf__free(btf_new);
2480	free(ids);
2481	errno = -err;
2482	return NULL;
2483}
2484
2485/* Create minimized BTF file for a set of BPF objects.
2486 *
2487 * The BTFGen algorithm is divided into two main parts: (1) collect the
2488 * BTF types that are involved in relocations and (2) generate the BTF
2489 * object using the collected types.
2490 *
2491 * In order to collect the types involved in the relocations, we parse
2492 * the BTF and BTF.ext sections of the BPF objects and use
2493 * bpf_core_calc_relo_insn() to get the target specification, which
2494 * indicates how the types and fields are used in a relocation.
2495 *
2496 * Types are recorded in different ways according to the kind of the
2497 * relocation. For field-based relocations, only the members that are
2498 * actually used are saved in order to reduce the size of the generated
2499 * BTF file. For type-based relocations, empty structs/unions are
2500 * generated, and for enum-based relocations the whole type is saved.
2501 *
2502 * The second part of the algorithm generates the BTF object. It creates
2503 * an empty BTF object and fills it with the types recorded in the
2504 * previous step. This function takes care of adding only the structure
2505 * and union members that were marked as used, and it also fixes up the
2506 * type IDs on the generated BTF object.
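 *
 * A typical invocation, matching the "min_core_btf" usage in do_help()
 * (paths are examples only):
 *
 *	bpftool gen min_core_btf /sys/kernel/btf/vmlinux vmlinux_min.btf \
 *		prog1.bpf.o prog2.bpf.o
 *
 * Here src_btf is the INPUT BTF to minimize, dst_btf is the OUTPUT file,
 * and objspaths is the NULL-terminated list of BPF OBJECT files whose
 * CO-RE relocations determine which types are kept.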
2507 */
2508static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
2509{
2510	struct btfgen_info *info;
2511	struct btf *btf_new = NULL;
2512	int err, i;
2513
2514	info = btfgen_new_info(src_btf);
2515	if (!info) {
2516		err = -errno;
2517		p_err("failed to allocate info structure: %s", strerror(errno));
2518		goto out;
2519	}
2520
2521	for (i = 0; objspaths[i] != NULL; i++) {
2522		err = btfgen_record_obj(info, objspaths[i]);
2523		if (err) {
2524			p_err("error recording relocations for %s: %s", objspaths[i],
2525			      strerror(errno));
2526			goto out;
2527		}
2528	}
2529
2530	btf_new = btfgen_get_btf(info);
2531	if (!btf_new) {
2532		err = -errno;
2533		p_err("error generating BTF: %s", strerror(errno));
2534		goto out;
2535	}
2536
2537	err = btf_save_raw(btf_new, dst_btf);
2538	if (err) {
2539		p_err("error saving btf file: %s", strerror(errno));
2540		goto out;
2541	}
2542
2543out:
2544	btf__free(btf_new);
2545	btfgen_free_info(info);
2546
2547	return err;
2548}
2549
2550static int do_min_core_btf(int argc, char **argv)
2551{
2552	const char *input, *output, **objs;
2553	int i, err;
2554
2555	if (!REQ_ARGS(3)) {
2556		usage();
2557		return -1;
2558	}
2559
2560	input = GET_ARG();
2561	output = GET_ARG();
2562
2563	objs = (const char **) calloc(argc + 1, sizeof(*objs));
2564	if (!objs) {
2565		p_err("failed to allocate array for object names");
2566		return -ENOMEM;
2567	}
2568
2569	i = 0;
2570	while (argc)
2571		objs[i++] = GET_ARG();
2572
2573	err = minimize_btf(input, output, objs);
2574	free(objs);
2575	return err;
2576}
2577
2578static const struct cmd cmds[] = {
2579	{ "object",		do_object },
2580	{ "skeleton",		do_skeleton },
2581	{ "subskeleton",	do_subskeleton },
2582	{ "min_core_btf",	do_min_core_btf},
2583	{ "help",		do_help },
2584	{ 0 }
2585};
2586
2587int do_gen(int argc, char **argv)
2588{
2589	return cmd_select(cmds, argc, argv, do_help);
2590}