Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.6.
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/* Copyright (c) 2018 Facebook */
   3
   4#include <uapi/linux/btf.h>
   5#include <uapi/linux/bpf.h>
   6#include <uapi/linux/bpf_perf_event.h>
   7#include <uapi/linux/types.h>
   8#include <linux/seq_file.h>
   9#include <linux/compiler.h>
  10#include <linux/ctype.h>
  11#include <linux/errno.h>
  12#include <linux/slab.h>
  13#include <linux/anon_inodes.h>
  14#include <linux/file.h>
  15#include <linux/uaccess.h>
  16#include <linux/kernel.h>
  17#include <linux/idr.h>
  18#include <linux/sort.h>
  19#include <linux/bpf_verifier.h>
  20#include <linux/btf.h>
  21#include <linux/btf_ids.h>
  22#include <linux/skmsg.h>
  23#include <linux/perf_event.h>
  24#include <net/sock.h>
  25
  26/* BTF (BPF Type Format) is the meta data format which describes
  27 * the data types of BPF program/map.  Hence, it basically focus
  28 * on the C programming language which the modern BPF is primary
  29 * using.
  30 *
  31 * ELF Section:
  32 * ~~~~~~~~~~~
  33 * The BTF data is stored under the ".BTF" ELF section
  34 *
  35 * struct btf_type:
  36 * ~~~~~~~~~~~~~~~
  37 * Each 'struct btf_type' object describes a C data type.
  38 * Depending on the type it is describing, a 'struct btf_type'
  39 * object may be followed by more data.  F.e.
  40 * To describe an array, 'struct btf_type' is followed by
  41 * 'struct btf_array'.
  42 *
  43 * 'struct btf_type' and any extra data following it are
  44 * 4 bytes aligned.
  45 *
  46 * Type section:
  47 * ~~~~~~~~~~~~~
  48 * The BTF type section contains a list of 'struct btf_type' objects.
  49 * Each one describes a C type.  Recall from the above section
  50 * that a 'struct btf_type' object could be immediately followed by extra
  51 * data in order to desribe some particular C types.
  52 *
  53 * type_id:
  54 * ~~~~~~~
  55 * Each btf_type object is identified by a type_id.  The type_id
  56 * is implicitly implied by the location of the btf_type object in
  57 * the BTF type section.  The first one has type_id 1.  The second
  58 * one has type_id 2...etc.  Hence, an earlier btf_type has
  59 * a smaller type_id.
  60 *
  61 * A btf_type object may refer to another btf_type object by using
  62 * type_id (i.e. the "type" in the "struct btf_type").
  63 *
  64 * NOTE that we cannot assume any reference-order.
  65 * A btf_type object can refer to an earlier btf_type object
  66 * but it can also refer to a later btf_type object.
  67 *
  68 * For example, to describe "const void *".  A btf_type
  69 * object describing "const" may refer to another btf_type
  70 * object describing "void *".  This type-reference is done
  71 * by specifying type_id:
  72 *
  73 * [1] CONST (anon) type_id=2
  74 * [2] PTR (anon) type_id=0
  75 *
  76 * The above is the btf_verifier debug log:
  77 *   - Each line started with "[?]" is a btf_type object
  78 *   - [?] is the type_id of the btf_type object.
  79 *   - CONST/PTR is the BTF_KIND_XXX
  80 *   - "(anon)" is the name of the type.  It just
  81 *     happens that CONST and PTR has no name.
  82 *   - type_id=XXX is the 'u32 type' in btf_type
  83 *
  84 * NOTE: "void" has type_id 0
  85 *
  86 * String section:
  87 * ~~~~~~~~~~~~~~
  88 * The BTF string section contains the names used by the type section.
  89 * Each string is referred by an "offset" from the beginning of the
  90 * string section.
  91 *
  92 * Each string is '\0' terminated.
  93 *
  94 * The first character in the string section must be '\0'
  95 * which is used to mean 'anonymous'. Some btf_type may not
  96 * have a name.
  97 */
  98
  99/* BTF verification:
 100 *
 101 * To verify BTF data, two passes are needed.
 102 *
 103 * Pass #1
 104 * ~~~~~~~
 105 * The first pass is to collect all btf_type objects to
 106 * an array: "btf->types".
 107 *
 108 * Depending on the C type that a btf_type is describing,
 109 * a btf_type may be followed by extra data.  We don't know
 110 * how many btf_type is there, and more importantly we don't
 111 * know where each btf_type is located in the type section.
 112 *
 113 * Without knowing the location of each type_id, most verifications
 114 * cannot be done.  e.g. an earlier btf_type may refer to a later
 115 * btf_type (recall the "const void *" above), so we cannot
 116 * check this type-reference in the first pass.
 117 *
 118 * In the first pass, it still does some verifications (e.g.
 119 * checking the name is a valid offset to the string section).
 120 *
 121 * Pass #2
 122 * ~~~~~~~
 123 * The main focus is to resolve a btf_type that is referring
 124 * to another type.
 125 *
 126 * We have to ensure the referring type:
 127 * 1) does exist in the BTF (i.e. in btf->types[])
 128 * 2) does not cause a loop:
 129 *	struct A {
 130 *		struct B b;
 131 *	};
 132 *
 133 *	struct B {
 134 *		struct A a;
 135 *	};
 136 *
 137 * btf_type_needs_resolve() decides if a btf_type needs
 138 * to be resolved.
 139 *
 140 * The needs_resolve type implements the "resolve()" ops which
 141 * essentially does a DFS and detects backedge.
 142 *
 143 * During resolve (or DFS), different C types have different
 144 * "RESOLVED" conditions.
 145 *
 146 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 147 * members because a member is always referring to another
 148 * type.  A struct's member can be treated as "RESOLVED" if
 149 * it is referring to a BTF_KIND_PTR.  Otherwise, the
 150 * following valid C struct would be rejected:
 151 *
 152 *	struct A {
 153 *		int m;
 154 *		struct A *a;
 155 *	};
 156 *
 157 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 158 * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
 159 * detect a pointer loop, e.g.:
 160 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 161 *                        ^                                         |
 162 *                        +-----------------------------------------+
 163 *
 164 */
 165
 166#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
 167#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
 168#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
 169#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
 170#define BITS_ROUNDUP_BYTES(bits) \
 171	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
 172
 173#define BTF_INFO_MASK 0x8f00ffff
 174#define BTF_INT_MASK 0x0fffffff
 175#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
 176#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
 177
 178/* 16MB for 64k structs and each has 16 members and
 179 * a few MB spaces for the string section.
 180 * The hard limit is S32_MAX.
 181 */
 182#define BTF_MAX_SIZE (16 * 1024 * 1024)
 183
 184#define for_each_member_from(i, from, struct_type, member)		\
 185	for (i = from, member = btf_type_member(struct_type) + from;	\
 186	     i < btf_type_vlen(struct_type);				\
 187	     i++, member++)
 188
 189#define for_each_vsi(i, struct_type, member)			\
 190	for (i = 0, member = btf_type_var_secinfo(struct_type);	\
 191	     i < btf_type_vlen(struct_type);			\
 192	     i++, member++)
 193
 194#define for_each_vsi_from(i, from, struct_type, member)				\
 195	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
 196	     i < btf_type_vlen(struct_type);					\
 197	     i++, member++)
 198
 199DEFINE_IDR(btf_idr);
 200DEFINE_SPINLOCK(btf_idr_lock);
 201
 202struct btf {
 203	void *data;
 204	struct btf_type **types;
 205	u32 *resolved_ids;
 206	u32 *resolved_sizes;
 207	const char *strings;
 208	void *nohdr_data;
 209	struct btf_header hdr;
 210	u32 nr_types;
 211	u32 types_size;
 212	u32 data_size;
 213	refcount_t refcnt;
 214	u32 id;
 215	struct rcu_head rcu;
 216};
 217
 218enum verifier_phase {
 219	CHECK_META,
 220	CHECK_TYPE,
 221};
 222
 223struct resolve_vertex {
 224	const struct btf_type *t;
 225	u32 type_id;
 226	u16 next_member;
 227};
 228
 229enum visit_state {
 230	NOT_VISITED,
 231	VISITED,
 232	RESOLVED,
 233};
 234
 235enum resolve_mode {
 236	RESOLVE_TBD,	/* To Be Determined */
 237	RESOLVE_PTR,	/* Resolving for Pointer */
 238	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
 239					 * or array
 240					 */
 241};
 242
 243#define MAX_RESOLVE_DEPTH 32
 244
 245struct btf_sec_info {
 246	u32 off;
 247	u32 len;
 248};
 249
 250struct btf_verifier_env {
 251	struct btf *btf;
 252	u8 *visit_states;
 253	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
 254	struct bpf_verifier_log log;
 255	u32 log_type_id;
 256	u32 top_stack;
 257	enum verifier_phase phase;
 258	enum resolve_mode resolve_mode;
 259};
 260
 261static const char * const btf_kind_str[NR_BTF_KINDS] = {
 262	[BTF_KIND_UNKN]		= "UNKNOWN",
 263	[BTF_KIND_INT]		= "INT",
 264	[BTF_KIND_PTR]		= "PTR",
 265	[BTF_KIND_ARRAY]	= "ARRAY",
 266	[BTF_KIND_STRUCT]	= "STRUCT",
 267	[BTF_KIND_UNION]	= "UNION",
 268	[BTF_KIND_ENUM]		= "ENUM",
 269	[BTF_KIND_FWD]		= "FWD",
 270	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
 271	[BTF_KIND_VOLATILE]	= "VOLATILE",
 272	[BTF_KIND_CONST]	= "CONST",
 273	[BTF_KIND_RESTRICT]	= "RESTRICT",
 274	[BTF_KIND_FUNC]		= "FUNC",
 275	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
 276	[BTF_KIND_VAR]		= "VAR",
 277	[BTF_KIND_DATASEC]	= "DATASEC",
 278};
 279
 280static const char *btf_type_str(const struct btf_type *t)
 281{
 282	return btf_kind_str[BTF_INFO_KIND(t->info)];
 283}
 284
 285struct btf_kind_operations {
 286	s32 (*check_meta)(struct btf_verifier_env *env,
 287			  const struct btf_type *t,
 288			  u32 meta_left);
 289	int (*resolve)(struct btf_verifier_env *env,
 290		       const struct resolve_vertex *v);
 291	int (*check_member)(struct btf_verifier_env *env,
 292			    const struct btf_type *struct_type,
 293			    const struct btf_member *member,
 294			    const struct btf_type *member_type);
 295	int (*check_kflag_member)(struct btf_verifier_env *env,
 296				  const struct btf_type *struct_type,
 297				  const struct btf_member *member,
 298				  const struct btf_type *member_type);
 299	void (*log_details)(struct btf_verifier_env *env,
 300			    const struct btf_type *t);
 301	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
 302			 u32 type_id, void *data, u8 bits_offsets,
 303			 struct seq_file *m);
 304};
 305
 306static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
 307static struct btf_type btf_void;
 308
 309static int btf_resolve(struct btf_verifier_env *env,
 310		       const struct btf_type *t, u32 type_id);
 311
 312static bool btf_type_is_modifier(const struct btf_type *t)
 313{
 314	/* Some of them is not strictly a C modifier
 315	 * but they are grouped into the same bucket
 316	 * for BTF concern:
 317	 *   A type (t) that refers to another
 318	 *   type through t->type AND its size cannot
 319	 *   be determined without following the t->type.
 320	 *
 321	 * ptr does not fall into this bucket
 322	 * because its size is always sizeof(void *).
 323	 */
 324	switch (BTF_INFO_KIND(t->info)) {
 325	case BTF_KIND_TYPEDEF:
 326	case BTF_KIND_VOLATILE:
 327	case BTF_KIND_CONST:
 328	case BTF_KIND_RESTRICT:
 329		return true;
 330	}
 331
 332	return false;
 333}
 334
 335bool btf_type_is_void(const struct btf_type *t)
 336{
 337	return t == &btf_void;
 338}
 339
 340static bool btf_type_is_fwd(const struct btf_type *t)
 341{
 342	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
 343}
 344
 345static bool btf_type_nosize(const struct btf_type *t)
 346{
 347	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
 348	       btf_type_is_func(t) || btf_type_is_func_proto(t);
 349}
 350
 351static bool btf_type_nosize_or_null(const struct btf_type *t)
 352{
 353	return !t || btf_type_nosize(t);
 354}
 355
 356/* union is only a special case of struct:
 357 * all its offsetof(member) == 0
 358 */
 359static bool btf_type_is_struct(const struct btf_type *t)
 360{
 361	u8 kind = BTF_INFO_KIND(t->info);
 362
 363	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
 364}
 365
 366static bool __btf_type_is_struct(const struct btf_type *t)
 367{
 368	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
 369}
 370
 371static bool btf_type_is_array(const struct btf_type *t)
 372{
 373	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
 374}
 375
 376static bool btf_type_is_var(const struct btf_type *t)
 377{
 378	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
 379}
 380
 381static bool btf_type_is_datasec(const struct btf_type *t)
 382{
 383	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
 384}
 385
 386s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
 387{
 388	const struct btf_type *t;
 389	const char *tname;
 390	u32 i;
 391
 392	for (i = 1; i <= btf->nr_types; i++) {
 393		t = btf->types[i];
 394		if (BTF_INFO_KIND(t->info) != kind)
 395			continue;
 396
 397		tname = btf_name_by_offset(btf, t->name_off);
 398		if (!strcmp(tname, name))
 399			return i;
 400	}
 401
 402	return -ENOENT;
 403}
 404
 405const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
 406					       u32 id, u32 *res_id)
 407{
 408	const struct btf_type *t = btf_type_by_id(btf, id);
 409
 410	while (btf_type_is_modifier(t)) {
 411		id = t->type;
 412		t = btf_type_by_id(btf, t->type);
 413	}
 414
 415	if (res_id)
 416		*res_id = id;
 417
 418	return t;
 419}
 420
 421const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
 422					    u32 id, u32 *res_id)
 423{
 424	const struct btf_type *t;
 425
 426	t = btf_type_skip_modifiers(btf, id, NULL);
 427	if (!btf_type_is_ptr(t))
 428		return NULL;
 429
 430	return btf_type_skip_modifiers(btf, t->type, res_id);
 431}
 432
 433const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
 434						 u32 id, u32 *res_id)
 435{
 436	const struct btf_type *ptype;
 437
 438	ptype = btf_type_resolve_ptr(btf, id, res_id);
 439	if (ptype && btf_type_is_func_proto(ptype))
 440		return ptype;
 441
 442	return NULL;
 443}
 444
 445/* Types that act only as a source, not sink or intermediate
 446 * type when resolving.
 447 */
 448static bool btf_type_is_resolve_source_only(const struct btf_type *t)
 449{
 450	return btf_type_is_var(t) ||
 451	       btf_type_is_datasec(t);
 452}
 453
 454/* What types need to be resolved?
 455 *
 456 * btf_type_is_modifier() is an obvious one.
 457 *
 458 * btf_type_is_struct() because its member refers to
 459 * another type (through member->type).
 460 *
 461 * btf_type_is_var() because the variable refers to
 462 * another type. btf_type_is_datasec() holds multiple
 463 * btf_type_is_var() types that need resolving.
 464 *
 465 * btf_type_is_array() because its element (array->type)
 466 * refers to another type.  Array can be thought of a
 467 * special case of struct while array just has the same
 468 * member-type repeated by array->nelems of times.
 469 */
 470static bool btf_type_needs_resolve(const struct btf_type *t)
 471{
 472	return btf_type_is_modifier(t) ||
 473	       btf_type_is_ptr(t) ||
 474	       btf_type_is_struct(t) ||
 475	       btf_type_is_array(t) ||
 476	       btf_type_is_var(t) ||
 477	       btf_type_is_datasec(t);
 478}
 479
 480/* t->size can be used */
 481static bool btf_type_has_size(const struct btf_type *t)
 482{
 483	switch (BTF_INFO_KIND(t->info)) {
 484	case BTF_KIND_INT:
 485	case BTF_KIND_STRUCT:
 486	case BTF_KIND_UNION:
 487	case BTF_KIND_ENUM:
 488	case BTF_KIND_DATASEC:
 489		return true;
 490	}
 491
 492	return false;
 493}
 494
 495static const char *btf_int_encoding_str(u8 encoding)
 496{
 497	if (encoding == 0)
 498		return "(none)";
 499	else if (encoding == BTF_INT_SIGNED)
 500		return "SIGNED";
 501	else if (encoding == BTF_INT_CHAR)
 502		return "CHAR";
 503	else if (encoding == BTF_INT_BOOL)
 504		return "BOOL";
 505	else
 506		return "UNKN";
 507}
 508
 509static u32 btf_type_int(const struct btf_type *t)
 510{
 511	return *(u32 *)(t + 1);
 512}
 513
 514static const struct btf_array *btf_type_array(const struct btf_type *t)
 515{
 516	return (const struct btf_array *)(t + 1);
 517}
 518
 519static const struct btf_enum *btf_type_enum(const struct btf_type *t)
 520{
 521	return (const struct btf_enum *)(t + 1);
 522}
 523
 524static const struct btf_var *btf_type_var(const struct btf_type *t)
 525{
 526	return (const struct btf_var *)(t + 1);
 527}
 528
 529static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
 530{
 531	return (const struct btf_var_secinfo *)(t + 1);
 532}
 533
 534static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
 535{
 536	return kind_ops[BTF_INFO_KIND(t->info)];
 537}
 538
 539static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
 540{
 541	return BTF_STR_OFFSET_VALID(offset) &&
 542		offset < btf->hdr.str_len;
 543}
 544
 545static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
 546{
 547	if ((first ? !isalpha(c) :
 548		     !isalnum(c)) &&
 549	    c != '_' &&
 550	    ((c == '.' && !dot_ok) ||
 551	      c != '.'))
 552		return false;
 553	return true;
 554}
 555
 556static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
 557{
 558	/* offset must be valid */
 559	const char *src = &btf->strings[offset];
 560	const char *src_limit;
 561
 562	if (!__btf_name_char_ok(*src, true, dot_ok))
 563		return false;
 564
 565	/* set a limit on identifier length */
 566	src_limit = src + KSYM_NAME_LEN;
 567	src++;
 568	while (*src && src < src_limit) {
 569		if (!__btf_name_char_ok(*src, false, dot_ok))
 570			return false;
 571		src++;
 572	}
 573
 574	return !*src;
 575}
 576
 577/* Only C-style identifier is permitted. This can be relaxed if
 578 * necessary.
 579 */
 580static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
 581{
 582	return __btf_name_valid(btf, offset, false);
 583}
 584
 585static bool btf_name_valid_section(const struct btf *btf, u32 offset)
 586{
 587	return __btf_name_valid(btf, offset, true);
 588}
 589
 590static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
 591{
 592	if (!offset)
 593		return "(anon)";
 594	else if (offset < btf->hdr.str_len)
 595		return &btf->strings[offset];
 596	else
 597		return "(invalid-name-offset)";
 598}
 599
 600const char *btf_name_by_offset(const struct btf *btf, u32 offset)
 601{
 602	if (offset < btf->hdr.str_len)
 603		return &btf->strings[offset];
 604
 605	return NULL;
 606}
 607
 608const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
 609{
 610	if (type_id > btf->nr_types)
 611		return NULL;
 612
 613	return btf->types[type_id];
 614}
 615
 616/*
 617 * Regular int is not a bit field and it must be either
 618 * u8/u16/u32/u64 or __int128.
 619 */
 620static bool btf_type_int_is_regular(const struct btf_type *t)
 621{
 622	u8 nr_bits, nr_bytes;
 623	u32 int_data;
 624
 625	int_data = btf_type_int(t);
 626	nr_bits = BTF_INT_BITS(int_data);
 627	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
 628	if (BITS_PER_BYTE_MASKED(nr_bits) ||
 629	    BTF_INT_OFFSET(int_data) ||
 630	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
 631	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
 632	     nr_bytes != (2 * sizeof(u64)))) {
 633		return false;
 634	}
 635
 636	return true;
 637}
 638
 639/*
 640 * Check that given struct member is a regular int with expected
 641 * offset and size.
 642 */
 643bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
 644			   const struct btf_member *m,
 645			   u32 expected_offset, u32 expected_size)
 646{
 647	const struct btf_type *t;
 648	u32 id, int_data;
 649	u8 nr_bits;
 650
 651	id = m->type;
 652	t = btf_type_id_size(btf, &id, NULL);
 653	if (!t || !btf_type_is_int(t))
 654		return false;
 655
 656	int_data = btf_type_int(t);
 657	nr_bits = BTF_INT_BITS(int_data);
 658	if (btf_type_kflag(s)) {
 659		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
 660		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
 661
 662		/* if kflag set, int should be a regular int and
 663		 * bit offset should be at byte boundary.
 664		 */
 665		return !bitfield_size &&
 666		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
 667		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
 668	}
 669
 670	if (BTF_INT_OFFSET(int_data) ||
 671	    BITS_PER_BYTE_MASKED(m->offset) ||
 672	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
 673	    BITS_PER_BYTE_MASKED(nr_bits) ||
 674	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
 675		return false;
 676
 677	return true;
 678}
 679
 680__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
 681					      const char *fmt, ...)
 682{
 683	va_list args;
 684
 685	va_start(args, fmt);
 686	bpf_verifier_vlog(log, fmt, args);
 687	va_end(args);
 688}
 689
 690__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
 691					    const char *fmt, ...)
 692{
 693	struct bpf_verifier_log *log = &env->log;
 694	va_list args;
 695
 696	if (!bpf_verifier_log_needed(log))
 697		return;
 698
 699	va_start(args, fmt);
 700	bpf_verifier_vlog(log, fmt, args);
 701	va_end(args);
 702}
 703
 704__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
 705						   const struct btf_type *t,
 706						   bool log_details,
 707						   const char *fmt, ...)
 708{
 709	struct bpf_verifier_log *log = &env->log;
 710	u8 kind = BTF_INFO_KIND(t->info);
 711	struct btf *btf = env->btf;
 712	va_list args;
 713
 714	if (!bpf_verifier_log_needed(log))
 715		return;
 716
 717	/* btf verifier prints all types it is processing via
 718	 * btf_verifier_log_type(..., fmt = NULL).
 719	 * Skip those prints for in-kernel BTF verification.
 720	 */
 721	if (log->level == BPF_LOG_KERNEL && !fmt)
 722		return;
 723
 724	__btf_verifier_log(log, "[%u] %s %s%s",
 725			   env->log_type_id,
 726			   btf_kind_str[kind],
 727			   __btf_name_by_offset(btf, t->name_off),
 728			   log_details ? " " : "");
 729
 730	if (log_details)
 731		btf_type_ops(t)->log_details(env, t);
 732
 733	if (fmt && *fmt) {
 734		__btf_verifier_log(log, " ");
 735		va_start(args, fmt);
 736		bpf_verifier_vlog(log, fmt, args);
 737		va_end(args);
 738	}
 739
 740	__btf_verifier_log(log, "\n");
 741}
 742
 743#define btf_verifier_log_type(env, t, ...) \
 744	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
 745#define btf_verifier_log_basic(env, t, ...) \
 746	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
 747
 748__printf(4, 5)
 749static void btf_verifier_log_member(struct btf_verifier_env *env,
 750				    const struct btf_type *struct_type,
 751				    const struct btf_member *member,
 752				    const char *fmt, ...)
 753{
 754	struct bpf_verifier_log *log = &env->log;
 755	struct btf *btf = env->btf;
 756	va_list args;
 757
 758	if (!bpf_verifier_log_needed(log))
 759		return;
 760
 761	if (log->level == BPF_LOG_KERNEL && !fmt)
 762		return;
 763	/* The CHECK_META phase already did a btf dump.
 764	 *
 765	 * If member is logged again, it must hit an error in
 766	 * parsing this member.  It is useful to print out which
 767	 * struct this member belongs to.
 768	 */
 769	if (env->phase != CHECK_META)
 770		btf_verifier_log_type(env, struct_type, NULL);
 771
 772	if (btf_type_kflag(struct_type))
 773		__btf_verifier_log(log,
 774				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
 775				   __btf_name_by_offset(btf, member->name_off),
 776				   member->type,
 777				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
 778				   BTF_MEMBER_BIT_OFFSET(member->offset));
 779	else
 780		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
 781				   __btf_name_by_offset(btf, member->name_off),
 782				   member->type, member->offset);
 783
 784	if (fmt && *fmt) {
 785		__btf_verifier_log(log, " ");
 786		va_start(args, fmt);
 787		bpf_verifier_vlog(log, fmt, args);
 788		va_end(args);
 789	}
 790
 791	__btf_verifier_log(log, "\n");
 792}
 793
 794__printf(4, 5)
 795static void btf_verifier_log_vsi(struct btf_verifier_env *env,
 796				 const struct btf_type *datasec_type,
 797				 const struct btf_var_secinfo *vsi,
 798				 const char *fmt, ...)
 799{
 800	struct bpf_verifier_log *log = &env->log;
 801	va_list args;
 802
 803	if (!bpf_verifier_log_needed(log))
 804		return;
 805	if (log->level == BPF_LOG_KERNEL && !fmt)
 806		return;
 807	if (env->phase != CHECK_META)
 808		btf_verifier_log_type(env, datasec_type, NULL);
 809
 810	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
 811			   vsi->type, vsi->offset, vsi->size);
 812	if (fmt && *fmt) {
 813		__btf_verifier_log(log, " ");
 814		va_start(args, fmt);
 815		bpf_verifier_vlog(log, fmt, args);
 816		va_end(args);
 817	}
 818
 819	__btf_verifier_log(log, "\n");
 820}
 821
 822static void btf_verifier_log_hdr(struct btf_verifier_env *env,
 823				 u32 btf_data_size)
 824{
 825	struct bpf_verifier_log *log = &env->log;
 826	const struct btf *btf = env->btf;
 827	const struct btf_header *hdr;
 828
 829	if (!bpf_verifier_log_needed(log))
 830		return;
 831
 832	if (log->level == BPF_LOG_KERNEL)
 833		return;
 834	hdr = &btf->hdr;
 835	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
 836	__btf_verifier_log(log, "version: %u\n", hdr->version);
 837	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
 838	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
 839	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
 840	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
 841	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
 842	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
 843	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
 844}
 845
 846static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
 847{
 848	struct btf *btf = env->btf;
 849
 850	/* < 2 because +1 for btf_void which is always in btf->types[0].
 851	 * btf_void is not accounted in btf->nr_types because btf_void
 852	 * does not come from the BTF file.
 853	 */
 854	if (btf->types_size - btf->nr_types < 2) {
 855		/* Expand 'types' array */
 856
 857		struct btf_type **new_types;
 858		u32 expand_by, new_size;
 859
 860		if (btf->types_size == BTF_MAX_TYPE) {
 861			btf_verifier_log(env, "Exceeded max num of types");
 862			return -E2BIG;
 863		}
 864
 865		expand_by = max_t(u32, btf->types_size >> 2, 16);
 866		new_size = min_t(u32, BTF_MAX_TYPE,
 867				 btf->types_size + expand_by);
 868
 869		new_types = kvcalloc(new_size, sizeof(*new_types),
 870				     GFP_KERNEL | __GFP_NOWARN);
 871		if (!new_types)
 872			return -ENOMEM;
 873
 874		if (btf->nr_types == 0)
 875			new_types[0] = &btf_void;
 876		else
 877			memcpy(new_types, btf->types,
 878			       sizeof(*btf->types) * (btf->nr_types + 1));
 879
 880		kvfree(btf->types);
 881		btf->types = new_types;
 882		btf->types_size = new_size;
 883	}
 884
 885	btf->types[++(btf->nr_types)] = t;
 886
 887	return 0;
 888}
 889
 890static int btf_alloc_id(struct btf *btf)
 891{
 892	int id;
 893
 894	idr_preload(GFP_KERNEL);
 895	spin_lock_bh(&btf_idr_lock);
 896	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
 897	if (id > 0)
 898		btf->id = id;
 899	spin_unlock_bh(&btf_idr_lock);
 900	idr_preload_end();
 901
 902	if (WARN_ON_ONCE(!id))
 903		return -ENOSPC;
 904
 905	return id > 0 ? 0 : id;
 906}
 907
 908static void btf_free_id(struct btf *btf)
 909{
 910	unsigned long flags;
 911
 912	/*
 913	 * In map-in-map, calling map_delete_elem() on outer
 914	 * map will call bpf_map_put on the inner map.
 915	 * It will then eventually call btf_free_id()
 916	 * on the inner map.  Some of the map_delete_elem()
 917	 * implementation may have irq disabled, so
 918	 * we need to use the _irqsave() version instead
 919	 * of the _bh() version.
 920	 */
 921	spin_lock_irqsave(&btf_idr_lock, flags);
 922	idr_remove(&btf_idr, btf->id);
 923	spin_unlock_irqrestore(&btf_idr_lock, flags);
 924}
 925
 926static void btf_free(struct btf *btf)
 927{
 928	kvfree(btf->types);
 929	kvfree(btf->resolved_sizes);
 930	kvfree(btf->resolved_ids);
 931	kvfree(btf->data);
 932	kfree(btf);
 933}
 934
 935static void btf_free_rcu(struct rcu_head *rcu)
 936{
 937	struct btf *btf = container_of(rcu, struct btf, rcu);
 938
 939	btf_free(btf);
 940}
 941
 942void btf_put(struct btf *btf)
 943{
 944	if (btf && refcount_dec_and_test(&btf->refcnt)) {
 945		btf_free_id(btf);
 946		call_rcu(&btf->rcu, btf_free_rcu);
 947	}
 948}
 949
 950static int env_resolve_init(struct btf_verifier_env *env)
 951{
 952	struct btf *btf = env->btf;
 953	u32 nr_types = btf->nr_types;
 954	u32 *resolved_sizes = NULL;
 955	u32 *resolved_ids = NULL;
 956	u8 *visit_states = NULL;
 957
 958	/* +1 for btf_void */
 959	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
 960				  GFP_KERNEL | __GFP_NOWARN);
 961	if (!resolved_sizes)
 962		goto nomem;
 963
 964	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
 965				GFP_KERNEL | __GFP_NOWARN);
 966	if (!resolved_ids)
 967		goto nomem;
 968
 969	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
 970				GFP_KERNEL | __GFP_NOWARN);
 971	if (!visit_states)
 972		goto nomem;
 973
 974	btf->resolved_sizes = resolved_sizes;
 975	btf->resolved_ids = resolved_ids;
 976	env->visit_states = visit_states;
 977
 978	return 0;
 979
 980nomem:
 981	kvfree(resolved_sizes);
 982	kvfree(resolved_ids);
 983	kvfree(visit_states);
 984	return -ENOMEM;
 985}
 986
 987static void btf_verifier_env_free(struct btf_verifier_env *env)
 988{
 989	kvfree(env->visit_states);
 990	kfree(env);
 991}
 992
 993static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
 994				     const struct btf_type *next_type)
 995{
 996	switch (env->resolve_mode) {
 997	case RESOLVE_TBD:
 998		/* int, enum or void is a sink */
 999		return !btf_type_needs_resolve(next_type);
1000	case RESOLVE_PTR:
1001		/* int, enum, void, struct, array, func or func_proto is a sink
1002		 * for ptr
1003		 */
1004		return !btf_type_is_modifier(next_type) &&
1005			!btf_type_is_ptr(next_type);
1006	case RESOLVE_STRUCT_OR_ARRAY:
1007		/* int, enum, void, ptr, func or func_proto is a sink
1008		 * for struct and array
1009		 */
1010		return !btf_type_is_modifier(next_type) &&
1011			!btf_type_is_array(next_type) &&
1012			!btf_type_is_struct(next_type);
1013	default:
1014		BUG();
1015	}
1016}
1017
1018static bool env_type_is_resolved(const struct btf_verifier_env *env,
1019				 u32 type_id)
1020{
1021	return env->visit_states[type_id] == RESOLVED;
1022}
1023
1024static int env_stack_push(struct btf_verifier_env *env,
1025			  const struct btf_type *t, u32 type_id)
1026{
1027	struct resolve_vertex *v;
1028
1029	if (env->top_stack == MAX_RESOLVE_DEPTH)
1030		return -E2BIG;
1031
1032	if (env->visit_states[type_id] != NOT_VISITED)
1033		return -EEXIST;
1034
1035	env->visit_states[type_id] = VISITED;
1036
1037	v = &env->stack[env->top_stack++];
1038	v->t = t;
1039	v->type_id = type_id;
1040	v->next_member = 0;
1041
1042	if (env->resolve_mode == RESOLVE_TBD) {
1043		if (btf_type_is_ptr(t))
1044			env->resolve_mode = RESOLVE_PTR;
1045		else if (btf_type_is_struct(t) || btf_type_is_array(t))
1046			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1047	}
1048
1049	return 0;
1050}
1051
1052static void env_stack_set_next_member(struct btf_verifier_env *env,
1053				      u16 next_member)
1054{
1055	env->stack[env->top_stack - 1].next_member = next_member;
1056}
1057
1058static void env_stack_pop_resolved(struct btf_verifier_env *env,
1059				   u32 resolved_type_id,
1060				   u32 resolved_size)
1061{
1062	u32 type_id = env->stack[--(env->top_stack)].type_id;
1063	struct btf *btf = env->btf;
1064
1065	btf->resolved_sizes[type_id] = resolved_size;
1066	btf->resolved_ids[type_id] = resolved_type_id;
1067	env->visit_states[type_id] = RESOLVED;
1068}
1069
1070static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1071{
1072	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1073}
1074
1075/* Resolve the size of a passed-in "type"
1076 *
1077 * type: is an array (e.g. u32 array[x][y])
1078 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1079 * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
1080 *             corresponds to the return type.
1081 * *elem_type: u32
1082 * *total_nelems: (x * y).  Hence, individual elem size is
1083 *                (*type_size / *total_nelems)
1084 *
1085 * type: is not an array (e.g. const struct X)
1086 * return type: type "struct X"
1087 * *type_size: sizeof(struct X)
1088 * *elem_type: same as return type ("struct X")
1089 * *total_nelems: 1
1090 */
1091const struct btf_type *
1092btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1093		 u32 *type_size, const struct btf_type **elem_type,
1094		 u32 *total_nelems)
1095{
1096	const struct btf_type *array_type = NULL;
1097	const struct btf_array *array;
1098	u32 i, size, nelems = 1;
1099
1100	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1101		switch (BTF_INFO_KIND(type->info)) {
1102		/* type->size can be used */
1103		case BTF_KIND_INT:
1104		case BTF_KIND_STRUCT:
1105		case BTF_KIND_UNION:
1106		case BTF_KIND_ENUM:
1107			size = type->size;
1108			goto resolved;
1109
1110		case BTF_KIND_PTR:
1111			size = sizeof(void *);
1112			goto resolved;
1113
1114		/* Modifiers */
1115		case BTF_KIND_TYPEDEF:
1116		case BTF_KIND_VOLATILE:
1117		case BTF_KIND_CONST:
1118		case BTF_KIND_RESTRICT:
1119			type = btf_type_by_id(btf, type->type);
1120			break;
1121
1122		case BTF_KIND_ARRAY:
1123			if (!array_type)
1124				array_type = type;
1125			array = btf_type_array(type);
1126			if (nelems && array->nelems > U32_MAX / nelems)
1127				return ERR_PTR(-EINVAL);
1128			nelems *= array->nelems;
1129			type = btf_type_by_id(btf, array->type);
1130			break;
1131
1132		/* type without size */
1133		default:
1134			return ERR_PTR(-EINVAL);
1135		}
1136	}
1137
1138	return ERR_PTR(-EINVAL);
1139
1140resolved:
1141	if (nelems && size > U32_MAX / nelems)
1142		return ERR_PTR(-EINVAL);
1143
1144	*type_size = nelems * size;
1145	if (total_nelems)
1146		*total_nelems = nelems;
1147	if (elem_type)
1148		*elem_type = type;
1149
1150	return array_type ? : type;
1151}
1152
1153/* The input param "type_id" must point to a needs_resolve type */
1154static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1155						  u32 *type_id)
1156{
1157	*type_id = btf->resolved_ids[*type_id];
1158	return btf_type_by_id(btf, *type_id);
1159}
1160
1161const struct btf_type *btf_type_id_size(const struct btf *btf,
1162					u32 *type_id, u32 *ret_size)
1163{
1164	const struct btf_type *size_type;
1165	u32 size_type_id = *type_id;
1166	u32 size = 0;
1167
1168	size_type = btf_type_by_id(btf, size_type_id);
1169	if (btf_type_nosize_or_null(size_type))
1170		return NULL;
1171
1172	if (btf_type_has_size(size_type)) {
1173		size = size_type->size;
1174	} else if (btf_type_is_array(size_type)) {
1175		size = btf->resolved_sizes[size_type_id];
1176	} else if (btf_type_is_ptr(size_type)) {
1177		size = sizeof(void *);
1178	} else {
1179		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1180				 !btf_type_is_var(size_type)))
1181			return NULL;
1182
1183		size_type_id = btf->resolved_ids[size_type_id];
1184		size_type = btf_type_by_id(btf, size_type_id);
1185		if (btf_type_nosize_or_null(size_type))
1186			return NULL;
1187		else if (btf_type_has_size(size_type))
1188			size = size_type->size;
1189		else if (btf_type_is_array(size_type))
1190			size = btf->resolved_sizes[size_type_id];
1191		else if (btf_type_is_ptr(size_type))
1192			size = sizeof(void *);
1193		else
1194			return NULL;
1195	}
1196
1197	*type_id = size_type_id;
1198	if (ret_size)
1199		*ret_size = size;
1200
1201	return size_type;
1202}
1203
1204static int btf_df_check_member(struct btf_verifier_env *env,
1205			       const struct btf_type *struct_type,
1206			       const struct btf_member *member,
1207			       const struct btf_type *member_type)
1208{
1209	btf_verifier_log_basic(env, struct_type,
1210			       "Unsupported check_member");
1211	return -EINVAL;
1212}
1213
1214static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1215				     const struct btf_type *struct_type,
1216				     const struct btf_member *member,
1217				     const struct btf_type *member_type)
1218{
1219	btf_verifier_log_basic(env, struct_type,
1220			       "Unsupported check_kflag_member");
1221	return -EINVAL;
1222}
1223
1224/* Used for ptr, array and struct/union type members.
1225 * int, enum and modifier types have their specific callback functions.
1226 */
1227static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1228					  const struct btf_type *struct_type,
1229					  const struct btf_member *member,
1230					  const struct btf_type *member_type)
1231{
1232	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1233		btf_verifier_log_member(env, struct_type, member,
1234					"Invalid member bitfield_size");
1235		return -EINVAL;
1236	}
1237
1238	/* bitfield size is 0, so member->offset represents bit offset only.
1239	 * It is safe to call non kflag check_member variants.
1240	 */
1241	return btf_type_ops(member_type)->check_member(env, struct_type,
1242						       member,
1243						       member_type);
1244}
1245
1246static int btf_df_resolve(struct btf_verifier_env *env,
1247			  const struct resolve_vertex *v)
1248{
1249	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1250	return -EINVAL;
1251}
1252
1253static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
1254			    u32 type_id, void *data, u8 bits_offsets,
1255			    struct seq_file *m)
1256{
1257	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1258}
1259
1260static int btf_int_check_member(struct btf_verifier_env *env,
1261				const struct btf_type *struct_type,
1262				const struct btf_member *member,
1263				const struct btf_type *member_type)
1264{
1265	u32 int_data = btf_type_int(member_type);
1266	u32 struct_bits_off = member->offset;
1267	u32 struct_size = struct_type->size;
1268	u32 nr_copy_bits;
1269	u32 bytes_offset;
1270
1271	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
1272		btf_verifier_log_member(env, struct_type, member,
1273					"bits_offset exceeds U32_MAX");
1274		return -EINVAL;
1275	}
1276
1277	struct_bits_off += BTF_INT_OFFSET(int_data);
1278	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1279	nr_copy_bits = BTF_INT_BITS(int_data) +
1280		BITS_PER_BYTE_MASKED(struct_bits_off);
1281
1282	if (nr_copy_bits > BITS_PER_U128) {
1283		btf_verifier_log_member(env, struct_type, member,
1284					"nr_copy_bits exceeds 128");
1285		return -EINVAL;
1286	}
1287
1288	if (struct_size < bytes_offset ||
1289	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1290		btf_verifier_log_member(env, struct_type, member,
1291					"Member exceeds struct_size");
1292		return -EINVAL;
1293	}
1294
1295	return 0;
1296}
1297
1298static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1299				      const struct btf_type *struct_type,
1300				      const struct btf_member *member,
1301				      const struct btf_type *member_type)
1302{
1303	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1304	u32 int_data = btf_type_int(member_type);
1305	u32 struct_size = struct_type->size;
1306	u32 nr_copy_bits;
1307
1308	/* a regular int type is required for the kflag int member */
1309	if (!btf_type_int_is_regular(member_type)) {
1310		btf_verifier_log_member(env, struct_type, member,
1311					"Invalid member base type");
1312		return -EINVAL;
1313	}
1314
1315	/* check sanity of bitfield size */
1316	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
1317	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
1318	nr_int_data_bits = BTF_INT_BITS(int_data);
1319	if (!nr_bits) {
1320		/* Not a bitfield member, member offset must be at byte
1321		 * boundary.
1322		 */
1323		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1324			btf_verifier_log_member(env, struct_type, member,
1325						"Invalid member offset");
1326			return -EINVAL;
1327		}
1328
1329		nr_bits = nr_int_data_bits;
1330	} else if (nr_bits > nr_int_data_bits) {
1331		btf_verifier_log_member(env, struct_type, member,
1332					"Invalid member bitfield_size");
1333		return -EINVAL;
1334	}
1335
1336	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1337	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
1338	if (nr_copy_bits > BITS_PER_U128) {
1339		btf_verifier_log_member(env, struct_type, member,
1340					"nr_copy_bits exceeds 128");
1341		return -EINVAL;
1342	}
1343
1344	if (struct_size < bytes_offset ||
1345	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1346		btf_verifier_log_member(env, struct_type, member,
1347					"Member exceeds struct_size");
1348		return -EINVAL;
1349	}
1350
1351	return 0;
1352}
1353
1354static s32 btf_int_check_meta(struct btf_verifier_env *env,
1355			      const struct btf_type *t,
1356			      u32 meta_left)
1357{
1358	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
1359	u16 encoding;
1360
1361	if (meta_left < meta_needed) {
1362		btf_verifier_log_basic(env, t,
1363				       "meta_left:%u meta_needed:%u",
1364				       meta_left, meta_needed);
1365		return -EINVAL;
1366	}
1367
1368	if (btf_type_vlen(t)) {
1369		btf_verifier_log_type(env, t, "vlen != 0");
1370		return -EINVAL;
1371	}
1372
1373	if (btf_type_kflag(t)) {
1374		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1375		return -EINVAL;
1376	}
1377
1378	int_data = btf_type_int(t);
1379	if (int_data & ~BTF_INT_MASK) {
1380		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
1381				       int_data);
1382		return -EINVAL;
1383	}
1384
1385	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
1386
1387	if (nr_bits > BITS_PER_U128) {
1388		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
1389				      BITS_PER_U128);
1390		return -EINVAL;
1391	}
1392
1393	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
1394		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
1395		return -EINVAL;
1396	}
1397
1398	/*
1399	 * Only one of the encoding bits is allowed and it
1400	 * should be sufficient for the pretty print purpose (i.e. decoding).
1401	 * Multiple bits can be allowed later if it is found
1402	 * to be insufficient.
1403	 */
1404	encoding = BTF_INT_ENCODING(int_data);
1405	if (encoding &&
1406	    encoding != BTF_INT_SIGNED &&
1407	    encoding != BTF_INT_CHAR &&
1408	    encoding != BTF_INT_BOOL) {
1409		btf_verifier_log_type(env, t, "Unsupported encoding");
1410		return -ENOTSUPP;
1411	}
1412
1413	btf_verifier_log_type(env, t, NULL);
1414
1415	return meta_needed;
1416}
1417
1418static void btf_int_log(struct btf_verifier_env *env,
1419			const struct btf_type *t)
1420{
1421	int int_data = btf_type_int(t);
1422
1423	btf_verifier_log(env,
1424			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1425			 t->size, BTF_INT_OFFSET(int_data),
1426			 BTF_INT_BITS(int_data),
1427			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1428}
1429
1430static void btf_int128_print(struct seq_file *m, void *data)
1431{
1432	/* data points to a __int128 number.
1433	 * Suppose
1434	 *     int128_num = *(__int128 *)data;
1435	 * The below formulas shows what upper_num and lower_num represents:
1436	 *     upper_num = int128_num >> 64;
1437	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
1438	 */
1439	u64 upper_num, lower_num;
1440
1441#ifdef __BIG_ENDIAN_BITFIELD
1442	upper_num = *(u64 *)data;
1443	lower_num = *(u64 *)(data + 8);
1444#else
1445	upper_num = *(u64 *)(data + 8);
1446	lower_num = *(u64 *)data;
1447#endif
1448	if (upper_num == 0)
1449		seq_printf(m, "0x%llx", lower_num);
1450	else
1451		seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
1452}
1453
1454static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1455			     u16 right_shift_bits)
1456{
1457	u64 upper_num, lower_num;
1458
1459#ifdef __BIG_ENDIAN_BITFIELD
1460	upper_num = print_num[0];
1461	lower_num = print_num[1];
1462#else
1463	upper_num = print_num[1];
1464	lower_num = print_num[0];
1465#endif
1466
1467	/* shake out un-needed bits by shift/or operations */
1468	if (left_shift_bits >= 64) {
1469		upper_num = lower_num << (left_shift_bits - 64);
1470		lower_num = 0;
1471	} else {
1472		upper_num = (upper_num << left_shift_bits) |
1473			    (lower_num >> (64 - left_shift_bits));
1474		lower_num = lower_num << left_shift_bits;
1475	}
1476
1477	if (right_shift_bits >= 64) {
1478		lower_num = upper_num >> (right_shift_bits - 64);
1479		upper_num = 0;
1480	} else {
1481		lower_num = (lower_num >> right_shift_bits) |
1482			    (upper_num << (64 - right_shift_bits));
1483		upper_num = upper_num >> right_shift_bits;
1484	}
1485
1486#ifdef __BIG_ENDIAN_BITFIELD
1487	print_num[0] = upper_num;
1488	print_num[1] = lower_num;
1489#else
1490	print_num[0] = lower_num;
1491	print_num[1] = upper_num;
1492#endif
1493}
1494
1495static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1496				  u8 nr_bits, struct seq_file *m)
1497{
1498	u16 left_shift_bits, right_shift_bits;
1499	u8 nr_copy_bytes;
1500	u8 nr_copy_bits;
1501	u64 print_num[2] = {};
1502
1503	nr_copy_bits = nr_bits + bits_offset;
1504	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1505
1506	memcpy(print_num, data, nr_copy_bytes);
1507
1508#ifdef __BIG_ENDIAN_BITFIELD
1509	left_shift_bits = bits_offset;
1510#else
1511	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
1512#endif
1513	right_shift_bits = BITS_PER_U128 - nr_bits;
1514
1515	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
1516	btf_int128_print(m, print_num);
1517}
1518
1519
1520static void btf_int_bits_seq_show(const struct btf *btf,
1521				  const struct btf_type *t,
1522				  void *data, u8 bits_offset,
1523				  struct seq_file *m)
1524{
1525	u32 int_data = btf_type_int(t);
1526	u8 nr_bits = BTF_INT_BITS(int_data);
1527	u8 total_bits_offset;
1528
1529	/*
1530	 * bits_offset is at most 7.
1531	 * BTF_INT_OFFSET() cannot exceed 128 bits.
1532	 */
1533	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1534	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1535	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1536	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
1537}
1538
1539static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1540			     u32 type_id, void *data, u8 bits_offset,
1541			     struct seq_file *m)
1542{
1543	u32 int_data = btf_type_int(t);
1544	u8 encoding = BTF_INT_ENCODING(int_data);
1545	bool sign = encoding & BTF_INT_SIGNED;
1546	u8 nr_bits = BTF_INT_BITS(int_data);
1547
1548	if (bits_offset || BTF_INT_OFFSET(int_data) ||
1549	    BITS_PER_BYTE_MASKED(nr_bits)) {
1550		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1551		return;
1552	}
1553
1554	switch (nr_bits) {
1555	case 128:
1556		btf_int128_print(m, data);
1557		break;
1558	case 64:
1559		if (sign)
1560			seq_printf(m, "%lld", *(s64 *)data);
1561		else
1562			seq_printf(m, "%llu", *(u64 *)data);
1563		break;
1564	case 32:
1565		if (sign)
1566			seq_printf(m, "%d", *(s32 *)data);
1567		else
1568			seq_printf(m, "%u", *(u32 *)data);
1569		break;
1570	case 16:
1571		if (sign)
1572			seq_printf(m, "%d", *(s16 *)data);
1573		else
1574			seq_printf(m, "%u", *(u16 *)data);
1575		break;
1576	case 8:
1577		if (sign)
1578			seq_printf(m, "%d", *(s8 *)data);
1579		else
1580			seq_printf(m, "%u", *(u8 *)data);
1581		break;
1582	default:
1583		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1584	}
1585}
1586
1587static const struct btf_kind_operations int_ops = {
1588	.check_meta = btf_int_check_meta,
1589	.resolve = btf_df_resolve,
1590	.check_member = btf_int_check_member,
1591	.check_kflag_member = btf_int_check_kflag_member,
1592	.log_details = btf_int_log,
1593	.seq_show = btf_int_seq_show,
1594};
1595
1596static int btf_modifier_check_member(struct btf_verifier_env *env,
1597				     const struct btf_type *struct_type,
1598				     const struct btf_member *member,
1599				     const struct btf_type *member_type)
1600{
1601	const struct btf_type *resolved_type;
1602	u32 resolved_type_id = member->type;
1603	struct btf_member resolved_member;
1604	struct btf *btf = env->btf;
1605
1606	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1607	if (!resolved_type) {
1608		btf_verifier_log_member(env, struct_type, member,
1609					"Invalid member");
1610		return -EINVAL;
1611	}
1612
1613	resolved_member = *member;
1614	resolved_member.type = resolved_type_id;
1615
1616	return btf_type_ops(resolved_type)->check_member(env, struct_type,
1617							 &resolved_member,
1618							 resolved_type);
1619}
1620
1621static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1622					   const struct btf_type *struct_type,
1623					   const struct btf_member *member,
1624					   const struct btf_type *member_type)
1625{
1626	const struct btf_type *resolved_type;
1627	u32 resolved_type_id = member->type;
1628	struct btf_member resolved_member;
1629	struct btf *btf = env->btf;
1630
1631	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1632	if (!resolved_type) {
1633		btf_verifier_log_member(env, struct_type, member,
1634					"Invalid member");
1635		return -EINVAL;
1636	}
1637
1638	resolved_member = *member;
1639	resolved_member.type = resolved_type_id;
1640
1641	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1642							       &resolved_member,
1643							       resolved_type);
1644}
1645
1646static int btf_ptr_check_member(struct btf_verifier_env *env,
1647				const struct btf_type *struct_type,
1648				const struct btf_member *member,
1649				const struct btf_type *member_type)
1650{
1651	u32 struct_size, struct_bits_off, bytes_offset;
1652
1653	struct_size = struct_type->size;
1654	struct_bits_off = member->offset;
1655	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1656
1657	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1658		btf_verifier_log_member(env, struct_type, member,
1659					"Member is not byte aligned");
1660		return -EINVAL;
1661	}
1662
1663	if (struct_size - bytes_offset < sizeof(void *)) {
1664		btf_verifier_log_member(env, struct_type, member,
1665					"Member exceeds struct_size");
1666		return -EINVAL;
1667	}
1668
1669	return 0;
1670}
1671
1672static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1673				   const struct btf_type *t,
1674				   u32 meta_left)
1675{
1676	if (btf_type_vlen(t)) {
1677		btf_verifier_log_type(env, t, "vlen != 0");
1678		return -EINVAL;
1679	}
1680
1681	if (btf_type_kflag(t)) {
1682		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1683		return -EINVAL;
1684	}
1685
1686	if (!BTF_TYPE_ID_VALID(t->type)) {
1687		btf_verifier_log_type(env, t, "Invalid type_id");
1688		return -EINVAL;
1689	}
1690
1691	/* typedef type must have a valid name, and other ref types,
1692	 * volatile, const, restrict, should have a null name.
1693	 */
1694	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
1695		if (!t->name_off ||
1696		    !btf_name_valid_identifier(env->btf, t->name_off)) {
1697			btf_verifier_log_type(env, t, "Invalid name");
1698			return -EINVAL;
1699		}
1700	} else {
1701		if (t->name_off) {
1702			btf_verifier_log_type(env, t, "Invalid name");
1703			return -EINVAL;
1704		}
1705	}
1706
1707	btf_verifier_log_type(env, t, NULL);
1708
1709	return 0;
1710}
1711
1712static int btf_modifier_resolve(struct btf_verifier_env *env,
1713				const struct resolve_vertex *v)
1714{
1715	const struct btf_type *t = v->t;
1716	const struct btf_type *next_type;
1717	u32 next_type_id = t->type;
1718	struct btf *btf = env->btf;
1719
1720	next_type = btf_type_by_id(btf, next_type_id);
1721	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1722		btf_verifier_log_type(env, v->t, "Invalid type_id");
1723		return -EINVAL;
1724	}
1725
1726	if (!env_type_is_resolve_sink(env, next_type) &&
1727	    !env_type_is_resolved(env, next_type_id))
1728		return env_stack_push(env, next_type, next_type_id);
1729
1730	/* Figure out the resolved next_type_id with size.
1731	 * They will be stored in the current modifier's
1732	 * resolved_ids and resolved_sizes such that it can
1733	 * save us a few type-following when we use it later (e.g. in
1734	 * pretty print).
1735	 */
1736	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1737		if (env_type_is_resolved(env, next_type_id))
1738			next_type = btf_type_id_resolve(btf, &next_type_id);
1739
1740		/* "typedef void new_void", "const void"...etc */
1741		if (!btf_type_is_void(next_type) &&
1742		    !btf_type_is_fwd(next_type) &&
1743		    !btf_type_is_func_proto(next_type)) {
1744			btf_verifier_log_type(env, v->t, "Invalid type_id");
1745			return -EINVAL;
1746		}
1747	}
1748
1749	env_stack_pop_resolved(env, next_type_id, 0);
1750
1751	return 0;
1752}
1753
1754static int btf_var_resolve(struct btf_verifier_env *env,
1755			   const struct resolve_vertex *v)
1756{
1757	const struct btf_type *next_type;
1758	const struct btf_type *t = v->t;
1759	u32 next_type_id = t->type;
1760	struct btf *btf = env->btf;
1761
1762	next_type = btf_type_by_id(btf, next_type_id);
1763	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1764		btf_verifier_log_type(env, v->t, "Invalid type_id");
1765		return -EINVAL;
1766	}
1767
1768	if (!env_type_is_resolve_sink(env, next_type) &&
1769	    !env_type_is_resolved(env, next_type_id))
1770		return env_stack_push(env, next_type, next_type_id);
1771
1772	if (btf_type_is_modifier(next_type)) {
1773		const struct btf_type *resolved_type;
1774		u32 resolved_type_id;
1775
1776		resolved_type_id = next_type_id;
1777		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1778
1779		if (btf_type_is_ptr(resolved_type) &&
1780		    !env_type_is_resolve_sink(env, resolved_type) &&
1781		    !env_type_is_resolved(env, resolved_type_id))
1782			return env_stack_push(env, resolved_type,
1783					      resolved_type_id);
1784	}
1785
1786	/* We must resolve to something concrete at this point, no
1787	 * forward types or similar that would resolve to size of
1788	 * zero is allowed.
1789	 */
1790	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1791		btf_verifier_log_type(env, v->t, "Invalid type_id");
1792		return -EINVAL;
1793	}
1794
1795	env_stack_pop_resolved(env, next_type_id, 0);
1796
1797	return 0;
1798}
1799
1800static int btf_ptr_resolve(struct btf_verifier_env *env,
1801			   const struct resolve_vertex *v)
1802{
1803	const struct btf_type *next_type;
1804	const struct btf_type *t = v->t;
1805	u32 next_type_id = t->type;
1806	struct btf *btf = env->btf;
1807
1808	next_type = btf_type_by_id(btf, next_type_id);
1809	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1810		btf_verifier_log_type(env, v->t, "Invalid type_id");
1811		return -EINVAL;
1812	}
1813
1814	if (!env_type_is_resolve_sink(env, next_type) &&
1815	    !env_type_is_resolved(env, next_type_id))
1816		return env_stack_push(env, next_type, next_type_id);
1817
1818	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1819	 * the modifier may have stopped resolving when it was resolved
1820	 * to a ptr (last-resolved-ptr).
1821	 *
1822	 * We now need to continue from the last-resolved-ptr to
1823	 * ensure the last-resolved-ptr will not referring back to
1824	 * the currenct ptr (t).
1825	 */
1826	if (btf_type_is_modifier(next_type)) {
1827		const struct btf_type *resolved_type;
1828		u32 resolved_type_id;
1829
1830		resolved_type_id = next_type_id;
1831		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1832
1833		if (btf_type_is_ptr(resolved_type) &&
1834		    !env_type_is_resolve_sink(env, resolved_type) &&
1835		    !env_type_is_resolved(env, resolved_type_id))
1836			return env_stack_push(env, resolved_type,
1837					      resolved_type_id);
1838	}
1839
1840	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1841		if (env_type_is_resolved(env, next_type_id))
1842			next_type = btf_type_id_resolve(btf, &next_type_id);
1843
1844		if (!btf_type_is_void(next_type) &&
1845		    !btf_type_is_fwd(next_type) &&
1846		    !btf_type_is_func_proto(next_type)) {
1847			btf_verifier_log_type(env, v->t, "Invalid type_id");
1848			return -EINVAL;
1849		}
1850	}
1851
1852	env_stack_pop_resolved(env, next_type_id, 0);
1853
1854	return 0;
1855}
1856
1857static void btf_modifier_seq_show(const struct btf *btf,
1858				  const struct btf_type *t,
1859				  u32 type_id, void *data,
1860				  u8 bits_offset, struct seq_file *m)
1861{
1862	if (btf->resolved_ids)
1863		t = btf_type_id_resolve(btf, &type_id);
1864	else
1865		t = btf_type_skip_modifiers(btf, type_id, NULL);
1866
1867	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1868}
1869
1870static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
1871			     u32 type_id, void *data, u8 bits_offset,
1872			     struct seq_file *m)
1873{
1874	t = btf_type_id_resolve(btf, &type_id);
1875
1876	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1877}
1878
1879static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1880			     u32 type_id, void *data, u8 bits_offset,
1881			     struct seq_file *m)
1882{
1883	/* It is a hashed value */
1884	seq_printf(m, "%p", *(void **)data);
1885}
1886
1887static void btf_ref_type_log(struct btf_verifier_env *env,
1888			     const struct btf_type *t)
1889{
1890	btf_verifier_log(env, "type_id=%u", t->type);
1891}
1892
1893static struct btf_kind_operations modifier_ops = {
1894	.check_meta = btf_ref_type_check_meta,
1895	.resolve = btf_modifier_resolve,
1896	.check_member = btf_modifier_check_member,
1897	.check_kflag_member = btf_modifier_check_kflag_member,
1898	.log_details = btf_ref_type_log,
1899	.seq_show = btf_modifier_seq_show,
1900};
1901
1902static struct btf_kind_operations ptr_ops = {
1903	.check_meta = btf_ref_type_check_meta,
1904	.resolve = btf_ptr_resolve,
1905	.check_member = btf_ptr_check_member,
1906	.check_kflag_member = btf_generic_check_kflag_member,
1907	.log_details = btf_ref_type_log,
1908	.seq_show = btf_ptr_seq_show,
1909};
1910
1911static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1912			      const struct btf_type *t,
1913			      u32 meta_left)
1914{
1915	if (btf_type_vlen(t)) {
1916		btf_verifier_log_type(env, t, "vlen != 0");
1917		return -EINVAL;
1918	}
1919
1920	if (t->type) {
1921		btf_verifier_log_type(env, t, "type != 0");
1922		return -EINVAL;
1923	}
1924
1925	/* fwd type must have a valid name */
1926	if (!t->name_off ||
1927	    !btf_name_valid_identifier(env->btf, t->name_off)) {
1928		btf_verifier_log_type(env, t, "Invalid name");
1929		return -EINVAL;
1930	}
1931
1932	btf_verifier_log_type(env, t, NULL);
1933
1934	return 0;
1935}
1936
1937static void btf_fwd_type_log(struct btf_verifier_env *env,
1938			     const struct btf_type *t)
1939{
1940	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
1941}
1942
1943static struct btf_kind_operations fwd_ops = {
1944	.check_meta = btf_fwd_check_meta,
1945	.resolve = btf_df_resolve,
1946	.check_member = btf_df_check_member,
1947	.check_kflag_member = btf_df_check_kflag_member,
1948	.log_details = btf_fwd_type_log,
1949	.seq_show = btf_df_seq_show,
1950};
1951
1952static int btf_array_check_member(struct btf_verifier_env *env,
1953				  const struct btf_type *struct_type,
1954				  const struct btf_member *member,
1955				  const struct btf_type *member_type)
1956{
1957	u32 struct_bits_off = member->offset;
1958	u32 struct_size, bytes_offset;
1959	u32 array_type_id, array_size;
1960	struct btf *btf = env->btf;
1961
1962	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1963		btf_verifier_log_member(env, struct_type, member,
1964					"Member is not byte aligned");
1965		return -EINVAL;
1966	}
1967
1968	array_type_id = member->type;
1969	btf_type_id_size(btf, &array_type_id, &array_size);
1970	struct_size = struct_type->size;
1971	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1972	if (struct_size - bytes_offset < array_size) {
1973		btf_verifier_log_member(env, struct_type, member,
1974					"Member exceeds struct_size");
1975		return -EINVAL;
1976	}
1977
1978	return 0;
1979}
1980
1981static s32 btf_array_check_meta(struct btf_verifier_env *env,
1982				const struct btf_type *t,
1983				u32 meta_left)
1984{
1985	const struct btf_array *array = btf_type_array(t);
1986	u32 meta_needed = sizeof(*array);
1987
1988	if (meta_left < meta_needed) {
1989		btf_verifier_log_basic(env, t,
1990				       "meta_left:%u meta_needed:%u",
1991				       meta_left, meta_needed);
1992		return -EINVAL;
1993	}
1994
1995	/* array type should not have a name */
1996	if (t->name_off) {
1997		btf_verifier_log_type(env, t, "Invalid name");
1998		return -EINVAL;
1999	}
2000
2001	if (btf_type_vlen(t)) {
2002		btf_verifier_log_type(env, t, "vlen != 0");
2003		return -EINVAL;
2004	}
2005
2006	if (btf_type_kflag(t)) {
2007		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2008		return -EINVAL;
2009	}
2010
2011	if (t->size) {
2012		btf_verifier_log_type(env, t, "size != 0");
2013		return -EINVAL;
2014	}
2015
2016	/* Array elem type and index type cannot be in type void,
2017	 * so !array->type and !array->index_type are not allowed.
2018	 */
2019	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2020		btf_verifier_log_type(env, t, "Invalid elem");
2021		return -EINVAL;
2022	}
2023
2024	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2025		btf_verifier_log_type(env, t, "Invalid index");
2026		return -EINVAL;
2027	}
2028
2029	btf_verifier_log_type(env, t, NULL);
2030
2031	return meta_needed;
2032}
2033
2034static int btf_array_resolve(struct btf_verifier_env *env,
2035			     const struct resolve_vertex *v)
2036{
2037	const struct btf_array *array = btf_type_array(v->t);
2038	const struct btf_type *elem_type, *index_type;
2039	u32 elem_type_id, index_type_id;
2040	struct btf *btf = env->btf;
2041	u32 elem_size;
2042
2043	/* Check array->index_type */
2044	index_type_id = array->index_type;
2045	index_type = btf_type_by_id(btf, index_type_id);
2046	if (btf_type_nosize_or_null(index_type) ||
2047	    btf_type_is_resolve_source_only(index_type)) {
2048		btf_verifier_log_type(env, v->t, "Invalid index");
2049		return -EINVAL;
2050	}
2051
2052	if (!env_type_is_resolve_sink(env, index_type) &&
2053	    !env_type_is_resolved(env, index_type_id))
2054		return env_stack_push(env, index_type, index_type_id);
2055
2056	index_type = btf_type_id_size(btf, &index_type_id, NULL);
2057	if (!index_type || !btf_type_is_int(index_type) ||
2058	    !btf_type_int_is_regular(index_type)) {
2059		btf_verifier_log_type(env, v->t, "Invalid index");
2060		return -EINVAL;
2061	}
2062
2063	/* Check array->type */
2064	elem_type_id = array->type;
2065	elem_type = btf_type_by_id(btf, elem_type_id);
2066	if (btf_type_nosize_or_null(elem_type) ||
2067	    btf_type_is_resolve_source_only(elem_type)) {
2068		btf_verifier_log_type(env, v->t,
2069				      "Invalid elem");
2070		return -EINVAL;
2071	}
2072
2073	if (!env_type_is_resolve_sink(env, elem_type) &&
2074	    !env_type_is_resolved(env, elem_type_id))
2075		return env_stack_push(env, elem_type, elem_type_id);
2076
2077	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2078	if (!elem_type) {
2079		btf_verifier_log_type(env, v->t, "Invalid elem");
2080		return -EINVAL;
2081	}
2082
2083	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2084		btf_verifier_log_type(env, v->t, "Invalid array of int");
2085		return -EINVAL;
2086	}
2087
2088	if (array->nelems && elem_size > U32_MAX / array->nelems) {
2089		btf_verifier_log_type(env, v->t,
2090				      "Array size overflows U32_MAX");
2091		return -EINVAL;
2092	}
2093
2094	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2095
2096	return 0;
2097}
2098
2099static void btf_array_log(struct btf_verifier_env *env,
2100			  const struct btf_type *t)
2101{
2102	const struct btf_array *array = btf_type_array(t);
2103
2104	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2105			 array->type, array->index_type, array->nelems);
2106}
2107
2108static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
2109			       u32 type_id, void *data, u8 bits_offset,
2110			       struct seq_file *m)
2111{
2112	const struct btf_array *array = btf_type_array(t);
2113	const struct btf_kind_operations *elem_ops;
2114	const struct btf_type *elem_type;
2115	u32 i, elem_size, elem_type_id;
2116
2117	elem_type_id = array->type;
2118	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2119	elem_ops = btf_type_ops(elem_type);
2120	seq_puts(m, "[");
2121	for (i = 0; i < array->nelems; i++) {
2122		if (i)
2123			seq_puts(m, ",");
2124
2125		elem_ops->seq_show(btf, elem_type, elem_type_id, data,
2126				   bits_offset, m);
2127		data += elem_size;
2128	}
2129	seq_puts(m, "]");
2130}
2131
2132static struct btf_kind_operations array_ops = {
2133	.check_meta = btf_array_check_meta,
2134	.resolve = btf_array_resolve,
2135	.check_member = btf_array_check_member,
2136	.check_kflag_member = btf_generic_check_kflag_member,
2137	.log_details = btf_array_log,
2138	.seq_show = btf_array_seq_show,
2139};
2140
2141static int btf_struct_check_member(struct btf_verifier_env *env,
2142				   const struct btf_type *struct_type,
2143				   const struct btf_member *member,
2144				   const struct btf_type *member_type)
2145{
2146	u32 struct_bits_off = member->offset;
2147	u32 struct_size, bytes_offset;
2148
2149	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2150		btf_verifier_log_member(env, struct_type, member,
2151					"Member is not byte aligned");
2152		return -EINVAL;
2153	}
2154
2155	struct_size = struct_type->size;
2156	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2157	if (struct_size - bytes_offset < member_type->size) {
2158		btf_verifier_log_member(env, struct_type, member,
2159					"Member exceeds struct_size");
2160		return -EINVAL;
2161	}
2162
2163	return 0;
2164}
2165
2166static s32 btf_struct_check_meta(struct btf_verifier_env *env,
2167				 const struct btf_type *t,
2168				 u32 meta_left)
2169{
2170	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
2171	const struct btf_member *member;
2172	u32 meta_needed, last_offset;
2173	struct btf *btf = env->btf;
2174	u32 struct_size = t->size;
2175	u32 offset;
2176	u16 i;
2177
2178	meta_needed = btf_type_vlen(t) * sizeof(*member);
2179	if (meta_left < meta_needed) {
2180		btf_verifier_log_basic(env, t,
2181				       "meta_left:%u meta_needed:%u",
2182				       meta_left, meta_needed);
2183		return -EINVAL;
2184	}
2185
2186	/* struct type either no name or a valid one */
2187	if (t->name_off &&
2188	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2189		btf_verifier_log_type(env, t, "Invalid name");
2190		return -EINVAL;
2191	}
2192
2193	btf_verifier_log_type(env, t, NULL);
2194
2195	last_offset = 0;
2196	for_each_member(i, t, member) {
2197		if (!btf_name_offset_valid(btf, member->name_off)) {
2198			btf_verifier_log_member(env, t, member,
2199						"Invalid member name_offset:%u",
2200						member->name_off);
2201			return -EINVAL;
2202		}
2203
2204		/* struct member either no name or a valid one */
2205		if (member->name_off &&
2206		    !btf_name_valid_identifier(btf, member->name_off)) {
2207			btf_verifier_log_member(env, t, member, "Invalid name");
2208			return -EINVAL;
2209		}
2210		/* A member cannot be in type void */
2211		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
2212			btf_verifier_log_member(env, t, member,
2213						"Invalid type_id");
2214			return -EINVAL;
2215		}
2216
2217		offset = btf_member_bit_offset(t, member);
2218		if (is_union && offset) {
2219			btf_verifier_log_member(env, t, member,
2220						"Invalid member bits_offset");
2221			return -EINVAL;
2222		}
2223
2224		/*
2225		 * ">" instead of ">=" because the last member could be
2226		 * "char a[0];"
2227		 */
2228		if (last_offset > offset) {
2229			btf_verifier_log_member(env, t, member,
2230						"Invalid member bits_offset");
2231			return -EINVAL;
2232		}
2233
2234		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
2235			btf_verifier_log_member(env, t, member,
2236						"Member bits_offset exceeds its struct size");
2237			return -EINVAL;
2238		}
2239
2240		btf_verifier_log_member(env, t, member, NULL);
2241		last_offset = offset;
2242	}
2243
2244	return meta_needed;
2245}
2246
2247static int btf_struct_resolve(struct btf_verifier_env *env,
2248			      const struct resolve_vertex *v)
2249{
2250	const struct btf_member *member;
2251	int err;
2252	u16 i;
2253
2254	/* Before continue resolving the next_member,
2255	 * ensure the last member is indeed resolved to a
2256	 * type with size info.
2257	 */
2258	if (v->next_member) {
2259		const struct btf_type *last_member_type;
2260		const struct btf_member *last_member;
2261		u16 last_member_type_id;
2262
2263		last_member = btf_type_member(v->t) + v->next_member - 1;
2264		last_member_type_id = last_member->type;
2265		if (WARN_ON_ONCE(!env_type_is_resolved(env,
2266						       last_member_type_id)))
2267			return -EINVAL;
2268
2269		last_member_type = btf_type_by_id(env->btf,
2270						  last_member_type_id);
2271		if (btf_type_kflag(v->t))
2272			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
2273								last_member,
2274								last_member_type);
2275		else
2276			err = btf_type_ops(last_member_type)->check_member(env, v->t,
2277								last_member,
2278								last_member_type);
2279		if (err)
2280			return err;
2281	}
2282
2283	for_each_member_from(i, v->next_member, v->t, member) {
2284		u32 member_type_id = member->type;
2285		const struct btf_type *member_type = btf_type_by_id(env->btf,
2286								member_type_id);
2287
2288		if (btf_type_nosize_or_null(member_type) ||
2289		    btf_type_is_resolve_source_only(member_type)) {
2290			btf_verifier_log_member(env, v->t, member,
2291						"Invalid member");
2292			return -EINVAL;
2293		}
2294
2295		if (!env_type_is_resolve_sink(env, member_type) &&
2296		    !env_type_is_resolved(env, member_type_id)) {
2297			env_stack_set_next_member(env, i + 1);
2298			return env_stack_push(env, member_type, member_type_id);
2299		}
2300
2301		if (btf_type_kflag(v->t))
2302			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
2303									    member,
2304									    member_type);
2305		else
2306			err = btf_type_ops(member_type)->check_member(env, v->t,
2307								      member,
2308								      member_type);
2309		if (err)
2310			return err;
2311	}
2312
2313	env_stack_pop_resolved(env, 0, 0);
2314
2315	return 0;
2316}
2317
2318static void btf_struct_log(struct btf_verifier_env *env,
2319			   const struct btf_type *t)
2320{
2321	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2322}
2323
2324/* find 'struct bpf_spin_lock' in map value.
2325 * return >= 0 offset if found
2326 * and < 0 in case of error
2327 */
2328int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
2329{
2330	const struct btf_member *member;
2331	u32 i, off = -ENOENT;
2332
2333	if (!__btf_type_is_struct(t))
2334		return -EINVAL;
2335
2336	for_each_member(i, t, member) {
2337		const struct btf_type *member_type = btf_type_by_id(btf,
2338								    member->type);
2339		if (!__btf_type_is_struct(member_type))
2340			continue;
2341		if (member_type->size != sizeof(struct bpf_spin_lock))
2342			continue;
2343		if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
2344			   "bpf_spin_lock"))
2345			continue;
2346		if (off != -ENOENT)
2347			/* only one 'struct bpf_spin_lock' is allowed */
2348			return -E2BIG;
2349		off = btf_member_bit_offset(t, member);
2350		if (off % 8)
2351			/* valid C code cannot generate such BTF */
2352			return -EINVAL;
2353		off /= 8;
2354		if (off % __alignof__(struct bpf_spin_lock))
2355			/* valid struct bpf_spin_lock will be 4 byte aligned */
2356			return -EINVAL;
2357	}
2358	return off;
2359}
2360
2361static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2362				u32 type_id, void *data, u8 bits_offset,
2363				struct seq_file *m)
2364{
2365	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
2366	const struct btf_member *member;
2367	u32 i;
2368
2369	seq_puts(m, "{");
2370	for_each_member(i, t, member) {
2371		const struct btf_type *member_type = btf_type_by_id(btf,
2372								member->type);
2373		const struct btf_kind_operations *ops;
2374		u32 member_offset, bitfield_size;
2375		u32 bytes_offset;
2376		u8 bits8_offset;
2377
2378		if (i)
2379			seq_puts(m, seq);
2380
2381		member_offset = btf_member_bit_offset(t, member);
2382		bitfield_size = btf_member_bitfield_size(t, member);
2383		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2384		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2385		if (bitfield_size) {
2386			btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2387					      bitfield_size, m);
2388		} else {
2389			ops = btf_type_ops(member_type);
2390			ops->seq_show(btf, member_type, member->type,
2391				      data + bytes_offset, bits8_offset, m);
2392		}
2393	}
2394	seq_puts(m, "}");
2395}
2396
2397static struct btf_kind_operations struct_ops = {
2398	.check_meta = btf_struct_check_meta,
2399	.resolve = btf_struct_resolve,
2400	.check_member = btf_struct_check_member,
2401	.check_kflag_member = btf_generic_check_kflag_member,
2402	.log_details = btf_struct_log,
2403	.seq_show = btf_struct_seq_show,
2404};
2405
2406static int btf_enum_check_member(struct btf_verifier_env *env,
2407				 const struct btf_type *struct_type,
2408				 const struct btf_member *member,
2409				 const struct btf_type *member_type)
2410{
2411	u32 struct_bits_off = member->offset;
2412	u32 struct_size, bytes_offset;
2413
2414	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2415		btf_verifier_log_member(env, struct_type, member,
2416					"Member is not byte aligned");
2417		return -EINVAL;
2418	}
2419
2420	struct_size = struct_type->size;
2421	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2422	if (struct_size - bytes_offset < member_type->size) {
2423		btf_verifier_log_member(env, struct_type, member,
2424					"Member exceeds struct_size");
2425		return -EINVAL;
2426	}
2427
2428	return 0;
2429}
2430
2431static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
2432				       const struct btf_type *struct_type,
2433				       const struct btf_member *member,
2434				       const struct btf_type *member_type)
2435{
2436	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
2437	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
2438
2439	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2440	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2441	if (!nr_bits) {
2442		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2443			btf_verifier_log_member(env, struct_type, member,
2444						"Member is not byte aligned");
2445			return -EINVAL;
2446		}
2447
2448		nr_bits = int_bitsize;
2449	} else if (nr_bits > int_bitsize) {
2450		btf_verifier_log_member(env, struct_type, member,
2451					"Invalid member bitfield_size");
2452		return -EINVAL;
2453	}
2454
2455	struct_size = struct_type->size;
2456	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
2457	if (struct_size < bytes_end) {
2458		btf_verifier_log_member(env, struct_type, member,
2459					"Member exceeds struct_size");
2460		return -EINVAL;
2461	}
2462
2463	return 0;
2464}
2465
2466static s32 btf_enum_check_meta(struct btf_verifier_env *env,
2467			       const struct btf_type *t,
2468			       u32 meta_left)
2469{
2470	const struct btf_enum *enums = btf_type_enum(t);
2471	struct btf *btf = env->btf;
2472	u16 i, nr_enums;
2473	u32 meta_needed;
2474
2475	nr_enums = btf_type_vlen(t);
2476	meta_needed = nr_enums * sizeof(*enums);
2477
2478	if (meta_left < meta_needed) {
2479		btf_verifier_log_basic(env, t,
2480				       "meta_left:%u meta_needed:%u",
2481				       meta_left, meta_needed);
2482		return -EINVAL;
2483	}
2484
2485	if (btf_type_kflag(t)) {
2486		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2487		return -EINVAL;
2488	}
2489
2490	if (t->size > 8 || !is_power_of_2(t->size)) {
2491		btf_verifier_log_type(env, t, "Unexpected size");
2492		return -EINVAL;
2493	}
2494
2495	/* enum type either no name or a valid one */
2496	if (t->name_off &&
2497	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2498		btf_verifier_log_type(env, t, "Invalid name");
2499		return -EINVAL;
2500	}
2501
2502	btf_verifier_log_type(env, t, NULL);
2503
2504	for (i = 0; i < nr_enums; i++) {
2505		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
2506			btf_verifier_log(env, "\tInvalid name_offset:%u",
2507					 enums[i].name_off);
2508			return -EINVAL;
2509		}
2510
2511		/* enum member must have a valid name */
2512		if (!enums[i].name_off ||
2513		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
2514			btf_verifier_log_type(env, t, "Invalid name");
2515			return -EINVAL;
2516		}
2517
2518		if (env->log.level == BPF_LOG_KERNEL)
2519			continue;
2520		btf_verifier_log(env, "\t%s val=%d\n",
2521				 __btf_name_by_offset(btf, enums[i].name_off),
2522				 enums[i].val);
2523	}
2524
2525	return meta_needed;
2526}
2527
2528static void btf_enum_log(struct btf_verifier_env *env,
2529			 const struct btf_type *t)
2530{
2531	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2532}
2533
2534static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2535			      u32 type_id, void *data, u8 bits_offset,
2536			      struct seq_file *m)
2537{
2538	const struct btf_enum *enums = btf_type_enum(t);
2539	u32 i, nr_enums = btf_type_vlen(t);
2540	int v = *(int *)data;
2541
2542	for (i = 0; i < nr_enums; i++) {
2543		if (v == enums[i].val) {
2544			seq_printf(m, "%s",
2545				   __btf_name_by_offset(btf,
2546							enums[i].name_off));
2547			return;
2548		}
2549	}
2550
2551	seq_printf(m, "%d", v);
2552}
2553
2554static struct btf_kind_operations enum_ops = {
2555	.check_meta = btf_enum_check_meta,
2556	.resolve = btf_df_resolve,
2557	.check_member = btf_enum_check_member,
2558	.check_kflag_member = btf_enum_check_kflag_member,
2559	.log_details = btf_enum_log,
2560	.seq_show = btf_enum_seq_show,
2561};
2562
2563static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
2564				     const struct btf_type *t,
2565				     u32 meta_left)
2566{
2567	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
2568
2569	if (meta_left < meta_needed) {
2570		btf_verifier_log_basic(env, t,
2571				       "meta_left:%u meta_needed:%u",
2572				       meta_left, meta_needed);
2573		return -EINVAL;
2574	}
2575
2576	if (t->name_off) {
2577		btf_verifier_log_type(env, t, "Invalid name");
2578		return -EINVAL;
2579	}
2580
2581	if (btf_type_kflag(t)) {
2582		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2583		return -EINVAL;
2584	}
2585
2586	btf_verifier_log_type(env, t, NULL);
2587
2588	return meta_needed;
2589}
2590
2591static void btf_func_proto_log(struct btf_verifier_env *env,
2592			       const struct btf_type *t)
2593{
2594	const struct btf_param *args = (const struct btf_param *)(t + 1);
2595	u16 nr_args = btf_type_vlen(t), i;
2596
2597	btf_verifier_log(env, "return=%u args=(", t->type);
2598	if (!nr_args) {
2599		btf_verifier_log(env, "void");
2600		goto done;
2601	}
2602
2603	if (nr_args == 1 && !args[0].type) {
2604		/* Only one vararg */
2605		btf_verifier_log(env, "vararg");
2606		goto done;
2607	}
2608
2609	btf_verifier_log(env, "%u %s", args[0].type,
2610			 __btf_name_by_offset(env->btf,
2611					      args[0].name_off));
2612	for (i = 1; i < nr_args - 1; i++)
2613		btf_verifier_log(env, ", %u %s", args[i].type,
2614				 __btf_name_by_offset(env->btf,
2615						      args[i].name_off));
2616
2617	if (nr_args > 1) {
2618		const struct btf_param *last_arg = &args[nr_args - 1];
2619
2620		if (last_arg->type)
2621			btf_verifier_log(env, ", %u %s", last_arg->type,
2622					 __btf_name_by_offset(env->btf,
2623							      last_arg->name_off));
2624		else
2625			btf_verifier_log(env, ", vararg");
2626	}
2627
2628done:
2629	btf_verifier_log(env, ")");
2630}
2631
2632static struct btf_kind_operations func_proto_ops = {
2633	.check_meta = btf_func_proto_check_meta,
2634	.resolve = btf_df_resolve,
2635	/*
2636	 * BTF_KIND_FUNC_PROTO cannot be directly referred by
2637	 * a struct's member.
2638	 *
2639	 * It should be a funciton pointer instead.
2640	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
2641	 *
2642	 * Hence, there is no btf_func_check_member().
2643	 */
2644	.check_member = btf_df_check_member,
2645	.check_kflag_member = btf_df_check_kflag_member,
2646	.log_details = btf_func_proto_log,
2647	.seq_show = btf_df_seq_show,
2648};
2649
2650static s32 btf_func_check_meta(struct btf_verifier_env *env,
2651			       const struct btf_type *t,
2652			       u32 meta_left)
2653{
2654	if (!t->name_off ||
2655	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2656		btf_verifier_log_type(env, t, "Invalid name");
2657		return -EINVAL;
2658	}
2659
2660	if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
2661		btf_verifier_log_type(env, t, "Invalid func linkage");
2662		return -EINVAL;
2663	}
2664
2665	if (btf_type_kflag(t)) {
2666		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2667		return -EINVAL;
2668	}
2669
2670	btf_verifier_log_type(env, t, NULL);
2671
2672	return 0;
2673}
2674
2675static struct btf_kind_operations func_ops = {
2676	.check_meta = btf_func_check_meta,
2677	.resolve = btf_df_resolve,
2678	.check_member = btf_df_check_member,
2679	.check_kflag_member = btf_df_check_kflag_member,
2680	.log_details = btf_ref_type_log,
2681	.seq_show = btf_df_seq_show,
2682};
2683
2684static s32 btf_var_check_meta(struct btf_verifier_env *env,
2685			      const struct btf_type *t,
2686			      u32 meta_left)
2687{
2688	const struct btf_var *var;
2689	u32 meta_needed = sizeof(*var);
2690
2691	if (meta_left < meta_needed) {
2692		btf_verifier_log_basic(env, t,
2693				       "meta_left:%u meta_needed:%u",
2694				       meta_left, meta_needed);
2695		return -EINVAL;
2696	}
2697
2698	if (btf_type_vlen(t)) {
2699		btf_verifier_log_type(env, t, "vlen != 0");
2700		return -EINVAL;
2701	}
2702
2703	if (btf_type_kflag(t)) {
2704		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2705		return -EINVAL;
2706	}
2707
2708	if (!t->name_off ||
2709	    !__btf_name_valid(env->btf, t->name_off, true)) {
2710		btf_verifier_log_type(env, t, "Invalid name");
2711		return -EINVAL;
2712	}
2713
2714	/* A var cannot be in type void */
2715	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
2716		btf_verifier_log_type(env, t, "Invalid type_id");
2717		return -EINVAL;
2718	}
2719
2720	var = btf_type_var(t);
2721	if (var->linkage != BTF_VAR_STATIC &&
2722	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2723		btf_verifier_log_type(env, t, "Linkage not supported");
2724		return -EINVAL;
2725	}
2726
2727	btf_verifier_log_type(env, t, NULL);
2728
2729	return meta_needed;
2730}
2731
2732static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2733{
2734	const struct btf_var *var = btf_type_var(t);
2735
2736	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2737}
2738
2739static const struct btf_kind_operations var_ops = {
2740	.check_meta		= btf_var_check_meta,
2741	.resolve		= btf_var_resolve,
2742	.check_member		= btf_df_check_member,
2743	.check_kflag_member	= btf_df_check_kflag_member,
2744	.log_details		= btf_var_log,
2745	.seq_show		= btf_var_seq_show,
2746};
2747
2748static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
2749				  const struct btf_type *t,
2750				  u32 meta_left)
2751{
2752	const struct btf_var_secinfo *vsi;
2753	u64 last_vsi_end_off = 0, sum = 0;
2754	u32 i, meta_needed;
2755
2756	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
2757	if (meta_left < meta_needed) {
2758		btf_verifier_log_basic(env, t,
2759				       "meta_left:%u meta_needed:%u",
2760				       meta_left, meta_needed);
2761		return -EINVAL;
2762	}
2763
2764	if (!btf_type_vlen(t)) {
2765		btf_verifier_log_type(env, t, "vlen == 0");
2766		return -EINVAL;
2767	}
2768
2769	if (!t->size) {
2770		btf_verifier_log_type(env, t, "size == 0");
2771		return -EINVAL;
2772	}
2773
2774	if (btf_type_kflag(t)) {
2775		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2776		return -EINVAL;
2777	}
2778
2779	if (!t->name_off ||
2780	    !btf_name_valid_section(env->btf, t->name_off)) {
2781		btf_verifier_log_type(env, t, "Invalid name");
2782		return -EINVAL;
2783	}
2784
2785	btf_verifier_log_type(env, t, NULL);
2786
2787	for_each_vsi(i, t, vsi) {
2788		/* A var cannot be in type void */
2789		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
2790			btf_verifier_log_vsi(env, t, vsi,
2791					     "Invalid type_id");
2792			return -EINVAL;
2793		}
2794
2795		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
2796			btf_verifier_log_vsi(env, t, vsi,
2797					     "Invalid offset");
2798			return -EINVAL;
2799		}
2800
2801		if (!vsi->size || vsi->size > t->size) {
2802			btf_verifier_log_vsi(env, t, vsi,
2803					     "Invalid size");
2804			return -EINVAL;
2805		}
2806
2807		last_vsi_end_off = vsi->offset + vsi->size;
2808		if (last_vsi_end_off > t->size) {
2809			btf_verifier_log_vsi(env, t, vsi,
2810					     "Invalid offset+size");
2811			return -EINVAL;
2812		}
2813
2814		btf_verifier_log_vsi(env, t, vsi, NULL);
2815		sum += vsi->size;
2816	}
2817
2818	if (t->size < sum) {
2819		btf_verifier_log_type(env, t, "Invalid btf_info size");
2820		return -EINVAL;
2821	}
2822
2823	return meta_needed;
2824}
2825
2826static int btf_datasec_resolve(struct btf_verifier_env *env,
2827			       const struct resolve_vertex *v)
2828{
2829	const struct btf_var_secinfo *vsi;
2830	struct btf *btf = env->btf;
2831	u16 i;
2832
2833	for_each_vsi_from(i, v->next_member, v->t, vsi) {
2834		u32 var_type_id = vsi->type, type_id, type_size = 0;
2835		const struct btf_type *var_type = btf_type_by_id(env->btf,
2836								 var_type_id);
2837		if (!var_type || !btf_type_is_var(var_type)) {
2838			btf_verifier_log_vsi(env, v->t, vsi,
2839					     "Not a VAR kind member");
2840			return -EINVAL;
2841		}
2842
2843		if (!env_type_is_resolve_sink(env, var_type) &&
2844		    !env_type_is_resolved(env, var_type_id)) {
2845			env_stack_set_next_member(env, i + 1);
2846			return env_stack_push(env, var_type, var_type_id);
2847		}
2848
2849		type_id = var_type->type;
2850		if (!btf_type_id_size(btf, &type_id, &type_size)) {
2851			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
2852			return -EINVAL;
2853		}
2854
2855		if (vsi->size < type_size) {
2856			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
2857			return -EINVAL;
2858		}
2859	}
2860
2861	env_stack_pop_resolved(env, 0, 0);
2862	return 0;
2863}
2864
2865static void btf_datasec_log(struct btf_verifier_env *env,
2866			    const struct btf_type *t)
2867{
2868	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2869}
2870
2871static void btf_datasec_seq_show(const struct btf *btf,
2872				 const struct btf_type *t, u32 type_id,
2873				 void *data, u8 bits_offset,
2874				 struct seq_file *m)
2875{
2876	const struct btf_var_secinfo *vsi;
2877	const struct btf_type *var;
2878	u32 i;
2879
2880	seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2881	for_each_vsi(i, t, vsi) {
2882		var = btf_type_by_id(btf, vsi->type);
2883		if (i)
2884			seq_puts(m, ",");
2885		btf_type_ops(var)->seq_show(btf, var, vsi->type,
2886					    data + vsi->offset, bits_offset, m);
2887	}
2888	seq_puts(m, "}");
2889}
2890
2891static const struct btf_kind_operations datasec_ops = {
2892	.check_meta		= btf_datasec_check_meta,
2893	.resolve		= btf_datasec_resolve,
2894	.check_member		= btf_df_check_member,
2895	.check_kflag_member	= btf_df_check_kflag_member,
2896	.log_details		= btf_datasec_log,
2897	.seq_show		= btf_datasec_seq_show,
2898};
2899
2900static int btf_func_proto_check(struct btf_verifier_env *env,
2901				const struct btf_type *t)
2902{
2903	const struct btf_type *ret_type;
2904	const struct btf_param *args;
2905	const struct btf *btf;
2906	u16 nr_args, i;
2907	int err;
2908
2909	btf = env->btf;
2910	args = (const struct btf_param *)(t + 1);
2911	nr_args = btf_type_vlen(t);
2912
2913	/* Check func return type which could be "void" (t->type == 0) */
2914	if (t->type) {
2915		u32 ret_type_id = t->type;
2916
2917		ret_type = btf_type_by_id(btf, ret_type_id);
2918		if (!ret_type) {
2919			btf_verifier_log_type(env, t, "Invalid return type");
2920			return -EINVAL;
2921		}
2922
2923		if (btf_type_needs_resolve(ret_type) &&
2924		    !env_type_is_resolved(env, ret_type_id)) {
2925			err = btf_resolve(env, ret_type, ret_type_id);
2926			if (err)
2927				return err;
2928		}
2929
2930		/* Ensure the return type is a type that has a size */
2931		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
2932			btf_verifier_log_type(env, t, "Invalid return type");
2933			return -EINVAL;
2934		}
2935	}
2936
2937	if (!nr_args)
2938		return 0;
2939
2940	/* Last func arg type_id could be 0 if it is a vararg */
2941	if (!args[nr_args - 1].type) {
2942		if (args[nr_args - 1].name_off) {
2943			btf_verifier_log_type(env, t, "Invalid arg#%u",
2944					      nr_args);
2945			return -EINVAL;
2946		}
2947		nr_args--;
2948	}
2949
2950	err = 0;
2951	for (i = 0; i < nr_args; i++) {
2952		const struct btf_type *arg_type;
2953		u32 arg_type_id;
2954
2955		arg_type_id = args[i].type;
2956		arg_type = btf_type_by_id(btf, arg_type_id);
2957		if (!arg_type) {
2958			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2959			err = -EINVAL;
2960			break;
2961		}
2962
2963		if (args[i].name_off &&
2964		    (!btf_name_offset_valid(btf, args[i].name_off) ||
2965		     !btf_name_valid_identifier(btf, args[i].name_off))) {
2966			btf_verifier_log_type(env, t,
2967					      "Invalid arg#%u", i + 1);
2968			err = -EINVAL;
2969			break;
2970		}
2971
2972		if (btf_type_needs_resolve(arg_type) &&
2973		    !env_type_is_resolved(env, arg_type_id)) {
2974			err = btf_resolve(env, arg_type, arg_type_id);
2975			if (err)
2976				break;
2977		}
2978
2979		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
2980			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2981			err = -EINVAL;
2982			break;
2983		}
2984	}
2985
2986	return err;
2987}
2988
2989static int btf_func_check(struct btf_verifier_env *env,
2990			  const struct btf_type *t)
2991{
2992	const struct btf_type *proto_type;
2993	const struct btf_param *args;
2994	const struct btf *btf;
2995	u16 nr_args, i;
2996
2997	btf = env->btf;
2998	proto_type = btf_type_by_id(btf, t->type);
2999
3000	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
3001		btf_verifier_log_type(env, t, "Invalid type_id");
3002		return -EINVAL;
3003	}
3004
3005	args = (const struct btf_param *)(proto_type + 1);
3006	nr_args = btf_type_vlen(proto_type);
3007	for (i = 0; i < nr_args; i++) {
3008		if (!args[i].name_off && args[i].type) {
3009			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
3010			return -EINVAL;
3011		}
3012	}
3013
3014	return 0;
3015}
3016
3017static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
3018	[BTF_KIND_INT] = &int_ops,
3019	[BTF_KIND_PTR] = &ptr_ops,
3020	[BTF_KIND_ARRAY] = &array_ops,
3021	[BTF_KIND_STRUCT] = &struct_ops,
3022	[BTF_KIND_UNION] = &struct_ops,
3023	[BTF_KIND_ENUM] = &enum_ops,
3024	[BTF_KIND_FWD] = &fwd_ops,
3025	[BTF_KIND_TYPEDEF] = &modifier_ops,
3026	[BTF_KIND_VOLATILE] = &modifier_ops,
3027	[BTF_KIND_CONST] = &modifier_ops,
3028	[BTF_KIND_RESTRICT] = &modifier_ops,
3029	[BTF_KIND_FUNC] = &func_ops,
3030	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
3031	[BTF_KIND_VAR] = &var_ops,
3032	[BTF_KIND_DATASEC] = &datasec_ops,
3033};
3034
3035static s32 btf_check_meta(struct btf_verifier_env *env,
3036			  const struct btf_type *t,
3037			  u32 meta_left)
3038{
3039	u32 saved_meta_left = meta_left;
3040	s32 var_meta_size;
3041
3042	if (meta_left < sizeof(*t)) {
3043		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
3044				 env->log_type_id, meta_left, sizeof(*t));
3045		return -EINVAL;
3046	}
3047	meta_left -= sizeof(*t);
3048
3049	if (t->info & ~BTF_INFO_MASK) {
3050		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
3051				 env->log_type_id, t->info);
3052		return -EINVAL;
3053	}
3054
3055	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
3056	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
3057		btf_verifier_log(env, "[%u] Invalid kind:%u",
3058				 env->log_type_id, BTF_INFO_KIND(t->info));
3059		return -EINVAL;
3060	}
3061
3062	if (!btf_name_offset_valid(env->btf, t->name_off)) {
3063		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
3064				 env->log_type_id, t->name_off);
3065		return -EINVAL;
3066	}
3067
3068	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
3069	if (var_meta_size < 0)
3070		return var_meta_size;
3071
3072	meta_left -= var_meta_size;
3073
3074	return saved_meta_left - meta_left;
3075}
3076
3077static int btf_check_all_metas(struct btf_verifier_env *env)
3078{
3079	struct btf *btf = env->btf;
3080	struct btf_header *hdr;
3081	void *cur, *end;
3082
3083	hdr = &btf->hdr;
3084	cur = btf->nohdr_data + hdr->type_off;
3085	end = cur + hdr->type_len;
3086
3087	env->log_type_id = 1;
3088	while (cur < end) {
3089		struct btf_type *t = cur;
3090		s32 meta_size;
3091
3092		meta_size = btf_check_meta(env, t, end - cur);
3093		if (meta_size < 0)
3094			return meta_size;
3095
3096		btf_add_type(env, t);
3097		cur += meta_size;
3098		env->log_type_id++;
3099	}
3100
3101	return 0;
3102}
3103
3104static bool btf_resolve_valid(struct btf_verifier_env *env,
3105			      const struct btf_type *t,
3106			      u32 type_id)
3107{
3108	struct btf *btf = env->btf;
3109
3110	if (!env_type_is_resolved(env, type_id))
3111		return false;
3112
3113	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
3114		return !btf->resolved_ids[type_id] &&
3115		       !btf->resolved_sizes[type_id];
3116
3117	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
3118	    btf_type_is_var(t)) {
3119		t = btf_type_id_resolve(btf, &type_id);
3120		return t &&
3121		       !btf_type_is_modifier(t) &&
3122		       !btf_type_is_var(t) &&
3123		       !btf_type_is_datasec(t);
3124	}
3125
3126	if (btf_type_is_array(t)) {
3127		const struct btf_array *array = btf_type_array(t);
3128		const struct btf_type *elem_type;
3129		u32 elem_type_id = array->type;
3130		u32 elem_size;
3131
3132		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3133		return elem_type && !btf_type_is_modifier(elem_type) &&
3134			(array->nelems * elem_size ==
3135			 btf->resolved_sizes[type_id]);
3136	}
3137
3138	return false;
3139}
3140
3141static int btf_resolve(struct btf_verifier_env *env,
3142		       const struct btf_type *t, u32 type_id)
3143{
3144	u32 save_log_type_id = env->log_type_id;
3145	const struct resolve_vertex *v;
3146	int err = 0;
3147
3148	env->resolve_mode = RESOLVE_TBD;
3149	env_stack_push(env, t, type_id);
3150	while (!err && (v = env_stack_peak(env))) {
3151		env->log_type_id = v->type_id;
3152		err = btf_type_ops(v->t)->resolve(env, v);
3153	}
3154
3155	env->log_type_id = type_id;
3156	if (err == -E2BIG) {
3157		btf_verifier_log_type(env, t,
3158				      "Exceeded max resolving depth:%u",
3159				      MAX_RESOLVE_DEPTH);
3160	} else if (err == -EEXIST) {
3161		btf_verifier_log_type(env, t, "Loop detected");
3162	}
3163
3164	/* Final sanity check */
3165	if (!err && !btf_resolve_valid(env, t, type_id)) {
3166		btf_verifier_log_type(env, t, "Invalid resolve state");
3167		err = -EINVAL;
3168	}
3169
3170	env->log_type_id = save_log_type_id;
3171	return err;
3172}
3173
3174static int btf_check_all_types(struct btf_verifier_env *env)
3175{
3176	struct btf *btf = env->btf;
3177	u32 type_id;
3178	int err;
3179
3180	err = env_resolve_init(env);
3181	if (err)
3182		return err;
3183
3184	env->phase++;
3185	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
3186		const struct btf_type *t = btf_type_by_id(btf, type_id);
3187
3188		env->log_type_id = type_id;
3189		if (btf_type_needs_resolve(t) &&
3190		    !env_type_is_resolved(env, type_id)) {
3191			err = btf_resolve(env, t, type_id);
3192			if (err)
3193				return err;
3194		}
3195
3196		if (btf_type_is_func_proto(t)) {
3197			err = btf_func_proto_check(env, t);
3198			if (err)
3199				return err;
3200		}
3201
3202		if (btf_type_is_func(t)) {
3203			err = btf_func_check(env, t);
3204			if (err)
3205				return err;
3206		}
3207	}
3208
3209	return 0;
3210}
3211
3212static int btf_parse_type_sec(struct btf_verifier_env *env)
3213{
3214	const struct btf_header *hdr = &env->btf->hdr;
3215	int err;
3216
3217	/* Type section must align to 4 bytes */
3218	if (hdr->type_off & (sizeof(u32) - 1)) {
3219		btf_verifier_log(env, "Unaligned type_off");
3220		return -EINVAL;
3221	}
3222
3223	if (!hdr->type_len) {
3224		btf_verifier_log(env, "No type found");
3225		return -EINVAL;
3226	}
3227
3228	err = btf_check_all_metas(env);
3229	if (err)
3230		return err;
3231
3232	return btf_check_all_types(env);
3233}
3234
3235static int btf_parse_str_sec(struct btf_verifier_env *env)
3236{
3237	const struct btf_header *hdr;
3238	struct btf *btf = env->btf;
3239	const char *start, *end;
3240
3241	hdr = &btf->hdr;
3242	start = btf->nohdr_data + hdr->str_off;
3243	end = start + hdr->str_len;
3244
3245	if (end != btf->data + btf->data_size) {
3246		btf_verifier_log(env, "String section is not at the end");
3247		return -EINVAL;
3248	}
3249
3250	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
3251	    start[0] || end[-1]) {
3252		btf_verifier_log(env, "Invalid string section");
3253		return -EINVAL;
3254	}
3255
3256	btf->strings = start;
3257
3258	return 0;
3259}
3260
3261static const size_t btf_sec_info_offset[] = {
3262	offsetof(struct btf_header, type_off),
3263	offsetof(struct btf_header, str_off),
3264};
3265
3266static int btf_sec_info_cmp(const void *a, const void *b)
3267{
3268	const struct btf_sec_info *x = a;
3269	const struct btf_sec_info *y = b;
3270
3271	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3272}
3273
3274static int btf_check_sec_info(struct btf_verifier_env *env,
3275			      u32 btf_data_size)
3276{
3277	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
3278	u32 total, expected_total, i;
3279	const struct btf_header *hdr;
3280	const struct btf *btf;
3281
3282	btf = env->btf;
3283	hdr = &btf->hdr;
3284
3285	/* Populate the secs from hdr */
3286	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
3287		secs[i] = *(struct btf_sec_info *)((void *)hdr +
3288						   btf_sec_info_offset[i]);
3289
3290	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
3291	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
3292
3293	/* Check for gaps and overlap among sections */
3294	total = 0;
3295	expected_total = btf_data_size - hdr->hdr_len;
3296	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
3297		if (expected_total < secs[i].off) {
3298			btf_verifier_log(env, "Invalid section offset");
3299			return -EINVAL;
3300		}
3301		if (total < secs[i].off) {
3302			/* gap */
3303			btf_verifier_log(env, "Unsupported section found");
3304			return -EINVAL;
3305		}
3306		if (total > secs[i].off) {
3307			btf_verifier_log(env, "Section overlap found");
3308			return -EINVAL;
3309		}
3310		if (expected_total - total < secs[i].len) {
3311			btf_verifier_log(env,
3312					 "Total section length too long");
3313			return -EINVAL;
3314		}
3315		total += secs[i].len;
3316	}
3317
3318	/* There is data other than hdr and known sections */
3319	if (expected_total != total) {
3320		btf_verifier_log(env, "Unsupported section found");
3321		return -EINVAL;
3322	}
3323
3324	return 0;
3325}
3326
3327static int btf_parse_hdr(struct btf_verifier_env *env)
3328{
3329	u32 hdr_len, hdr_copy, btf_data_size;
3330	const struct btf_header *hdr;
3331	struct btf *btf;
3332	int err;
3333
3334	btf = env->btf;
3335	btf_data_size = btf->data_size;
3336
3337	if (btf_data_size <
3338	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
3339		btf_verifier_log(env, "hdr_len not found");
3340		return -EINVAL;
3341	}
3342
3343	hdr = btf->data;
3344	hdr_len = hdr->hdr_len;
3345	if (btf_data_size < hdr_len) {
3346		btf_verifier_log(env, "btf_header not found");
3347		return -EINVAL;
3348	}
3349
3350	/* Ensure the unsupported header fields are zero */
3351	if (hdr_len > sizeof(btf->hdr)) {
3352		u8 *expected_zero = btf->data + sizeof(btf->hdr);
3353		u8 *end = btf->data + hdr_len;
3354
3355		for (; expected_zero < end; expected_zero++) {
3356			if (*expected_zero) {
3357				btf_verifier_log(env, "Unsupported btf_header");
3358				return -E2BIG;
3359			}
3360		}
3361	}
3362
3363	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
3364	memcpy(&btf->hdr, btf->data, hdr_copy);
3365
3366	hdr = &btf->hdr;
3367
3368	btf_verifier_log_hdr(env, btf_data_size);
3369
3370	if (hdr->magic != BTF_MAGIC) {
3371		btf_verifier_log(env, "Invalid magic");
3372		return -EINVAL;
3373	}
3374
3375	if (hdr->version != BTF_VERSION) {
3376		btf_verifier_log(env, "Unsupported version");
3377		return -ENOTSUPP;
3378	}
3379
3380	if (hdr->flags) {
3381		btf_verifier_log(env, "Unsupported flags");
3382		return -ENOTSUPP;
3383	}
3384
3385	if (btf_data_size == hdr->hdr_len) {
3386		btf_verifier_log(env, "No data");
3387		return -EINVAL;
3388	}
3389
3390	err = btf_check_sec_info(env, btf_data_size);
3391	if (err)
3392		return err;
3393
3394	return 0;
3395}
3396
3397static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
3398			     u32 log_level, char __user *log_ubuf, u32 log_size)
3399{
3400	struct btf_verifier_env *env = NULL;
3401	struct bpf_verifier_log *log;
3402	struct btf *btf = NULL;
3403	u8 *data;
3404	int err;
3405
3406	if (btf_data_size > BTF_MAX_SIZE)
3407		return ERR_PTR(-E2BIG);
3408
3409	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3410	if (!env)
3411		return ERR_PTR(-ENOMEM);
3412
3413	log = &env->log;
3414	if (log_level || log_ubuf || log_size) {
3415		/* user requested verbose verifier output
3416		 * and supplied buffer to store the verification trace
3417		 */
3418		log->level = log_level;
3419		log->ubuf = log_ubuf;
3420		log->len_total = log_size;
3421
3422		/* log attributes have to be sane */
3423		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
3424		    !log->level || !log->ubuf) {
3425			err = -EINVAL;
3426			goto errout;
3427		}
3428	}
3429
3430	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3431	if (!btf) {
3432		err = -ENOMEM;
3433		goto errout;
3434	}
3435	env->btf = btf;
3436
3437	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
3438	if (!data) {
3439		err = -ENOMEM;
3440		goto errout;
3441	}
3442
3443	btf->data = data;
3444	btf->data_size = btf_data_size;
3445
3446	if (copy_from_user(data, btf_data, btf_data_size)) {
3447		err = -EFAULT;
3448		goto errout;
3449	}
3450
3451	err = btf_parse_hdr(env);
3452	if (err)
3453		goto errout;
3454
3455	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3456
3457	err = btf_parse_str_sec(env);
3458	if (err)
3459		goto errout;
3460
3461	err = btf_parse_type_sec(env);
3462	if (err)
3463		goto errout;
3464
3465	if (log->level && bpf_verifier_log_full(log)) {
3466		err = -ENOSPC;
3467		goto errout;
3468	}
3469
3470	btf_verifier_env_free(env);
3471	refcount_set(&btf->refcnt, 1);
3472	return btf;
3473
3474errout:
3475	btf_verifier_env_free(env);
3476	if (btf)
3477		btf_free(btf);
3478	return ERR_PTR(err);
3479}
3480
3481extern char __weak __start_BTF[];
3482extern char __weak __stop_BTF[];
3483extern struct btf *btf_vmlinux;
3484
3485#define BPF_MAP_TYPE(_id, _ops)
3486#define BPF_LINK_TYPE(_id, _name)
3487static union {
3488	struct bpf_ctx_convert {
3489#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3490	prog_ctx_type _id##_prog; \
3491	kern_ctx_type _id##_kern;
3492#include <linux/bpf_types.h>
3493#undef BPF_PROG_TYPE
3494	} *__t;
3495	/* 't' is written once under lock. Read many times. */
3496	const struct btf_type *t;
3497} bpf_ctx_convert;
3498enum {
3499#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3500	__ctx_convert##_id,
3501#include <linux/bpf_types.h>
3502#undef BPF_PROG_TYPE
3503	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
3504};
3505static u8 bpf_ctx_convert_map[] = {
3506#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3507	[_id] = __ctx_convert##_id,
3508#include <linux/bpf_types.h>
3509#undef BPF_PROG_TYPE
3510	0, /* avoid empty array */
3511};
3512#undef BPF_MAP_TYPE
3513#undef BPF_LINK_TYPE
3514
3515static const struct btf_member *
3516btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
3517		      const struct btf_type *t, enum bpf_prog_type prog_type,
3518		      int arg)
3519{
3520	const struct btf_type *conv_struct;
3521	const struct btf_type *ctx_struct;
3522	const struct btf_member *ctx_type;
3523	const char *tname, *ctx_tname;
3524
3525	conv_struct = bpf_ctx_convert.t;
3526	if (!conv_struct) {
3527		bpf_log(log, "btf_vmlinux is malformed\n");
3528		return NULL;
3529	}
3530	t = btf_type_by_id(btf, t->type);
3531	while (btf_type_is_modifier(t))
3532		t = btf_type_by_id(btf, t->type);
3533	if (!btf_type_is_struct(t)) {
3534		/* Only pointer to struct is supported for now.
3535		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
3536		 * is not supported yet.
3537		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
3538		 */
3539		if (log->level & BPF_LOG_LEVEL)
3540			bpf_log(log, "arg#%d type is not a struct\n", arg);
3541		return NULL;
3542	}
3543	tname = btf_name_by_offset(btf, t->name_off);
3544	if (!tname) {
3545		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
3546		return NULL;
3547	}
3548	/* prog_type is valid bpf program type. No need for bounds check. */
3549	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
3550	/* ctx_struct is a pointer to prog_ctx_type in vmlinux.
3551	 * Like 'struct __sk_buff'
3552	 */
3553	ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
3554	if (!ctx_struct)
3555		/* should not happen */
3556		return NULL;
3557	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
3558	if (!ctx_tname) {
3559		/* should not happen */
3560		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
3561		return NULL;
3562	}
3563	/* only compare that prog's ctx type name is the same as
3564	 * kernel expects. No need to compare field by field.
3565	 * It's ok for bpf prog to do:
3566	 * struct __sk_buff {};
3567	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
3568	 * { // no fields of skb are ever used }
3569	 */
3570	if (strcmp(ctx_tname, tname))
3571		return NULL;
3572	return ctx_type;
3573}
3574
3575static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
3576#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
3577#define BPF_LINK_TYPE(_id, _name)
3578#define BPF_MAP_TYPE(_id, _ops) \
3579	[_id] = &_ops,
3580#include <linux/bpf_types.h>
3581#undef BPF_PROG_TYPE
3582#undef BPF_LINK_TYPE
3583#undef BPF_MAP_TYPE
3584};
3585
3586static int btf_vmlinux_map_ids_init(const struct btf *btf,
3587				    struct bpf_verifier_log *log)
3588{
3589	const struct bpf_map_ops *ops;
3590	int i, btf_id;
3591
3592	for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
3593		ops = btf_vmlinux_map_ops[i];
3594		if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
3595			continue;
3596		if (!ops->map_btf_name || !ops->map_btf_id) {
3597			bpf_log(log, "map type %d is misconfigured\n", i);
3598			return -EINVAL;
3599		}
3600		btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
3601					       BTF_KIND_STRUCT);
3602		if (btf_id < 0)
3603			return btf_id;
3604		*ops->map_btf_id = btf_id;
3605	}
3606
3607	return 0;
3608}
3609
3610static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
3611				     struct btf *btf,
3612				     const struct btf_type *t,
3613				     enum bpf_prog_type prog_type,
3614				     int arg)
3615{
3616	const struct btf_member *prog_ctx_type, *kern_ctx_type;
3617
3618	prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
3619	if (!prog_ctx_type)
3620		return -ENOENT;
3621	kern_ctx_type = prog_ctx_type + 1;
3622	return kern_ctx_type->type;
3623}
3624
3625BTF_ID_LIST(bpf_ctx_convert_btf_id)
3626BTF_ID(struct, bpf_ctx_convert)
3627
3628struct btf *btf_parse_vmlinux(void)
3629{
3630	struct btf_verifier_env *env = NULL;
3631	struct bpf_verifier_log *log;
3632	struct btf *btf = NULL;
3633	int err;
3634
3635	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3636	if (!env)
3637		return ERR_PTR(-ENOMEM);
3638
3639	log = &env->log;
3640	log->level = BPF_LOG_KERNEL;
3641
3642	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3643	if (!btf) {
3644		err = -ENOMEM;
3645		goto errout;
3646	}
3647	env->btf = btf;
3648
3649	btf->data = __start_BTF;
3650	btf->data_size = __stop_BTF - __start_BTF;
3651
3652	err = btf_parse_hdr(env);
3653	if (err)
3654		goto errout;
3655
3656	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3657
3658	err = btf_parse_str_sec(env);
3659	if (err)
3660		goto errout;
3661
3662	err = btf_check_all_metas(env);
3663	if (err)
3664		goto errout;
3665
3666	/* btf_parse_vmlinux() runs under bpf_verifier_lock */
3667	bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
3668
3669	/* find bpf map structs for map_ptr access checking */
3670	err = btf_vmlinux_map_ids_init(btf, log);
3671	if (err < 0)
3672		goto errout;
3673
3674	bpf_struct_ops_init(btf, log);
3675
3676	btf_verifier_env_free(env);
3677	refcount_set(&btf->refcnt, 1);
3678	return btf;
3679
3680errout:
3681	btf_verifier_env_free(env);
3682	if (btf) {
3683		kvfree(btf->types);
3684		kfree(btf);
3685	}
3686	return ERR_PTR(err);
3687}
3688
3689struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
3690{
3691	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3692
3693	if (tgt_prog) {
3694		return tgt_prog->aux->btf;
3695	} else {
3696		return btf_vmlinux;
3697	}
3698}
3699
3700static bool is_string_ptr(struct btf *btf, const struct btf_type *t)
3701{
3702	/* t comes in already as a pointer */
3703	t = btf_type_by_id(btf, t->type);
3704
3705	/* allow const */
3706	if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
3707		t = btf_type_by_id(btf, t->type);
3708
3709	/* char, signed char, unsigned char */
3710	return btf_type_is_int(t) && t->size == 1;
3711}
3712
3713bool btf_ctx_access(int off, int size, enum bpf_access_type type,
3714		    const struct bpf_prog *prog,
3715		    struct bpf_insn_access_aux *info)
3716{
3717	const struct btf_type *t = prog->aux->attach_func_proto;
3718	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3719	struct btf *btf = bpf_prog_get_target_btf(prog);
3720	const char *tname = prog->aux->attach_func_name;
3721	struct bpf_verifier_log *log = info->log;
3722	const struct btf_param *args;
3723	u32 nr_args, arg;
3724	int i, ret;
3725
3726	if (off % 8) {
3727		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
3728			tname, off);
3729		return false;
3730	}
3731	arg = off / 8;
3732	args = (const struct btf_param *)(t + 1);
3733	/* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */
3734	nr_args = t ? btf_type_vlen(t) : 5;
3735	if (prog->aux->attach_btf_trace) {
3736		/* skip first 'void *__data' argument in btf_trace_##name typedef */
3737		args++;
3738		nr_args--;
3739	}
3740
3741	if (arg > nr_args) {
3742		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
3743			tname, arg + 1);
3744		return false;
3745	}
3746
3747	if (arg == nr_args) {
3748		switch (prog->expected_attach_type) {
3749		case BPF_LSM_MAC:
3750		case BPF_TRACE_FEXIT:
3751			/* When LSM programs are attached to void LSM hooks
3752			 * they use FEXIT trampolines and when attached to
3753			 * int LSM hooks, they use MODIFY_RETURN trampolines.
3754			 *
3755			 * While the LSM programs are BPF_MODIFY_RETURN-like
3756			 * the check:
3757			 *
3758			 *	if (ret_type != 'int')
3759			 *		return -EINVAL;
3760			 *
3761			 * is _not_ done here. This is still safe as LSM hooks
3762			 * have only void and int return types.
3763			 */
3764			if (!t)
3765				return true;
3766			t = btf_type_by_id(btf, t->type);
3767			break;
3768		case BPF_MODIFY_RETURN:
3769			/* For now the BPF_MODIFY_RETURN can only be attached to
3770			 * functions that return an int.
3771			 */
3772			if (!t)
3773				return false;
3774
3775			t = btf_type_skip_modifiers(btf, t->type, NULL);
3776			if (!btf_type_is_small_int(t)) {
3777				bpf_log(log,
3778					"ret type %s not allowed for fmod_ret\n",
3779					btf_kind_str[BTF_INFO_KIND(t->info)]);
3780				return false;
3781			}
3782			break;
3783		default:
3784			bpf_log(log, "func '%s' doesn't have %d-th argument\n",
3785				tname, arg + 1);
3786			return false;
3787		}
3788	} else {
3789		if (!t)
3790			/* Default prog with 5 args */
3791			return true;
3792		t = btf_type_by_id(btf, args[arg].type);
3793	}
3794
3795	/* skip modifiers */
3796	while (btf_type_is_modifier(t))
3797		t = btf_type_by_id(btf, t->type);
3798	if (btf_type_is_small_int(t) || btf_type_is_enum(t))
3799		/* accessing a scalar */
3800		return true;
3801	if (!btf_type_is_ptr(t)) {
3802		bpf_log(log,
3803			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
3804			tname, arg,
3805			__btf_name_by_offset(btf, t->name_off),
3806			btf_kind_str[BTF_INFO_KIND(t->info)]);
3807		return false;
3808	}
3809
3810	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
3811	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
3812		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
3813
3814		if (ctx_arg_info->offset == off &&
3815		    (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL ||
3816		     ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) {
3817			info->reg_type = ctx_arg_info->reg_type;
3818			return true;
3819		}
3820	}
3821
3822	if (t->type == 0)
3823		/* This is a pointer to void.
3824		 * It is the same as scalar from the verifier safety pov.
3825		 * No further pointer walking is allowed.
3826		 */
3827		return true;
3828
3829	if (is_string_ptr(btf, t))
3830		return true;
3831
3832	/* this is a pointer to another type */
3833	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
3834		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
3835
3836		if (ctx_arg_info->offset == off) {
3837			info->reg_type = ctx_arg_info->reg_type;
3838			info->btf_id = ctx_arg_info->btf_id;
3839			return true;
3840		}
3841	}
3842
3843	info->reg_type = PTR_TO_BTF_ID;
3844	if (tgt_prog) {
3845		ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg);
3846		if (ret > 0) {
3847			info->btf_id = ret;
3848			return true;
3849		} else {
3850			return false;
3851		}
3852	}
3853
3854	info->btf_id = t->type;
3855	t = btf_type_by_id(btf, t->type);
3856	/* skip modifiers */
3857	while (btf_type_is_modifier(t)) {
3858		info->btf_id = t->type;
3859		t = btf_type_by_id(btf, t->type);
3860	}
3861	if (!btf_type_is_struct(t)) {
3862		bpf_log(log,
3863			"func '%s' arg%d type %s is not a struct\n",
3864			tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
3865		return false;
3866	}
3867	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
3868		tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
3869		__btf_name_by_offset(btf, t->name_off));
3870	return true;
3871}
3872
3873int btf_struct_access(struct bpf_verifier_log *log,
3874		      const struct btf_type *t, int off, int size,
3875		      enum bpf_access_type atype,
3876		      u32 *next_btf_id)
3877{
3878	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
3879	const struct btf_type *mtype, *elem_type = NULL;
3880	const struct btf_member *member;
3881	const char *tname, *mname;
3882	u32 vlen;
3883
3884again:
3885	tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3886	if (!btf_type_is_struct(t)) {
3887		bpf_log(log, "Type '%s' is not a struct\n", tname);
3888		return -EINVAL;
3889	}
3890
3891	vlen = btf_type_vlen(t);
3892	if (off + size > t->size) {
3893		/* If the last element is a variable size array, we may
3894		 * need to relax the rule.
3895		 */
3896		struct btf_array *array_elem;
3897
3898		if (vlen == 0)
3899			goto error;
3900
3901		member = btf_type_member(t) + vlen - 1;
3902		mtype = btf_type_skip_modifiers(btf_vmlinux, member->type,
3903						NULL);
3904		if (!btf_type_is_array(mtype))
3905			goto error;
3906
3907		array_elem = (struct btf_array *)(mtype + 1);
3908		if (array_elem->nelems != 0)
3909			goto error;
3910
3911		moff = btf_member_bit_offset(t, member) / 8;
3912		if (off < moff)
3913			goto error;
3914
3915		/* Only allow structure for now, can be relaxed for
3916		 * other types later.
3917		 */
3918		elem_type = btf_type_skip_modifiers(btf_vmlinux,
3919						    array_elem->type, NULL);
3920		if (!btf_type_is_struct(elem_type))
3921			goto error;
3922
3923		off = (off - moff) % elem_type->size;
3924		return btf_struct_access(log, elem_type, off, size, atype,
3925					 next_btf_id);
3926
3927error:
3928		bpf_log(log, "access beyond struct %s at off %u size %u\n",
3929			tname, off, size);
3930		return -EACCES;
3931	}
3932
3933	for_each_member(i, t, member) {
3934		/* offset of the field in bytes */
3935		moff = btf_member_bit_offset(t, member) / 8;
3936		if (off + size <= moff)
3937			/* won't find anything, field is already too far */
3938			break;
3939
3940		if (btf_member_bitfield_size(t, member)) {
3941			u32 end_bit = btf_member_bit_offset(t, member) +
3942				btf_member_bitfield_size(t, member);
3943
3944			/* off <= moff instead of off == moff because clang
3945			 * does not generate a BTF member for anonymous
3946			 * bitfield like the ":16" here:
3947			 * struct {
3948			 *	int :16;
3949			 *	int x:8;
3950			 * };
3951			 */
3952			if (off <= moff &&
3953			    BITS_ROUNDUP_BYTES(end_bit) <= off + size)
3954				return SCALAR_VALUE;
3955
3956			/* off may be accessing a following member
3957			 *
3958			 * or
3959			 *
3960			 * Doing partial access at either end of this
3961			 * bitfield.  Continue on this case also to
3962			 * treat it as not accessing this bitfield
3963			 * and eventually error out as field not
3964			 * found to keep it simple.
3965			 * It could be relaxed if there was a legit
3966			 * partial access case later.
3967			 */
3968			continue;
3969		}
3970
3971		/* In case of "off" is pointing to holes of a struct */
3972		if (off < moff)
3973			break;
3974
3975		/* type of the field */
3976		mtype = btf_type_by_id(btf_vmlinux, member->type);
3977		mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
3978
3979		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
3980					 &elem_type, &total_nelems);
3981		if (IS_ERR(mtype)) {
3982			bpf_log(log, "field %s doesn't have size\n", mname);
3983			return -EFAULT;
3984		}
3985
3986		mtrue_end = moff + msize;
3987		if (off >= mtrue_end)
3988			/* no overlap with member, keep iterating */
3989			continue;
3990
3991		if (btf_type_is_array(mtype)) {
3992			u32 elem_idx;
3993
3994			/* btf_resolve_size() above helps to
3995			 * linearize a multi-dimensional array.
3996			 *
3997			 * The logic here is treating an array
3998			 * in a struct as the following way:
3999			 *
4000			 * struct outer {
4001			 *	struct inner array[2][2];
4002			 * };
4003			 *
4004			 * looks like:
4005			 *
4006			 * struct outer {
4007			 *	struct inner array_elem0;
4008			 *	struct inner array_elem1;
4009			 *	struct inner array_elem2;
4010			 *	struct inner array_elem3;
4011			 * };
4012			 *
4013			 * When accessing outer->array[1][0], it moves
4014			 * moff to "array_elem2", set mtype to
4015			 * "struct inner", and msize also becomes
4016			 * sizeof(struct inner).  Then most of the
4017			 * remaining logic will fall through without
4018			 * caring the current member is an array or
4019			 * not.
4020			 *
4021			 * Unlike mtype/msize/moff, mtrue_end does not
4022			 * change.  The naming difference ("_true") tells
4023			 * that it is not always corresponding to
4024			 * the current mtype/msize/moff.
4025			 * It is the true end of the current
4026			 * member (i.e. array in this case).  That
4027			 * will allow an int array to be accessed like
4028			 * a scratch space,
4029			 * i.e. allow access beyond the size of
4030			 *      the array's element as long as it is
4031			 *      within the mtrue_end boundary.
4032			 */
4033
4034			/* skip empty array */
4035			if (moff == mtrue_end)
4036				continue;
4037
4038			msize /= total_nelems;
4039			elem_idx = (off - moff) / msize;
4040			moff += elem_idx * msize;
4041			mtype = elem_type;
4042		}
4043
4044		/* the 'off' we're looking for is either equal to start
4045		 * of this field or inside of this struct
4046		 */
4047		if (btf_type_is_struct(mtype)) {
4048			/* our field must be inside that union or struct */
4049			t = mtype;
4050
4051			/* adjust offset we're looking for */
4052			off -= moff;
4053			goto again;
4054		}
4055
4056		if (btf_type_is_ptr(mtype)) {
4057			const struct btf_type *stype;
4058			u32 id;
4059
4060			if (msize != size || off != moff) {
4061				bpf_log(log,
4062					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
4063					mname, moff, tname, off, size);
4064				return -EACCES;
4065			}
4066
4067			stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
4068			if (btf_type_is_struct(stype)) {
4069				*next_btf_id = id;
4070				return PTR_TO_BTF_ID;
4071			}
4072		}
4073
4074		/* Allow more flexible access within an int as long as
4075		 * it is within mtrue_end.
4076		 * Since mtrue_end could be the end of an array,
4077		 * that also allows using an array of int as a scratch
4078		 * space. e.g. skb->cb[].
4079		 */
4080		if (off + size > mtrue_end) {
4081			bpf_log(log,
4082				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
4083				mname, mtrue_end, tname, off, size);
4084			return -EACCES;
4085		}
4086
4087		return SCALAR_VALUE;
4088	}
4089	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
4090	return -EINVAL;
4091}
4092
4093int btf_resolve_helper_id(struct bpf_verifier_log *log,
4094			  const struct bpf_func_proto *fn, int arg)
4095{
4096	int id;
4097
4098	if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID || !btf_vmlinux)
4099		return -EINVAL;
4100	id = fn->btf_id[arg];
4101	if (!id || id > btf_vmlinux->nr_types)
4102		return -EINVAL;
4103	return id;
4104}
4105
4106static int __get_type_size(struct btf *btf, u32 btf_id,
4107			   const struct btf_type **bad_type)
4108{
4109	const struct btf_type *t;
4110
4111	if (!btf_id)
4112		/* void */
4113		return 0;
4114	t = btf_type_by_id(btf, btf_id);
4115	while (t && btf_type_is_modifier(t))
4116		t = btf_type_by_id(btf, t->type);
4117	if (!t) {
4118		*bad_type = btf->types[0];
4119		return -EINVAL;
4120	}
4121	if (btf_type_is_ptr(t))
4122		/* kernel size of pointer. Not BPF's size of pointer*/
4123		return sizeof(void *);
4124	if (btf_type_is_int(t) || btf_type_is_enum(t))
4125		return t->size;
4126	*bad_type = t;
4127	return -EINVAL;
4128}
4129
4130int btf_distill_func_proto(struct bpf_verifier_log *log,
4131			   struct btf *btf,
4132			   const struct btf_type *func,
4133			   const char *tname,
4134			   struct btf_func_model *m)
4135{
4136	const struct btf_param *args;
4137	const struct btf_type *t;
4138	u32 i, nargs;
4139	int ret;
4140
4141	if (!func) {
4142		/* BTF function prototype doesn't match the verifier types.
4143		 * Fall back to 5 u64 args.
4144		 */
4145		for (i = 0; i < 5; i++)
4146			m->arg_size[i] = 8;
4147		m->ret_size = 8;
4148		m->nr_args = 5;
4149		return 0;
4150	}
4151	args = (const struct btf_param *)(func + 1);
4152	nargs = btf_type_vlen(func);
4153	if (nargs >= MAX_BPF_FUNC_ARGS) {
4154		bpf_log(log,
4155			"The function %s has %d arguments. Too many.\n",
4156			tname, nargs);
4157		return -EINVAL;
4158	}
4159	ret = __get_type_size(btf, func->type, &t);
4160	if (ret < 0) {
4161		bpf_log(log,
4162			"The function %s return type %s is unsupported.\n",
4163			tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
4164		return -EINVAL;
4165	}
4166	m->ret_size = ret;
4167
4168	for (i = 0; i < nargs; i++) {
4169		ret = __get_type_size(btf, args[i].type, &t);
4170		if (ret < 0) {
4171			bpf_log(log,
4172				"The function %s arg%d type %s is unsupported.\n",
4173				tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4174			return -EINVAL;
4175		}
4176		m->arg_size[i] = ret;
4177	}
4178	m->nr_args = nargs;
4179	return 0;
4180}
4181
4182/* Compare BTFs of two functions assuming only scalars and pointers to context.
4183 * t1 points to BTF_KIND_FUNC in btf1
4184 * t2 points to BTF_KIND_FUNC in btf2
4185 * Returns:
4186 * EINVAL - function prototype mismatch
4187 * EFAULT - verifier bug
4188 * 0 - 99% match. The last 1% is validated by the verifier.
4189 */
4190static int btf_check_func_type_match(struct bpf_verifier_log *log,
4191				     struct btf *btf1, const struct btf_type *t1,
4192				     struct btf *btf2, const struct btf_type *t2)
4193{
4194	const struct btf_param *args1, *args2;
4195	const char *fn1, *fn2, *s1, *s2;
4196	u32 nargs1, nargs2, i;
4197
4198	fn1 = btf_name_by_offset(btf1, t1->name_off);
4199	fn2 = btf_name_by_offset(btf2, t2->name_off);
4200
4201	if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
4202		bpf_log(log, "%s() is not a global function\n", fn1);
4203		return -EINVAL;
4204	}
4205	if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
4206		bpf_log(log, "%s() is not a global function\n", fn2);
4207		return -EINVAL;
4208	}
4209
4210	t1 = btf_type_by_id(btf1, t1->type);
4211	if (!t1 || !btf_type_is_func_proto(t1))
4212		return -EFAULT;
4213	t2 = btf_type_by_id(btf2, t2->type);
4214	if (!t2 || !btf_type_is_func_proto(t2))
4215		return -EFAULT;
4216
4217	args1 = (const struct btf_param *)(t1 + 1);
4218	nargs1 = btf_type_vlen(t1);
4219	args2 = (const struct btf_param *)(t2 + 1);
4220	nargs2 = btf_type_vlen(t2);
4221
4222	if (nargs1 != nargs2) {
4223		bpf_log(log, "%s() has %d args while %s() has %d args\n",
4224			fn1, nargs1, fn2, nargs2);
4225		return -EINVAL;
4226	}
4227
4228	t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
4229	t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
4230	if (t1->info != t2->info) {
4231		bpf_log(log,
4232			"Return type %s of %s() doesn't match type %s of %s()\n",
4233			btf_type_str(t1), fn1,
4234			btf_type_str(t2), fn2);
4235		return -EINVAL;
4236	}
4237
4238	for (i = 0; i < nargs1; i++) {
4239		t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
4240		t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
4241
4242		if (t1->info != t2->info) {
4243			bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
4244				i, fn1, btf_type_str(t1),
4245				fn2, btf_type_str(t2));
4246			return -EINVAL;
4247		}
4248		if (btf_type_has_size(t1) && t1->size != t2->size) {
4249			bpf_log(log,
4250				"arg%d in %s() has size %d while %s() has %d\n",
4251				i, fn1, t1->size,
4252				fn2, t2->size);
4253			return -EINVAL;
4254		}
4255
4256		/* global functions are validated with scalars and pointers
4257		 * to context only. And only global functions can be replaced.
4258		 * Hence type check only those types.
4259		 */
4260		if (btf_type_is_int(t1) || btf_type_is_enum(t1))
4261			continue;
4262		if (!btf_type_is_ptr(t1)) {
4263			bpf_log(log,
4264				"arg%d in %s() has unrecognized type\n",
4265				i, fn1);
4266			return -EINVAL;
4267		}
4268		t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
4269		t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
4270		if (!btf_type_is_struct(t1)) {
4271			bpf_log(log,
4272				"arg%d in %s() is not a pointer to context\n",
4273				i, fn1);
4274			return -EINVAL;
4275		}
4276		if (!btf_type_is_struct(t2)) {
4277			bpf_log(log,
4278				"arg%d in %s() is not a pointer to context\n",
4279				i, fn2);
4280			return -EINVAL;
4281		}
4282		/* This is an optional check to make program writing easier.
4283		 * Compare names of structs and report an error to the user.
4284		 * btf_prepare_func_args() already checked that t2 struct
4285		 * is a context type. btf_prepare_func_args() will check
4286		 * later that t1 struct is a context type as well.
4287		 */
4288		s1 = btf_name_by_offset(btf1, t1->name_off);
4289		s2 = btf_name_by_offset(btf2, t2->name_off);
4290		if (strcmp(s1, s2)) {
4291			bpf_log(log,
4292				"arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
4293				i, fn1, s1, fn2, s2);
4294			return -EINVAL;
4295		}
4296	}
4297	return 0;
4298}
4299
4300/* Compare BTFs of given program with BTF of target program */
4301int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
4302			 struct btf *btf2, const struct btf_type *t2)
4303{
4304	struct btf *btf1 = prog->aux->btf;
4305	const struct btf_type *t1;
4306	u32 btf_id = 0;
4307
4308	if (!prog->aux->func_info) {
4309		bpf_log(&env->log, "Program extension requires BTF\n");
4310		return -EINVAL;
4311	}
4312
4313	btf_id = prog->aux->func_info[0].type_id;
4314	if (!btf_id)
4315		return -EFAULT;
4316
4317	t1 = btf_type_by_id(btf1, btf_id);
4318	if (!t1 || !btf_type_is_func(t1))
4319		return -EFAULT;
4320
4321	return btf_check_func_type_match(&env->log, btf1, t1, btf2, t2);
4322}
4323
4324/* Compare BTF of a function with given bpf_reg_state.
4325 * Returns:
4326 * EFAULT - there is a verifier bug. Abort verification.
4327 * EINVAL - there is a type mismatch or BTF is not available.
4328 * 0 - BTF matches with what bpf_reg_state expects.
4329 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
4330 */
4331int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
4332			     struct bpf_reg_state *reg)
4333{
4334	struct bpf_verifier_log *log = &env->log;
4335	struct bpf_prog *prog = env->prog;
4336	struct btf *btf = prog->aux->btf;
4337	const struct btf_param *args;
4338	const struct btf_type *t;
4339	u32 i, nargs, btf_id;
4340	const char *tname;
4341
4342	if (!prog->aux->func_info)
4343		return -EINVAL;
4344
4345	btf_id = prog->aux->func_info[subprog].type_id;
4346	if (!btf_id)
4347		return -EFAULT;
4348
4349	if (prog->aux->func_info_aux[subprog].unreliable)
4350		return -EINVAL;
4351
4352	t = btf_type_by_id(btf, btf_id);
4353	if (!t || !btf_type_is_func(t)) {
4354		/* These checks were already done by the verifier while loading
4355		 * struct bpf_func_info
4356		 */
4357		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
4358			subprog);
4359		return -EFAULT;
4360	}
4361	tname = btf_name_by_offset(btf, t->name_off);
4362
4363	t = btf_type_by_id(btf, t->type);
4364	if (!t || !btf_type_is_func_proto(t)) {
4365		bpf_log(log, "Invalid BTF of func %s\n", tname);
4366		return -EFAULT;
4367	}
4368	args = (const struct btf_param *)(t + 1);
4369	nargs = btf_type_vlen(t);
4370	if (nargs > 5) {
4371		bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
4372		goto out;
4373	}
4374	/* check that BTF function arguments match actual types that the
4375	 * verifier sees.
4376	 */
4377	for (i = 0; i < nargs; i++) {
4378		t = btf_type_by_id(btf, args[i].type);
4379		while (btf_type_is_modifier(t))
4380			t = btf_type_by_id(btf, t->type);
4381		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
4382			if (reg[i + 1].type == SCALAR_VALUE)
4383				continue;
4384			bpf_log(log, "R%d is not a scalar\n", i + 1);
4385			goto out;
4386		}
4387		if (btf_type_is_ptr(t)) {
4388			if (reg[i + 1].type == SCALAR_VALUE) {
4389				bpf_log(log, "R%d is not a pointer\n", i + 1);
4390				goto out;
4391			}
4392			/* If function expects ctx type in BTF check that caller
4393			 * is passing PTR_TO_CTX.
4394			 */
4395			if (btf_get_prog_ctx_type(log, btf, t, prog->type, i)) {
4396				if (reg[i + 1].type != PTR_TO_CTX) {
4397					bpf_log(log,
4398						"arg#%d expected pointer to ctx, but got %s\n",
4399						i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4400					goto out;
4401				}
4402				if (check_ctx_reg(env, &reg[i + 1], i + 1))
4403					goto out;
4404				continue;
4405			}
4406		}
4407		bpf_log(log, "Unrecognized arg#%d type %s\n",
4408			i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4409		goto out;
4410	}
4411	return 0;
4412out:
4413	/* Compiler optimizations can remove arguments from static functions
4414	 * or mismatched type can be passed into a global function.
4415	 * In such cases mark the function as unreliable from BTF point of view.
4416	 */
4417	prog->aux->func_info_aux[subprog].unreliable = true;
4418	return -EINVAL;
4419}
4420
4421/* Convert BTF of a function into bpf_reg_state if possible
4422 * Returns:
4423 * EFAULT - there is a verifier bug. Abort verification.
4424 * EINVAL - cannot convert BTF.
4425 * 0 - Successfully converted BTF into bpf_reg_state
4426 * (either PTR_TO_CTX or SCALAR_VALUE).
4427 */
4428int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
4429			  struct bpf_reg_state *reg)
4430{
4431	struct bpf_verifier_log *log = &env->log;
4432	struct bpf_prog *prog = env->prog;
4433	enum bpf_prog_type prog_type = prog->type;
4434	struct btf *btf = prog->aux->btf;
4435	const struct btf_param *args;
4436	const struct btf_type *t;
4437	u32 i, nargs, btf_id;
4438	const char *tname;
4439
4440	if (!prog->aux->func_info ||
4441	    prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
4442		bpf_log(log, "Verifier bug\n");
4443		return -EFAULT;
4444	}
4445
4446	btf_id = prog->aux->func_info[subprog].type_id;
4447	if (!btf_id) {
4448		bpf_log(log, "Global functions need valid BTF\n");
4449		return -EFAULT;
4450	}
4451
4452	t = btf_type_by_id(btf, btf_id);
4453	if (!t || !btf_type_is_func(t)) {
4454		/* These checks were already done by the verifier while loading
4455		 * struct bpf_func_info
4456		 */
4457		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
4458			subprog);
4459		return -EFAULT;
4460	}
4461	tname = btf_name_by_offset(btf, t->name_off);
4462
4463	if (log->level & BPF_LOG_LEVEL)
4464		bpf_log(log, "Validating %s() func#%d...\n",
4465			tname, subprog);
4466
4467	if (prog->aux->func_info_aux[subprog].unreliable) {
4468		bpf_log(log, "Verifier bug in function %s()\n", tname);
4469		return -EFAULT;
4470	}
4471	if (prog_type == BPF_PROG_TYPE_EXT)
4472		prog_type = prog->aux->linked_prog->type;
4473
4474	t = btf_type_by_id(btf, t->type);
4475	if (!t || !btf_type_is_func_proto(t)) {
4476		bpf_log(log, "Invalid type of function %s()\n", tname);
4477		return -EFAULT;
4478	}
4479	args = (const struct btf_param *)(t + 1);
4480	nargs = btf_type_vlen(t);
4481	if (nargs > 5) {
4482		bpf_log(log, "Global function %s() with %d > 5 args. Buggy compiler.\n",
4483			tname, nargs);
4484		return -EINVAL;
4485	}
4486	/* check that function returns int */
4487	t = btf_type_by_id(btf, t->type);
4488	while (btf_type_is_modifier(t))
4489		t = btf_type_by_id(btf, t->type);
4490	if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
4491		bpf_log(log,
4492			"Global function %s() doesn't return scalar. Only those are supported.\n",
4493			tname);
4494		return -EINVAL;
4495	}
4496	/* Convert BTF function arguments into verifier types.
4497	 * Only PTR_TO_CTX and SCALAR are supported atm.
4498	 */
4499	for (i = 0; i < nargs; i++) {
4500		t = btf_type_by_id(btf, args[i].type);
4501		while (btf_type_is_modifier(t))
4502			t = btf_type_by_id(btf, t->type);
4503		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
4504			reg[i + 1].type = SCALAR_VALUE;
4505			continue;
4506		}
4507		if (btf_type_is_ptr(t) &&
4508		    btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
4509			reg[i + 1].type = PTR_TO_CTX;
4510			continue;
4511		}
4512		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
4513			i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
4514		return -EINVAL;
4515	}
4516	return 0;
4517}
4518
4519void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
4520		       struct seq_file *m)
4521{
4522	const struct btf_type *t = btf_type_by_id(btf, type_id);
4523
4524	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
4525}
4526
4527#ifdef CONFIG_PROC_FS
4528static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
4529{
4530	const struct btf *btf = filp->private_data;
4531
4532	seq_printf(m, "btf_id:\t%u\n", btf->id);
4533}
4534#endif
4535
4536static int btf_release(struct inode *inode, struct file *filp)
4537{
4538	btf_put(filp->private_data);
4539	return 0;
4540}
4541
4542const struct file_operations btf_fops = {
4543#ifdef CONFIG_PROC_FS
4544	.show_fdinfo	= bpf_btf_show_fdinfo,
4545#endif
4546	.release	= btf_release,
4547};
4548
4549static int __btf_new_fd(struct btf *btf)
4550{
4551	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
4552}
4553
4554int btf_new_fd(const union bpf_attr *attr)
4555{
4556	struct btf *btf;
4557	int ret;
4558
4559	btf = btf_parse(u64_to_user_ptr(attr->btf),
4560			attr->btf_size, attr->btf_log_level,
4561			u64_to_user_ptr(attr->btf_log_buf),
4562			attr->btf_log_size);
4563	if (IS_ERR(btf))
4564		return PTR_ERR(btf);
4565
4566	ret = btf_alloc_id(btf);
4567	if (ret) {
4568		btf_free(btf);
4569		return ret;
4570	}
4571
4572	/*
4573	 * The BTF ID is published to the userspace.
4574	 * All BTF free must go through call_rcu() from
4575	 * now on (i.e. free by calling btf_put()).
4576	 */
4577
4578	ret = __btf_new_fd(btf);
4579	if (ret < 0)
4580		btf_put(btf);
4581
4582	return ret;
4583}
4584
4585struct btf *btf_get_by_fd(int fd)
4586{
4587	struct btf *btf;
4588	struct fd f;
4589
4590	f = fdget(fd);
4591
4592	if (!f.file)
4593		return ERR_PTR(-EBADF);
4594
4595	if (f.file->f_op != &btf_fops) {
4596		fdput(f);
4597		return ERR_PTR(-EINVAL);
4598	}
4599
4600	btf = f.file->private_data;
4601	refcount_inc(&btf->refcnt);
4602	fdput(f);
4603
4604	return btf;
4605}
4606
4607int btf_get_info_by_fd(const struct btf *btf,
4608		       const union bpf_attr *attr,
4609		       union bpf_attr __user *uattr)
4610{
4611	struct bpf_btf_info __user *uinfo;
4612	struct bpf_btf_info info;
4613	u32 info_copy, btf_copy;
4614	void __user *ubtf;
4615	u32 uinfo_len;
4616
4617	uinfo = u64_to_user_ptr(attr->info.info);
4618	uinfo_len = attr->info.info_len;
4619
4620	info_copy = min_t(u32, uinfo_len, sizeof(info));
4621	memset(&info, 0, sizeof(info));
4622	if (copy_from_user(&info, uinfo, info_copy))
4623		return -EFAULT;
4624
4625	info.id = btf->id;
4626	ubtf = u64_to_user_ptr(info.btf);
4627	btf_copy = min_t(u32, btf->data_size, info.btf_size);
4628	if (copy_to_user(ubtf, btf->data, btf_copy))
4629		return -EFAULT;
4630	info.btf_size = btf->data_size;
4631
4632	if (copy_to_user(uinfo, &info, info_copy) ||
4633	    put_user(info_copy, &uattr->info.info_len))
4634		return -EFAULT;
4635
4636	return 0;
4637}
4638
4639int btf_get_fd_by_id(u32 id)
4640{
4641	struct btf *btf;
4642	int fd;
4643
4644	rcu_read_lock();
4645	btf = idr_find(&btf_idr, id);
4646	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
4647		btf = ERR_PTR(-ENOENT);
4648	rcu_read_unlock();
4649
4650	if (IS_ERR(btf))
4651		return PTR_ERR(btf);
4652
4653	fd = __btf_new_fd(btf);
4654	if (fd < 0)
4655		btf_put(btf);
4656
4657	return fd;
4658}
4659
4660u32 btf_id(const struct btf *btf)
4661{
4662	return btf->id;
4663}