Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.15.
   1// SPDX-License-Identifier: GPL-2.0
   2/* Converted from tools/testing/selftests/bpf/verifier/helper_value_access.c */
   3
   4#include <linux/bpf.h>
   5#include <bpf/bpf_helpers.h>
   6#include "bpf_misc.h"
   7
/* 16-byte value type for map_hash_16b (two 8-byte fields). */
struct other_val {
	long long foo;
	long long bar;
};
  12
/* Hash map with u64 keys and 16-byte (struct other_val) values. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct other_val);
} map_hash_16b SEC(".maps");
  19
#define MAX_ENTRIES 11

/* 48-byte value type for map_hash_48b: 4-byte index + 11 x 4-byte foo[]. */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
  26
/* Hash map with u64 keys and 48-byte (struct test_val) values. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");
  33
/* Hash map with u64 keys and 8-byte (long long) values. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");
  40
SEC("tracepoint")
__description("helper access to map: full range")
__success
/* Look up a map value and pass its full size (sizeof(struct test_val))
 * to bpf_probe_read_kernel(); a read covering the entire value must be
 * accepted by the verifier.
 */
__naked void access_to_map_full_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = %[sizeof_test_val];			\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(sizeof_test_val, sizeof(struct test_val))
	: __clobber_all);
}
  66
SEC("tracepoint")
__description("helper access to map: partial range")
__success
/* An 8-byte read from the start of the 48-byte value must be accepted. */
__naked void access_to_map_partial_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
  91
/* Call a function taking a pointer and a size which doesn't allow the size to
 * be zero (i.e. bpf_trace_printk() declares the second argument to be
 * ARG_CONST_SIZE, not ARG_CONST_SIZE_OR_ZERO). We attempt to pass zero for the
 * size and expect to fail.
 */
SEC("tracepoint")
__description("helper access to map: empty range")
__failure __msg("R2 invalid zero-sized read: u64=[0,0]")
__naked void access_to_map_empty_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = 0;						\
	call %[bpf_trace_printk];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_trace_printk),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 120
 121/* Like the test above, but this time the size register is not known to be zero;
 122 * its lower-bound is zero though, which is still unacceptable.
 123 */
 124SEC("tracepoint")
 125__description("helper access to map: possibly-empty ange")
 126__failure __msg("R2 invalid zero-sized read: u64=[0,4]")
 127__naked void access_to_map_possibly_empty_range(void)
 128{
 129	asm volatile ("                                         \
 130	r2 = r10;                                               \
 131	r2 += -8;                                               \
 132	r1 = 0;                                                 \
 133	*(u64*)(r2 + 0) = r1;                                   \
 134	r1 = %[map_hash_48b] ll;                                \
 135	call %[bpf_map_lookup_elem];                            \
 136	if r0 == 0 goto l0_%=;                                  \
 137	r1 = r0;                                                \
 138	/* Read an unknown value */                             \
 139	r7 = *(u64*)(r0 + 0);                                   \
 140	/* Make it small and positive, to avoid other errors */ \
 141	r7 &= 4;                                                \
 142	r2 = 0;                                                 \
 143	r2 += r7;                                               \
 144	call %[bpf_trace_printk];                               \
 145l0_%=:	exit;                                               \
 146"	:
 147	: __imm(bpf_map_lookup_elem),
 148	  __imm(bpf_trace_printk),
 149	  __imm_addr(map_hash_48b)
 150	: __clobber_all);
 151}
 152
SEC("tracepoint")
__description("helper access to map: out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=0 size=56")
/* Size sizeof(struct test_val) + 8 overruns the 48-byte value: rejected. */
__naked void map_out_of_bound_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) + 8)
	: __clobber_all);
}
 178
SEC("tracepoint")
__description("helper access to map: negative range")
__failure __msg("R2 min value is negative")
/* A negative size (-8) in R2 must be rejected. */
__naked void access_to_map_negative_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = -8;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 203
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): full range")
__success
/* Advance the value pointer by a constant immediate (offsetof(foo)), then
 * read exactly the remaining bytes: accepted.
 */
__naked void via_const_imm_full_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 231
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): partial range")
__success
/* Constant-immediate offset plus an 8-byte read within bounds: accepted. */
__naked void via_const_imm_partial_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 258
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): empty range")
__failure __msg("R2 invalid zero-sized read")
/* Zero size passed to bpf_trace_printk() (ARG_CONST_SIZE): rejected even
 * though the adjusted pointer itself is valid.
 */
__naked void via_const_imm_empty_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = 0;						\
	call %[bpf_trace_printk];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_trace_printk),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 284
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=4 size=52")
/* Offset by offsetof(foo) then read 8 bytes past the end: rejected. */
__naked void imm_out_of_bound_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 312
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): negative range (> adjustment)")
__failure __msg("R2 min value is negative")
/* Negative size whose magnitude exceeds the +4 pointer adjustment: rejected. */
__naked void const_imm_negative_range_adjustment_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = -8;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 339
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): negative range (< adjustment)")
__failure __msg("R2 min value is negative")
/* Negative size smaller in magnitude than the adjustment: still rejected. */
__naked void const_imm_negative_range_adjustment_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = -1;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 366
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): full range")
__success
/* Same as the const-imm full-range test, but the offset is first loaded
 * into a register (r3) before being added to the value pointer.
 */
__naked void via_const_reg_full_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 395
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): partial range")
__success
/* Register-held constant offset plus an in-bounds 8-byte read: accepted. */
__naked void via_const_reg_partial_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 423
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): empty range")
__failure __msg("R2 invalid zero-sized read")
/* Zero offset via register and zero size to bpf_trace_printk(): rejected. */
__naked void via_const_reg_empty_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = 0;						\
	r1 += r3;					\
	r2 = 0;						\
	call %[bpf_trace_printk];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_trace_printk),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 449
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=4 size=52")
/* Register offset then a read 8 bytes past the end of the value: rejected. */
__naked void reg_out_of_bound_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 478
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): negative range (> adjustment)")
__failure __msg("R2 min value is negative")
/* Register offset with size -8 (magnitude > adjustment): rejected. */
__naked void const_reg_negative_range_adjustment_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = -8;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 506
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): negative range (< adjustment)")
__failure __msg("R2 min value is negative")
/* Register offset with size -1 (magnitude < adjustment): still rejected. */
__naked void const_reg_negative_range_adjustment_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = -1;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 534
SEC("tracepoint")
__description("helper access to adjusted map (via variable): full range")
__success
/* The offset comes from map data (r3) but is bounded above by a conditional
 * jump (r3 <= offsetof(foo)), so offset + remaining size stays in bounds:
 * accepted.
 */
__naked void map_via_variable_full_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[test_val_foo] goto l0_%=;		\
	r1 += r3;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 564
SEC("tracepoint")
__description("helper access to adjusted map (via variable): partial range")
__success
/* Bounded variable offset plus an 8-byte read within bounds: accepted. */
__naked void map_via_variable_partial_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[test_val_foo] goto l0_%=;		\
	r1 += r3;					\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 593
SEC("tracepoint")
__description("helper access to adjusted map (via variable): empty range")
__failure __msg("R2 invalid zero-sized read")
/* Bounded variable offset but zero size to bpf_trace_printk(): rejected. */
__naked void map_via_variable_empty_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[test_val_foo] goto l0_%=;		\
	r1 += r3;					\
	r2 = 0;						\
	call %[bpf_trace_printk];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_trace_printk),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 621
SEC("tracepoint")
__description("helper access to adjusted map (via variable): no max check")
__failure __msg("R1 unbounded memory access")
/* The variable offset read from map data is never upper-bounded before
 * being added to the value pointer: rejected as unbounded access.
 */
__naked void via_variable_no_max_check_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	r1 += r3;					\
	r2 = 1;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 648
SEC("tracepoint")
__description("helper access to adjusted map (via variable): wrong max check")
__failure __msg("invalid access to map value, value_size=48 off=4 size=45")
/* The offset is bounded, but the size is one byte too large for the
 * worst-case offset (remaining bytes + 1): rejected.
 */
__naked void via_variable_wrong_max_check_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[test_val_foo] goto l0_%=;		\
	r1 += r3;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 1),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}
 678
SEC("tracepoint")
__description("helper access to map: bounds check using <, good access")
__success
/* Direct 1-byte store at a variable offset that is proven < 32 via an
 * unsigned '<' branch: accepted.
 */
__naked void bounds_check_using_good_access_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 < 32 goto l1_%=;				\
	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 707
SEC("tracepoint")
__description("helper access to map: bounds check using <, bad access")
__failure __msg("R1 unbounded memory access")
/* Branch polarity inverted vs. the good test: the store happens on the
 * path where r3 was NOT proven < 32, so R1 is unbounded: rejected.
 */
__naked void bounds_check_using_bad_access_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 < 32 goto l1_%=;				\
	r1 += r3;					\
l0_%=:	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 736
SEC("tracepoint")
__description("helper access to map: bounds check using <=, good access")
__success
/* As above, but the bound is proven with unsigned '<=' (r3 <= 32): accepted. */
__naked void bounds_check_using_good_access_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 <= 32 goto l1_%=;				\
	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 765
SEC("tracepoint")
__description("helper access to map: bounds check using <=, bad access")
__failure __msg("R1 unbounded memory access")
/* Store on the path where r3 was NOT proven <= 32: unbounded, rejected. */
__naked void bounds_check_using_bad_access_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 <= 32 goto l1_%=;				\
	r1 += r3;					\
l0_%=:	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 794
SEC("tracepoint")
__description("helper access to map: bounds check using s<, good access")
__success
/* Signed bounds: r3 proven s< 32 and not s< 0, i.e. 0 <= r3 < 32: accepted. */
__naked void check_using_s_good_access_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 s< 32 goto l1_%=;				\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s< 0 goto l2_%=;				\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 824
SEC("tracepoint")
__description("helper access to map: bounds check using s<, good access 2")
__success
/* Lower bound is -3 instead of 0; the u32 load keeps the value non-negative,
 * so with s< 32 the access is still in bounds: accepted.
 */
__naked void using_s_good_access_2_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 s< 32 goto l1_%=;				\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s< -3 goto l2_%=;				\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 854
SEC("tracepoint")
__description("helper access to map: bounds check using s<, bad access")
__failure __msg("R1 min value is negative")
/* Same shape as "good access 2" but a u64 load: r3 may be negative
 * (min -3), so R1 can go below the value start: rejected.
 */
__naked void check_using_s_bad_access_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u64*)(r0 + 0);				\
	if r3 s< 32 goto l1_%=;				\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s< -3 goto l2_%=;				\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 884
SEC("tracepoint")
__description("helper access to map: bounds check using s<=, good access")
__success
/* Signed bounds via s<=: r3 proven s<= 32 and not s<= 0: accepted. */
__naked void check_using_s_good_access_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 s<= 32 goto l1_%=;			\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s<= 0 goto l2_%=;				\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 914
SEC("tracepoint")
__description("helper access to map: bounds check using s<=, good access 2")
__success
/* Lower bound -3 with s<= and a u32 load (value can't actually be
 * negative): accepted.
 */
__naked void using_s_good_access_2_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 s<= 32 goto l1_%=;			\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s<= -3 goto l2_%=;			\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 944
SEC("tracepoint")
__description("helper access to map: bounds check using s<=, bad access")
__failure __msg("R1 min value is negative")
/* u64 load means r3 can be as low as -2 after the s<= checks, so R1 can
 * point below the value: rejected.
 */
__naked void check_using_s_bad_access_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u64*)(r0 + 0);				\
	if r3 s<= 32 goto l1_%=;			\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s<= -3 goto l2_%=;			\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}
 974
SEC("tracepoint")
__description("map lookup helper access to map")
__success
/* A map value pointer may itself be used as the key argument (r2) of a
 * second bpf_map_lookup_elem() call: accepted.
 */
__naked void lookup_helper_access_to_map(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}
 997
SEC("tracepoint")
__description("map update helper access to map")
__success
/* A looked-up value pointer may serve as both key (r2) and value (r3)
 * arguments of bpf_map_update_elem() on the same 16-byte map: accepted.
 */
__naked void update_helper_access_to_map(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r4 = 0;						\
	r3 = r0;					\
	r2 = r0;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_update_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}
1023
SEC("tracepoint")
__description("map update helper access to map: wrong size")
__failure __msg("invalid access to map value, value_size=8 off=0 size=16")
/* Value looked up from the 8-byte map is passed to an update on the
 * 16-byte map, which needs 16 accessible bytes: rejected.
 */
__naked void access_to_map_wrong_size(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r4 = 0;						\
	r3 = r0;					\
	r2 = r0;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_update_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_16b),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
1050
SEC("tracepoint")
__description("map helper access to adjusted map (via const imm)")
__success
/* Key pointer = value pointer + offsetof(other_val, bar); the remaining
 * 8 bytes cover the u64 key the lookup helper needs: accepted.
 */
__naked void adjusted_map_via_const_imm(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r2 += %[other_val_bar];				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(other_val_bar, offsetof(struct other_val, bar))
	: __clobber_all);
}
1075
SEC("tracepoint")
__description("map helper access to adjusted map (via const imm): out-of-bound 1")
__failure __msg("invalid access to map value, value_size=16 off=12 size=8")
/* Offset sizeof(other_val) - 4 = 12 leaves only 4 bytes, but the key
 * needs 8: rejected.
 */
__naked void imm_out_of_bound_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r2 += %[__imm_0];				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(__imm_0, sizeof(struct other_val) - 4)
	: __clobber_all);
}
1100
/*
 * Negative-offset variant: the value pointer is moved 4 bytes *before*
 * the start of the map value (r2 += -4) and then used as a lookup key.
 * The verifier must reject the resulting 8-byte read at off=-4.
 */
SEC("tracepoint")
__description("map helper access to adjusted map (via const imm): out-of-bound 2")
__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
__naked void imm_out_of_bound_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r2 += -4;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}
1124
/*
 * Like the "via const imm" success case, but the constant offset
 * (offsetof(struct other_val, bar) == 8) is first loaded into a register
 * (r3) and added via register arithmetic.  The verifier must track the
 * known-constant register and accept the in-bounds 8-byte key read.
 */
SEC("tracepoint")
__description("map helper access to adjusted map (via const reg)")
__success
__naked void adjusted_map_via_const_reg(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = %[other_val_bar];				\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(other_val_bar, offsetof(struct other_val, bar))
	: __clobber_all);
}
1150
/*
 * Register-offset variant of out-of-bound 1: sizeof(struct other_val) - 4
 * == 12 is loaded into r3 and added to the value pointer, leaving only
 * 4 of the 16 value bytes for the 8-byte key read.  The verifier must
 * reject it with the off=12 size=8 message pinned below.
 */
SEC("tracepoint")
__description("map helper access to adjusted map (via const reg): out-of-bound 1")
__failure __msg("invalid access to map value, value_size=16 off=12 size=8")
__naked void reg_out_of_bound_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = %[__imm_0];				\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(__imm_0, sizeof(struct other_val) - 4)
	: __clobber_all);
}
1176
/*
 * Register-offset variant of out-of-bound 2: -4 is loaded into r3 and
 * added to the value pointer, placing it before the start of the map
 * value.  The verifier must reject the 8-byte key read at off=-4.
 */
SEC("tracepoint")
__description("map helper access to adjusted map (via const reg): out-of-bound 2")
__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
__naked void reg_out_of_bound_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = -4;					\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}
1201
/*
 * Variable-offset case: a 32-bit offset is read from the map value
 * itself, bounded with an unsigned check against
 * offsetof(struct other_val, bar) == 8, and then added to the value
 * pointer.  The bounded range [0, 8] keeps the 8-byte key read within
 * the 16-byte value, so the verifier must accept it (__success).
 */
SEC("tracepoint")
__description("map helper access to adjusted map (via variable)")
__success
__naked void to_adjusted_map_via_variable(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[other_val_bar] goto l0_%=;		\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(other_val_bar, offsetof(struct other_val, bar))
	: __clobber_all);
}
1228
/*
 * Variable offset with NO upper-bound check: the u32 loaded from the map
 * value is added to the value pointer unconstrained, so the resulting
 * pointer can point anywhere.  The verifier must reject it with the
 * "R2 unbounded memory access" message pinned below.
 */
SEC("tracepoint")
__description("map helper access to adjusted map (via variable): no max check")
__failure
__msg("R2 unbounded memory access, make sure to bounds check any such access")
__naked void via_variable_no_max_check_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}
1254
/*
 * Variable offset with an off-by-one bound: the check allows offsets up
 * to offsetof(struct other_val, bar) + 1 == 9, so the worst case leaves
 * only 7 of the 16 value bytes for the 8-byte key read.  The verifier
 * must reject it with the off=9 size=8 message pinned below.
 */
SEC("tracepoint")
__description("map helper access to adjusted map (via variable): wrong max check")
__failure __msg("invalid access to map value, value_size=16 off=9 size=8")
__naked void via_variable_wrong_max_check_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[__imm_0] goto l0_%=;			\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(__imm_0, offsetof(struct other_val, bar) + 1)
	: __clobber_all);
}
1281
/* Declare a GPL-compatible license, required to use GPL-only BPF helpers. */
char _license[] SEC("license") = "GPL";