v6.13.7
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_misc.h"
#include "cpumask_common.h"

char _license[] SEC("license") = "GPL";

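/* The userspace side of the test is expected to set these: the pid of the
 * test task, and the number of possible CPUs.
 */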
int pid, nr_cpus;

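/* The structs below nest bpf_cpumask kptrs at varying depths and inside
 * arrays, so the tests can check that BTF field offsets are computed
 * correctly for each nesting shape.
 */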
struct kptr_nested {
	struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_pair {
	struct bpf_cpumask __kptr * mask_1;
	struct bpf_cpumask __kptr * mask_2;
};

struct kptr_nested_mid {
	int dummy;
	struct kptr_nested m;
};

struct kptr_nested_deep {
	struct kptr_nested_mid ptrs[2];
	struct kptr_nested_pair ptr_pairs[3];
};

struct kptr_nested_deep_array_1_2 {
	int dummy;
	struct bpf_cpumask __kptr * mask[CPUMASK_KPTR_FIELDS_MAX];
};

struct kptr_nested_deep_array_1_1 {
	int dummy;
	struct kptr_nested_deep_array_1_2 d_2;
};

struct kptr_nested_deep_array_1 {
	long dummy;
	struct kptr_nested_deep_array_1_1 d_1;
};

struct kptr_nested_deep_array_2_2 {
	long dummy[2];
	struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_deep_array_2_1 {
	int dummy;
	struct kptr_nested_deep_array_2_2 d_2[CPUMASK_KPTR_FIELDS_MAX];
};

struct kptr_nested_deep_array_2 {
	long dummy;
	struct kptr_nested_deep_array_2_1 d_1;
};

struct kptr_nested_deep_array_3_2 {
	long dummy[2];
	struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_deep_array_3_1 {
	int dummy;
	struct kptr_nested_deep_array_3_2 d_2;
};

struct kptr_nested_deep_array_3 {
	long dummy;
	struct kptr_nested_deep_array_3_1 d_1[CPUMASK_KPTR_FIELDS_MAX];
};

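/* Global mask kptrs; the private() macro (from cpumask_common.h) places
 * each group in its own named data section.
 */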
private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
private(MASK) static struct kptr_nested global_mask_nested[2];
private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
private(MASK_1) static struct kptr_nested_deep_array_1 global_mask_nested_deep_array_1;
private(MASK_2) static struct kptr_nested_deep_array_2 global_mask_nested_deep_array_2;
private(MASK_3) static struct kptr_nested_deep_array_3 global_mask_nested_deep_array_3;

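/* Only run the test bodies for tasks forked by the userspace test process. */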
static bool is_test_task(void)
{
	int cur_pid = bpf_get_current_pid_tgid() >> 32;

	return pid == cur_pid;
}

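/* Allocate four cpumasks, releasing any already-allocated masks on failure. */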
static bool create_cpumask_set(struct bpf_cpumask **out1,
			       struct bpf_cpumask **out2,
			       struct bpf_cpumask **out3,
			       struct bpf_cpumask **out4)
{
	struct bpf_cpumask *mask1, *mask2, *mask3, *mask4;

	mask1 = create_cpumask();
	if (!mask1)
		return false;

	mask2 = create_cpumask();
	if (!mask2) {
		bpf_cpumask_release(mask1);
		err = 3;
		return false;
	}

	mask3 = create_cpumask();
	if (!mask3) {
		bpf_cpumask_release(mask1);
		bpf_cpumask_release(mask2);
		err = 4;
		return false;
	}

	mask4 = create_cpumask();
	if (!mask4) {
		bpf_cpumask_release(mask1);
		bpf_cpumask_release(mask2);
		bpf_cpumask_release(mask3);
		err = 5;
		return false;
	}

	*out1 = mask1;
	*out2 = mask2;
	*out3 = mask3;
	*out4 = mask4;

	return true;
}

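/* Verify that a bpf_cpumask can be allocated and released. */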
SEC("tp_btf/task_newtask")
int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_release(cpumask);
	return 0;
}

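/* Verify that setting and clearing a single CPU is observed by
 * bpf_cpumask_test_cpu().
 */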
SEC("tp_btf/task_newtask")
int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_set_cpu(0, cpumask);
	if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear_cpu(0, cpumask);
	if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

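/* Verify that setall() fills the mask and clear() empties it. */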
SEC("tp_btf/task_newtask")
int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_setall(cpumask);
	if (!bpf_cpumask_full(cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear(cpumask);
	if (!bpf_cpumask_empty(cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

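/* Verify first()/first_zero() on an empty mask, and again after setting
 * CPU 0.
 */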
SEC("tp_btf/task_newtask")
int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
		err = 3;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
		bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask)));
		err = 4;
		goto release_exit;
	}

	bpf_cpumask_set_cpu(0, cpumask);
	if (bpf_cpumask_first(cast(cpumask)) != 0) {
		err = 5;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
		err = 6;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

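/* bpf_cpumask_first_and() of two disjoint masks must not find a CPU. */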
SEC("tp_btf/task_newtask")
int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2;
	u32 first;

	if (!is_test_task())
		return 0;

	mask1 = create_cpumask();
	if (!mask1)
		return 0;

	mask2 = create_cpumask();
	if (!mask2)
		goto release_exit;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);

	first = bpf_cpumask_first_and(cast(mask1), cast(mask2));
	if (first <= 1)
		err = 3;

release_exit:
	if (mask1)
		bpf_cpumask_release(mask1);
	if (mask2)
		bpf_cpumask_release(mask2);
	return 0;
}

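/* Verify that test_and_set()/test_and_clear() return the previous value of
 * the bit.
 */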
SEC("tp_btf/task_newtask")
int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 3;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 4;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
		err = 5;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

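/* Verify and/or/xor over two masks with CPUs 0 and 1 set respectively. */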
SEC("tp_btf/task_newtask")
int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);

	if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}
	if (!bpf_cpumask_empty(cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
		err = 8;
		goto release_exit;
	}
	if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
		err = 9;
		goto release_exit;
	}

	bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

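/* Verify intersects() on disjoint masks, and subset() against their union. */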
SEC("tp_btf/task_newtask")
int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
		err = 8;
		goto release_exit;
	}

	if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
		err = 9;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

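/* Verify copy() and the any/any_and distribute helpers, including on an
 * empty mask.
 */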
SEC("tp_btf/task_newtask")
int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
	int cpu;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));

	cpu = bpf_cpumask_any_distribute(cast(mask1));
	if (cpu != 0) {
		err = 6;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_distribute(cast(dst2));
	if (cpu < nr_cpus) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_copy(dst2, cast(dst1));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 8;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_distribute(cast(dst2));
	if (cpu > 1) {
		err = 9;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2));
	if (cpu < nr_cpus) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

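/* Ownership of the mask moves into the map on a successful insert, so no
 * release is needed here.
 */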
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask))
		err = 3;

	return 0;
}

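/* Insert a mask into the map, look it up again, and release it after taking
 * it back out with bpf_kptr_xchg().
 */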
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask)) {
		err = 3;
		return 0;
	}

	v = cpumask_map_value_lookup();
	if (!v) {
		err = 4;
		return 0;
	}

	cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
	if (cpumask)
		bpf_cpumask_release(cpumask);
	else
		err = 5;

	return 0;
}

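/* A cpumask kptr published in a global variable with bpf_kptr_xchg() may
 * only be dereferenced under bpf_rcu_read_lock().
 */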
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	if (!is_test_task())
		return 0;

	local = create_cpumask();
	if (!local)
		return 0;

	prev = bpf_kptr_xchg(&global_mask, local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask;
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
	bpf_rcu_read_unlock();

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	if (!is_test_task())
		return 0;

	/* Kptr arrays with one element are special cased, being treated
	 * just like a single pointer.
	 */

	local = create_cpumask();
	if (!local)
		return 0;

	prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask_array_one[0];
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	bpf_rcu_read_unlock();

	return 0;
}

static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
				  struct bpf_cpumask **mask1)
{
	struct bpf_cpumask *local;

	if (!is_test_task())
		return 0;

	/* Check that two kptrs in the array work independently of each other. */

	local = create_cpumask();
	if (!local)
		return 0;

	bpf_rcu_read_lock();

	local = bpf_kptr_xchg(mask0, local);
	if (local) {
		err = 1;
		goto err_exit;
	}

	/* [<mask 0>, *] */
	if (!*mask0) {
		err = 2;
		goto err_exit;
	}

	if (!mask1)
		goto err_exit;

	/* [*, NULL] */
	if (*mask1) {
		err = 3;
		goto err_exit;
	}

	local = create_cpumask();
	if (!local) {
		err = 9;
		goto err_exit;
	}

	local = bpf_kptr_xchg(mask1, local);
	if (local) {
		err = 10;
		goto err_exit;
	}

	/* [<mask 0>, <mask 1>] */
	if (!*mask0 || !*mask1 || *mask0 == *mask1) {
		err = 11;
		goto err_exit;
	}

err_exit:
	if (local)
		bpf_cpumask_release(local);
	bpf_rcu_read_unlock();
	return 0;
}

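/* Run the two-kptr checks against a flat array, a two-dimensional array,
 * and an array of nested structs.
 */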
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
}

/* Ensure that field->offset is correctly advanced from one nested struct
 * or array sub-tree to the next. kptr_nested_deep comprises two sub-trees,
 * ptrs and ptr_pairs. By calling bpf_kptr_xchg() on every single kptr in
 * both sub-trees, the verifier will reject the program if the
 * field->offset of any kptr is incorrect.
 *
 * For instance, with 10 kptrs in a nested struct and a program that
 * accesses each kptr individually via bpf_kptr_xchg(), a correct compiler
 * emits instructions touching 10 distinct offsets. If the field->offset
 * values of any pair of kptrs were wrongly identical, the btf_record for
 * this nested struct would hold fewer than 10 unique offsets, and the
 * verifier would fail to match some of the offsets emitted by the
 * compiler.
 *
 * Even if no field->offset values are duplicated, the verifier will fail
 * to find a btf_field for an instruction accessing a kptr whenever the
 * corresponding field->offset points at a random incorrect offset.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
{
	int r, i;

	r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
				   &global_mask_nested_deep.ptrs[1].m.mask);
	if (r)
		return r;

	for (i = 0; i < 3; i++) {
		r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1,
					   &global_mask_nested_deep.ptr_pairs[i].mask_2);
		if (r)
			return r;
	}
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
{
	int i;

	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
		_global_mask_array_rcu(&global_mask_nested_deep_array_1.d_1.d_2.mask[i], NULL);

	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
		_global_mask_array_rcu(&global_mask_nested_deep_array_2.d_1.d_2[i].mask, NULL);

	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
		_global_mask_array_rcu(&global_mask_nested_deep_array_3.d_1[i].d_2.mask, NULL);

	return 0;
}

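/* Verify that bpf_cpumask_weight() tracks the number of set CPUs. */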
SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local;

	if (!is_test_task())
		return 0;

	local = create_cpumask();
	if (!local)
		return 0;

	if (bpf_cpumask_weight(cast(local)) != 0) {
		err = 3;
		goto out;
	}

	bpf_cpumask_set_cpu(0, local);
	if (bpf_cpumask_weight(cast(local)) != 1) {
		err = 4;
		goto out;
	}

	/*
	 * Make sure that adding another CPU changes the weight. Check that
	 * the CPU was actually set first, so the test also passes on UP
	 * machines where CPU 1 does not exist.
	 */
	bpf_cpumask_set_cpu(1, local);
	if (bpf_cpumask_test_cpu(1, cast(local)) && bpf_cpumask_weight(cast(local)) != 2) {
		err = 5;
		goto out;
	}

	bpf_cpumask_clear(local);
	if (bpf_cpumask_weight(cast(local)) != 0) {
		err = 6;
		goto out;
	}
out:
	bpf_cpumask_release(local);
	return 0;
}

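/* The verifier must track that mask1 and mask2 may be NULL after
 * bpf_cpumask_create(); __success asserts that the program loads.
 */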
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2;

	mask1 = bpf_cpumask_create();
	mask2 = bpf_cpumask_create();

	if (!mask1 || !mask2)
		goto free_masks_return;

	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1);
	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2);

free_masks_return:
	if (mask1)
		bpf_cpumask_release(mask1);
	if (mask2)
		bpf_cpumask_release(mask2);
	return 0;
}