/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A central FIFO sched_ext scheduler which demonstrates the following:
 *
 * a. Making all scheduling decisions from one CPU:
 *
 *    The central CPU is the only one making scheduling decisions. All other
 *    CPUs kick the central CPU when they run out of tasks to run.
 *
 *    There is one global BPF queue and the central CPU schedules all CPUs by
 *    dispatching from the global queue to each CPU's local dsq from dispatch().
 *    This isn't the most straightforward approach; e.g. it'd be easier to
 *    bounce through per-CPU BPF queues. The current design is chosen to
 *    maximally utilize and verify various SCX mechanisms such as LOCAL_ON
 *    dispatching.
 *
 * b. Tickless operation
 *
 *    All tasks are dispatched with the infinite slice which allows stopping the
 *    ticks on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full
 *    parameter. The tickless operation can be observed through
 *    /proc/interrupts.
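 *
 *    For example (assuming an 8-CPU system booted with "nohz_full=1-7" and
 *    the central CPU left at the default CPU 0), the LOC (local timer
 *    interrupt) count in /proc/interrupts for a nohz_full CPU running a
 *    single busy task should stop climbing once this scheduler is attached.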
 *
 *    Periodic switching is enforced by a periodic timer checking all CPUs and
 *    preempting them as necessary. The timer is pinned to the central CPU with
 *    BPF_F_TIMER_CPU_PIN on kernels which support it; on older kernels it may
 *    run on any CPU.
 *
 * c. Preemption
 *
 *    Kthreads are unconditionally queued to the head of a matching local dsq
 *    and dispatched with SCX_ENQ_PREEMPT. This ensures that a kthread is always
 *    prioritized over user threads, which is required for ensuring forward
 *    progress as e.g. the periodic timer may run on a ksoftirqd and if the
 *    ksoftirqd gets starved by a user thread, nothing else may be able to
 *    preempt that user thread.
 *
 *    SCX_KICK_PREEMPT is used to trigger rescheduling so that CPUs move on to
 *    their next tasks.
 *
 * This scheduler is designed to maximize usage of various SCX mechanisms. A
 * more practical implementation would likely put the scheduling loop outside
 * the central CPU's dispatch() path and add some form of priority mechanism.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

enum {
	FALLBACK_DSQ_ID		= 0,
	MS_TO_NS		= 1000LLU * 1000,
	TIMER_INTERVAL_NS	= 1 * MS_TO_NS,
};

const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1;	/* !0 for veristat, set during init */
const volatile u64 slice_ns = SCX_SLICE_DFL;

bool timer_pinned = true;
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
u64 nr_overflows;

UEI_DEFINE(uei);

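/*
 * The single global FIFO of runnable task pids. central_enqueue() pushes onto
 * it and the central CPU pops from it in dispatch_to_cpu().
 */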
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 4096);
	__type(value, s32);
} central_q SEC(".maps");

/* can't use percpu map due to bad lookups */
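/*
 * cpu_gimme_task[cpu] is set by a non-central CPU in dispatch() to ask the
 * central CPU for a task and cleared once one has been dispatched to it.
 * cpu_started_at[cpu] records when the CPU started running its current task
 * (0 means idle) and is consulted by the timer to detect expired slices.
 */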
bool RESIZABLE_ARRAY(data, cpu_gimme_task);
u64 RESIZABLE_ARRAY(data, cpu_started_at);

struct central_timer {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct central_timer);
} central_timer SEC(".maps");

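/*
 * Wrap-around safe "a < b" comparison for u64 timestamps, the same idiom as
 * the kernel's time_before().
 */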
static bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	/*
	 * Steer wakeups to the central CPU as much as possible to avoid
	 * disturbing other CPUs. It's safe to blindly return the central cpu as
	 * select_cpu() is a hint and if @p can't be on it, the kernel will
	 * automatically pick a fallback CPU.
	 */
	return central_cpu;
}

void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 pid = p->pid;

	__sync_fetch_and_add(&nr_total, 1);

	/*
	 * Push per-cpu kthreads at the head of local dsq's and preempt the
	 * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked
	 * behind other threads which is necessary for forward progress
	 * guarantee as we depend on the BPF timer which may run from ksoftirqd.
	 */
	if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
		__sync_fetch_and_add(&nr_locals, 1);
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
				   enq_flags | SCX_ENQ_PREEMPT);
		return;
	}

	if (bpf_map_push_elem(&central_q, &pid, 0)) {
		__sync_fetch_and_add(&nr_overflows, 1);
		scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
		return;
	}

	__sync_fetch_and_add(&nr_queued, 1);

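	/*
	 * @p is now sitting in the central queue. If it isn't already running
	 * on a CPU, kick the central CPU so its dispatch path runs and @p gets
	 * placed promptly.
	 */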
	if (!scx_bpf_task_running(p))
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}

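/*
 * Pop pids off the central queue until a task that may run on @cpu is found
 * and insert it into @cpu's local DSQ. Tasks which can't run on @cpu are
 * bounced to the fallback DSQ instead. Returns true if a task was dispatched
 * to @cpu.
 */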
static bool dispatch_to_cpu(s32 cpu)
{
	struct task_struct *p;
	s32 pid;

	bpf_repeat(BPF_MAX_LOOPS) {
		if (bpf_map_pop_elem(&central_q, &pid))
			break;

		__sync_fetch_and_sub(&nr_queued, 1);

		p = bpf_task_from_pid(pid);
		if (!p) {
			__sync_fetch_and_add(&nr_lost_pids, 1);
			continue;
		}

		/*
		 * If we can't run the task at the top, do the dumb thing and
		 * bounce it to the fallback dsq.
		 */
		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
			__sync_fetch_and_add(&nr_mismatches, 1);
			scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
			bpf_task_release(p);
			/*
			 * We might run out of dispatch buffer slots if we keep
			 * dispatching to the fallback DSQ without dispatching
			 * to the local DSQ of the target CPU. In such a case,
			 * break the loop now as the next dispatch operation
			 * would fail.
			 */
			if (!scx_bpf_dispatch_nr_slots())
				break;
			continue;
		}

		/* dispatch to local and mark that @cpu doesn't need more */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);

		if (cpu != central_cpu)
			scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);

		bpf_task_release(p);
		return true;
	}

	return false;
}

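/*
 * On the central CPU, feed every CPU which has flagged cpu_gimme_task and
 * then look for a task for the central CPU itself. On all other CPUs, consume
 * the fallback DSQ if it has tasks; otherwise flag cpu_gimme_task and kick
 * the central CPU to dispatch on our behalf.
 */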
void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
{
	if (cpu == central_cpu) {
		/* dispatch for all other CPUs first */
		__sync_fetch_and_add(&nr_dispatches, 1);

		bpf_for(cpu, 0, nr_cpu_ids) {
			bool *gimme;

			if (!scx_bpf_dispatch_nr_slots())
				break;

			/* central's gimme is never set */
			gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
			if (!gimme || !*gimme)
				continue;

			if (dispatch_to_cpu(cpu))
				*gimme = false;
		}

		/*
		 * Retry if we ran out of dispatch buffer slots as we might have
		 * skipped some CPUs and also need to dispatch for self. The ext
		 * core automatically retries if the local dsq is empty but we
		 * can't rely on that as we're dispatching for other CPUs too.
		 * Kick self explicitly to retry.
		 */
		if (!scx_bpf_dispatch_nr_slots()) {
			__sync_fetch_and_add(&nr_retries, 1);
			scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
			return;
		}

		/* look for a task to run on the central CPU */
		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
			return;
		dispatch_to_cpu(central_cpu);
	} else {
		bool *gimme;

		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
			return;

		gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
		if (gimme)
			*gimme = true;

		/*
		 * Force dispatch on the scheduling CPU so that it finds a task
		 * to run for us.
		 */
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
	}
}

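/*
 * Track when each CPU started running its current task so that the periodic
 * timer can tell whether the slice has expired. 0 means the CPU is idle.
 */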
void BPF_STRUCT_OPS(central_running, struct task_struct *p)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
	if (started_at)
		*started_at = bpf_ktime_get_ns() ?: 1;	/* 0 indicates idle */
}

void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
	if (started_at)
		*started_at = 0;
}

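/*
 * The periodic timer stands in for the scheduler tick. Every interval it
 * kicks each non-central CPU whose slice has expired (or which sits idle) as
 * long as there is work already queued for that CPU or tasks waiting in the
 * central queue, and then re-arms itself.
 */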
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
	u64 now = bpf_ktime_get_ns();
	u64 nr_to_kick = nr_queued;
	s32 i, curr_cpu;

	curr_cpu = bpf_get_smp_processor_id();
	if (timer_pinned && (curr_cpu != central_cpu)) {
		scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
			      curr_cpu, central_cpu);
		return 0;
	}

	bpf_for(i, 0, nr_cpu_ids) {
		s32 cpu = (nr_timers + i) % nr_cpu_ids;
		u64 *started_at;

		if (cpu == central_cpu)
			continue;

		/* kick iff the current one exhausted its slice */
		started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
		if (started_at && *started_at &&
		    vtime_before(now, *started_at + slice_ns))
			continue;

		/* and there's something pending */
		if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
		    scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
			;
		else if (nr_to_kick)
			nr_to_kick--;
		else
			continue;

		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	}

	bpf_timer_start(timer, TIMER_INTERVAL_NS,
			timer_pinned ? BPF_F_TIMER_CPU_PIN : 0);
	__sync_fetch_and_add(&nr_timers, 1);
	return 0;
}

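/*
 * Create the fallback DSQ and arm the central timer. init is expected to run
 * on the central CPU (the userspace loader presumably affines itself there
 * before attaching) so that bpf_timer_start() with BPF_F_TIMER_CPU_PIN pins
 * the timer to the central CPU.
 */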
int BPF_STRUCT_OPS_SLEEPABLE(central_init)
{
	u32 key = 0;
	struct bpf_timer *timer;
	int ret;

	ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
	if (ret)
		return ret;

	timer = bpf_map_lookup_elem(&central_timer, &key);
	if (!timer)
		return -ESRCH;

	if (bpf_get_smp_processor_id() != central_cpu) {
		scx_bpf_error("init from non-central CPU");
		return -EINVAL;
	}

	bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
	bpf_timer_set_callback(timer, central_timerfn);

	ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	/*
	 * BPF_F_TIMER_CPU_PIN is pretty new (>=6.7). If we're running in a
	 * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
	 * Retry without the PIN. This would be the perfect use case for
	 * bpf_core_enum_value_exists() but the enum type doesn't have a name
	 * and can't be used with bpf_core_enum_value_exists(). Oh well...
	 */
	if (ret == -EINVAL) {
		timer_pinned = false;
		ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
	}
	if (ret)
		scx_bpf_error("bpf_timer_start failed (%d)", ret);
	return ret;
}

void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(central_ops,
	       /*
		* We are offloading all scheduling decisions to the central CPU
		* and thus being the last task on a given CPU doesn't mean
		* anything special. Enqueue the last tasks like any other tasks.
		*/
	       .flags			= SCX_OPS_ENQ_LAST,

	       .select_cpu		= (void *)central_select_cpu,
	       .enqueue			= (void *)central_enqueue,
	       .dispatch		= (void *)central_dispatch,
	       .running			= (void *)central_running,
	       .stopping		= (void *)central_stopping,
	       .init			= (void *)central_init,
	       .exit			= (void *)central_exit,
	       .name			= "central");