1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */
9#define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
10
11enum scx_consts {
12 SCX_DSP_DFL_MAX_BATCH = 32,
13 SCX_DSP_MAX_LOOPS = 32,
14 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
15
16 SCX_EXIT_BT_LEN = 64,
17 SCX_EXIT_MSG_LEN = 1024,
18 SCX_EXIT_DUMP_DFL_LEN = 32768,
19
20 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
21
22 /*
23 * Iterating all tasks may take a while. Periodically drop
24 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
25 */
26 SCX_OPS_TASK_ITER_BATCH = 32,
27};
28
29enum scx_exit_kind {
30 SCX_EXIT_NONE,
31 SCX_EXIT_DONE,
32
33 SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
34 SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
35 SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
36 SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
37
38 SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
39 SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
40 SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
41};
42
43/*
44 * An exit code can be specified when exiting with scx_bpf_exit() or
45 * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
46 * respectively. The codes are 64bit values with the following format:
47 *
48 * Bits: [63 .. 48 47 .. 32 31 .. 0]
49 * [ SYS ACT ] [ SYS RSN ] [ USR ]
50 *
51 * SYS ACT: System-defined exit actions
52 * SYS RSN: System-defined exit reasons
53 * USR : User-defined exit codes and reasons
54 *
55 * Using the above, users may communicate intention and context by ORing system
56 * actions and/or system reasons with a user-defined exit code.
57 */
58enum scx_exit_code {
59 /* Reasons */
60 SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
61
62 /* Actions */
63 SCX_ECODE_ACT_RESTART = 1LLU << 48,
64};
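
/*
 * Illustrative sketch, not part of the original file: a BPF scheduler that
 * wants its userspace loader to restart it after a CPU hotplug event could
 * exit from a hotplug callback with something like:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
 *		     "CPU %d went through a hotplug event", cpu);
 *
 * The low 32 bits remain free for scheduler-defined codes and the loader can
 * inspect scx_exit_info.exit_code to decide how to react.
 */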
65
66/*
67 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
68 * being disabled.
69 */
70struct scx_exit_info {
71 /* %SCX_EXIT_* - broad category of the exit reason */
72 enum scx_exit_kind kind;
73
74 /* exit code if gracefully exiting */
75 s64 exit_code;
76
77 /* textual representation of the above */
78 const char *reason;
79
80 /* backtrace if exiting due to an error */
81 unsigned long *bt;
82 u32 bt_len;
83
84 /* informational message */
85 char *msg;
86
87 /* debug dump */
88 char *dump;
89};
90
91/* sched_ext_ops.flags */
92enum scx_ops_flags {
93 /*
94 * Keep built-in idle tracking even if ops.update_idle() is implemented.
95 */
96 SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
97
98 /*
99 * By default, if there are no other tasks to run on the CPU, the ext core
100 * keeps running the current task even after its slice expires. If this
101 * flag is specified, such tasks are passed to ops.enqueue() with
102 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
103 */
104 SCX_OPS_ENQ_LAST = 1LLU << 1,
105
106 /*
107 * An exiting task may schedule after PF_EXITING is set. In such cases,
108 * bpf_task_from_pid() may not be able to find the task and if the BPF
109 * scheduler depends on pid lookup for dispatching, the task will be
110 * lost leading to various issues including RCU grace period stalls.
111 *
112 * To mask this problem, by default, unhashed tasks are automatically
113 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
114 * depend on pid lookups and wants to handle these tasks directly, the
115 * following flag can be used.
116 */
117 SCX_OPS_ENQ_EXITING = 1LLU << 2,
118
119 /*
120 * If set, only tasks with policy set to SCHED_EXT are attached to
121 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
122 */
123 SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
124
125 /*
126 * CPU cgroup support flags
127 */
128 SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
129
130 SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
131 SCX_OPS_ENQ_LAST |
132 SCX_OPS_ENQ_EXITING |
133 SCX_OPS_SWITCH_PARTIAL |
134 SCX_OPS_HAS_CGROUP_WEIGHT,
135};
136
137/* argument container for ops.init_task() */
138struct scx_init_task_args {
139 /*
140 * Set if ops.init_task() is being invoked on the fork path, as opposed
141 * to the scheduler transition path.
142 */
143 bool fork;
144#ifdef CONFIG_EXT_GROUP_SCHED
145 /* the cgroup the task is joining */
146 struct cgroup *cgroup;
147#endif
148};
149
150/* argument container for ops.exit_task() */
151struct scx_exit_task_args {
152 /* Whether the task exited before running on sched_ext. */
153 bool cancelled;
154};
155
156/* argument container for ops->cgroup_init() */
157struct scx_cgroup_init_args {
158 /* the weight of the cgroup [1..10000] */
159 u32 weight;
160};
161
162enum scx_cpu_preempt_reason {
163 /* next task is being scheduled by &rt_sched_class */
164 SCX_CPU_PREEMPT_RT,
165 /* next task is being scheduled by &dl_sched_class */
166 SCX_CPU_PREEMPT_DL,
167 /* next task is being scheduled by &stop_sched_class */
168 SCX_CPU_PREEMPT_STOP,
169 /* unknown reason for SCX being preempted */
170 SCX_CPU_PREEMPT_UNKNOWN,
171};
172
173/*
174 * Argument container for ops->cpu_acquire(). Currently empty, but may be
175 * expanded in the future.
176 */
177struct scx_cpu_acquire_args {};
178
179/* argument container for ops->cpu_release() */
180struct scx_cpu_release_args {
181 /* the reason the CPU was preempted */
182 enum scx_cpu_preempt_reason reason;
183
184 /* the task that's going to be scheduled on the CPU */
185 struct task_struct *task;
186};
187
188/*
189 * Informational context provided to dump operations.
190 */
191struct scx_dump_ctx {
192 enum scx_exit_kind kind;
193 s64 exit_code;
194 const char *reason;
195 u64 at_ns;
196 u64 at_jiffies;
197};
198
199/**
200 * struct sched_ext_ops - Operation table for BPF scheduler implementation
201 *
202 * A BPF scheduler can implement an arbitrary scheduling policy by
203 * implementing and loading operations in this table. Note that a userland
204 * scheduling policy can also be implemented using the BPF scheduler
205 * as a shim layer.
206 */
207struct sched_ext_ops {
208 /**
209 * select_cpu - Pick the target CPU for a task which is being woken up
210 * @p: task being woken up
211 * @prev_cpu: the cpu @p was on before sleeping
212 * @wake_flags: SCX_WAKE_*
213 *
214 * Decision made here isn't final. @p may be moved to any CPU while it
215 * is getting dispatched for execution later. However, as @p is not on
216 * the rq at this point, getting the eventual execution CPU right here
217 * saves a small bit of overhead down the line.
218 *
219 * If an idle CPU is returned, the CPU is kicked and will try to
220 * dispatch. While an explicit custom mechanism can be added,
221 * select_cpu() serves as the default way to wake up idle CPUs.
222 *
223 * @p may be inserted into a DSQ directly by calling
224 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
225 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
226 * of the CPU returned by this operation.
227 *
228 * Note that select_cpu() is never called for tasks that can only run
229 * on a single CPU or tasks with migration disabled, as they don't have
230 * the option to select a different CPU. See select_task_rq() for
231 * details.
232 */
233 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
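
	/*
	 * Illustrative sketch with assumed names, not part of this file: use
	 * the built-in idle CPU selection and, when an idle CPU is found,
	 * insert @p directly so ops.enqueue() is skipped:
	 *
	 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */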
234
235 /**
236 * enqueue - Enqueue a task on the BPF scheduler
237 * @p: task being enqueued
238 * @enq_flags: %SCX_ENQ_*
239 *
240 * @p is ready to run. Insert directly into a DSQ by calling
241 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
242 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
243 * the task will stall.
244 *
245 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
246 * skipped.
247 */
248 void (*enqueue)(struct task_struct *p, u64 enq_flags);
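
	/*
	 * Illustrative sketch with assumed names, not part of this file: a
	 * minimal global-FIFO enqueue inserts every task into the global DSQ
	 * with the default slice:
	 *
	 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p,
	 *			    u64 enq_flags)
	 *	{
	 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL,
	 *				   enq_flags);
	 *	}
	 */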
249
250 /**
251 * dequeue - Remove a task from the BPF scheduler
252 * @p: task being dequeued
253 * @deq_flags: %SCX_DEQ_*
254 *
255 * Remove @p from the BPF scheduler. This is usually called to isolate
256 * the task while updating its scheduling properties (e.g. priority).
257 *
258 * The ext core keeps track of whether the BPF side owns a given task or
259 * not and can gracefully ignore spurious dispatches from BPF side,
260 * which makes it safe to not implement this method. However, depending
261 * on the scheduling logic, this can lead to confusing behaviors - e.g.
262 * scheduling position not being updated across a priority change.
263 */
264 void (*dequeue)(struct task_struct *p, u64 deq_flags);
265
266 /**
267 * dispatch - Dispatch tasks from the BPF scheduler and/or user DSQs
268 * @cpu: CPU to dispatch tasks for
269 * @prev: previous task being switched out
270 *
271 * Called when a CPU's local dsq is empty. The operation should dispatch
272 * one or more tasks from the BPF scheduler into the DSQs using
273 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
274 * using scx_bpf_dsq_move_to_local().
275 *
276 * The maximum number of times scx_bpf_dsq_insert() can be called
277 * without an intervening scx_bpf_dsq_move_to_local() is specified by
278 * ops.dispatch_max_batch. See the comments on top of the two functions
279 * for more details.
280 *
281 * When not %NULL, @prev is an SCX task with its slice depleted. If
282 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
283 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
284 * ops.dispatch() returns. To keep executing @prev, return without
285 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
286 */
287 void (*dispatch)(s32 cpu, struct task_struct *prev);
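
	/*
	 * Illustrative sketch with an assumed DSQ id, not part of this file:
	 * consume from a scheduler-created DSQ into @cpu's local DSQ. If
	 * nothing is moved and @prev is still runnable, @prev keeps running:
	 *
	 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu,
	 *			    struct task_struct *prev)
	 *	{
	 *		scx_bpf_dsq_move_to_local(EXAMPLE_DSQ_ID);
	 *	}
	 */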
288
289 /**
290 * tick - Periodic tick
291 * @p: task running currently
292 *
293 * This operation is called every 1/HZ seconds on CPUs which are
294 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
295 * immediate dispatch cycle on the CPU.
296 */
297 void (*tick)(struct task_struct *p);
298
299 /**
300 * runnable - A task is becoming runnable on its associated CPU
301 * @p: task becoming runnable
302 * @enq_flags: %SCX_ENQ_*
303 *
304 * This and the following three functions can be used to track a task's
305 * execution state transitions. A task becomes ->runnable() on a CPU,
306 * and then goes through one or more ->running() and ->stopping() pairs
307 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
308 * done running on the CPU.
309 *
310 * @p is becoming runnable on the CPU because it's
311 *
312 * - waking up (%SCX_ENQ_WAKEUP)
313 * - being moved from another CPU
314 * - being restored after temporarily taken off the queue for an
315 * attribute change.
316 *
317 * This and ->enqueue() are related but not coupled. This operation
318 * notifies @p's state transition and may not be followed by ->enqueue()
319 * e.g. when @p is being dispatched to a remote CPU, or when @p is
320 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
321 * task may be ->enqueue()'d without being preceded by this operation
322 * e.g. after exhausting its slice.
323 */
324 void (*runnable)(struct task_struct *p, u64 enq_flags);
325
326 /**
327 * running - A task is starting to run on its associated CPU
328 * @p: task starting to run
329 *
330 * See ->runnable() for explanation on the task state notifiers.
331 */
332 void (*running)(struct task_struct *p);
333
334 /**
335 * stopping - A task is stopping execution
336 * @p: task stopping to run
337 * @runnable: is task @p still runnable?
338 *
339 * See ->runnable() for explanation on the task state notifiers. If
340 * !@runnable, ->quiescent() will be invoked after this operation
341 * returns.
342 */
343 void (*stopping)(struct task_struct *p, bool runnable);
344
345 /**
346 * quiescent - A task is becoming not runnable on its associated CPU
347 * @p: task becoming not runnable
348 * @deq_flags: %SCX_DEQ_*
349 *
350 * See ->runnable() for explanation on the task state notifiers.
351 *
352 * @p is becoming quiescent on the CPU because it's
353 *
354 * - sleeping (%SCX_DEQ_SLEEP)
355 * - being moved to another CPU
356 * - being temporarily taken off the queue for an attribute change
357 * (%SCX_DEQ_SAVE)
358 *
359 * This and ->dequeue() are related but not coupled. This operation
360 * notifies @p's state transition and may not be preceded by ->dequeue()
361 * e.g. when @p is being dispatched to a remote CPU.
362 */
363 void (*quiescent)(struct task_struct *p, u64 deq_flags);
364
365 /**
366 * yield - Yield CPU
367 * @from: yielding task
368 * @to: optional yield target task
369 *
370 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
371 * The BPF scheduler should ensure that other available tasks are
372 * dispatched before the yielding task. Return value is ignored in this
373 * case.
374 *
375 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
376 * scheduler can implement the request, return %true; otherwise, %false.
377 */
378 bool (*yield)(struct task_struct *from, struct task_struct *to);
379
380 /**
381 * core_sched_before - Task ordering for core-sched
382 * @a: task A
383 * @b: task B
384 *
385 * Used by core-sched to determine the ordering between two tasks. See
386 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
387 * core-sched.
388 *
389 * Both @a and @b are runnable and may or may not currently be queued on
390 * the BPF scheduler. Should return %true if @a should run before @b.
391 * %false if there's no required ordering or @b should run before @a.
392 *
393 * If not specified, the default is ordering them according to when they
394 * became runnable.
395 */
396 bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
397
398 /**
399 * set_weight - Set task weight
400 * @p: task to set weight for
401 * @weight: new weight [1..10000]
402 *
403 * Update @p's weight to @weight.
404 */
405 void (*set_weight)(struct task_struct *p, u32 weight);
406
407 /**
408 * set_cpumask - Set CPU affinity
409 * @p: task to set CPU affinity for
410 * @cpumask: cpumask of cpus that @p can run on
411 *
412 * Update @p's CPU affinity to @cpumask.
413 */
414 void (*set_cpumask)(struct task_struct *p,
415 const struct cpumask *cpumask);
416
417 /**
418 * update_idle - Update the idle state of a CPU
419 * @cpu: CPU to update the idle state for
420 * @idle: whether entering or exiting the idle state
421 *
422 * This operation is called when @cpu enters or leaves the idle
423 * state. By default, implementing this operation disables the built-in
424 * idle CPU tracking and the following helpers become unavailable:
425 *
426 * - scx_bpf_select_cpu_dfl()
427 * - scx_bpf_test_and_clear_cpu_idle()
428 * - scx_bpf_pick_idle_cpu()
429 *
430 * The user also must implement ops.select_cpu() as the default
431 * implementation relies on scx_bpf_select_cpu_dfl().
432 *
433 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
434 * tracking.
435 */
436 void (*update_idle)(s32 cpu, bool idle);
437
438 /**
439 * cpu_acquire - A CPU is becoming available to the BPF scheduler
440 * @cpu: The CPU being acquired by the BPF scheduler.
441 * @args: Acquire arguments, see the struct definition.
442 *
443 * A CPU that was previously released from the BPF scheduler is now once
444 * again under its control.
445 */
446 void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
447
448 /**
449 * cpu_release - A CPU is taken away from the BPF scheduler
450 * @cpu: The CPU being released by the BPF scheduler.
451 * @args: Release arguments, see the struct definition.
452 *
453 * The specified CPU is no longer under the control of the BPF
454 * scheduler. This could be because it was preempted by a higher
455 * priority sched_class, though there may be other reasons as well. The
456 * caller should consult @args->reason to determine the cause.
457 */
458 void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
459
460 /**
461 * init_task - Initialize a task to run in a BPF scheduler
462 * @p: task to initialize for BPF scheduling
463 * @args: init arguments, see the struct definition
464 *
465 * Either we're loading a BPF scheduler or a new task is being forked.
466 * Initialize @p for BPF scheduling. This operation may block and can
467 * be used for allocations, and is called exactly once for a task.
468 *
469 * Return 0 for success, -errno for failure. An error return while
470 * loading will abort loading of the BPF scheduler. During a fork, it
471 * will abort that specific fork.
472 */
473 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
474
475 /**
476 * exit_task - Exit a previously-running task from the system
477 * @p: task to exit
478 *
479 * @p is exiting or the BPF scheduler is being unloaded. Perform any
480 * necessary cleanup for @p.
481 */
482 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
483
484 /**
485 * enable - Enable BPF scheduling for a task
486 * @p: task to enable BPF scheduling for
487 *
488 * Enable @p for BPF scheduling. enable() is called on @p any time it
489 * enters SCX, and is always paired with a matching disable().
490 */
491 void (*enable)(struct task_struct *p);
492
493 /**
494 * disable - Disable BPF scheduling for a task
495 * @p: task to disable BPF scheduling for
496 *
497 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
498 * Disable BPF scheduling for @p. A disable() call is always matched
499 * with a prior enable() call.
500 */
501 void (*disable)(struct task_struct *p);
502
503 /**
504 * dump - Dump BPF scheduler state on error
505 * @ctx: debug dump context
506 *
507 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
508 */
509 void (*dump)(struct scx_dump_ctx *ctx);
510
511 /**
512 * dump_cpu - Dump BPF scheduler state for a CPU on error
513 * @ctx: debug dump context
514 * @cpu: CPU to generate debug dump for
515 * @idle: @cpu is currently idle without any runnable tasks
516 *
517 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
518 * @cpu. If @idle is %true and this operation doesn't produce any
519 * output, @cpu is skipped for dump.
520 */
521 void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
522
523 /**
524 * dump_task - Dump BPF scheduler state for a runnable task on error
525 * @ctx: debug dump context
526 * @p: runnable task to generate debug dump for
527 *
528 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
529 * @p.
530 */
531 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
532
533#ifdef CONFIG_EXT_GROUP_SCHED
534 /**
535 * cgroup_init - Initialize a cgroup
536 * @cgrp: cgroup being initialized
537 * @args: init arguments, see the struct definition
538 *
539 * Either the BPF scheduler is being loaded or @cgrp created, initialize
540 * @cgrp for sched_ext. This operation may block.
541 *
542 * Return 0 for success, -errno for failure. An error return while
543 * loading will abort loading of the BPF scheduler. During cgroup
544 * creation, it will abort the specific cgroup creation.
545 */
546 s32 (*cgroup_init)(struct cgroup *cgrp,
547 struct scx_cgroup_init_args *args);
548
549 /**
550 * cgroup_exit - Exit a cgroup
551 * @cgrp: cgroup being exited
552 *
553 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
554 * @cgrp for sched_ext. This operation may block.
555 */
556 void (*cgroup_exit)(struct cgroup *cgrp);
557
558 /**
559 * cgroup_prep_move - Prepare a task to be moved to a different cgroup
560 * @p: task being moved
561 * @from: cgroup @p is being moved from
562 * @to: cgroup @p is being moved to
563 *
564 * Prepare @p for move from cgroup @from to @to. This operation may
565 * block and can be used for allocations.
566 *
567 * Return 0 for success, -errno for failure. An error return aborts the
568 * migration.
569 */
570 s32 (*cgroup_prep_move)(struct task_struct *p,
571 struct cgroup *from, struct cgroup *to);
572
573 /**
574 * cgroup_move - Commit cgroup move
575 * @p: task being moved
576 * @from: cgroup @p is being moved from
577 * @to: cgroup @p is being moved to
578 *
579 * Commit the move. @p is dequeued during this operation.
580 */
581 void (*cgroup_move)(struct task_struct *p,
582 struct cgroup *from, struct cgroup *to);
583
584 /**
585 * cgroup_cancel_move - Cancel cgroup move
586 * @p: task whose cgroup move is being canceled
587 * @from: cgroup @p was being moved from
588 * @to: cgroup @p was being moved to
589 *
590 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
591 * Undo the preparation.
592 */
593 void (*cgroup_cancel_move)(struct task_struct *p,
594 struct cgroup *from, struct cgroup *to);
595
596 /**
597 * cgroup_set_weight - A cgroup's weight is being changed
598 * @cgrp: cgroup whose weight is being updated
599 * @weight: new weight [1..10000]
600 *
601 * Update @cgrp's weight to @weight.
602 */
603 void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
604#endif /* CONFIG_EXT_GROUP_SCHED */
605
606 /*
607 * All online ops must come before ops.cpu_online().
608 */
609
610 /**
611 * cpu_online - A CPU became online
612 * @cpu: CPU which just came up
613 *
614 * @cpu just came online. @cpu will not call ops.enqueue() or
615 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
616 */
617 void (*cpu_online)(s32 cpu);
618
619 /**
620 * cpu_offline - A CPU is going offline
621 * @cpu: CPU which is going offline
622 *
623 * @cpu is going offline. @cpu will not call ops.enqueue() or
624 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
625 */
626 void (*cpu_offline)(s32 cpu);
627
628 /*
629 * All CPU hotplug ops must come before ops.init().
630 */
631
632 /**
633 * init - Initialize the BPF scheduler
634 */
635 s32 (*init)(void);
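
	/*
	 * Illustrative sketch with assumed names, not part of this file:
	 * ops.init() typically creates the DSQs the scheduler dispatches to.
	 * scx_bpf_create_dsq() may block, hence the sleepable variant:
	 *
	 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
	 *	{
	 *		return scx_bpf_create_dsq(EXAMPLE_DSQ_ID, -1);
	 *	}
	 *
	 * A non-zero return aborts loading of the BPF scheduler.
	 */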
636
637 /**
638 * exit - Clean up after the BPF scheduler
639 * @info: Exit info
640 *
641 * ops.exit() is also called on ops.init() failure, which is a bit
642 * unusual. This is to allow rich reporting through @info on how
643 * ops.init() failed.
644 */
645 void (*exit)(struct scx_exit_info *info);
646
647 /**
648 * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
649 */
650 u32 dispatch_max_batch;
651
652 /**
653 * flags - %SCX_OPS_* flags
654 */
655 u64 flags;
656
657 /**
658 * timeout_ms - The maximum amount of time, in milliseconds, that a
659 * runnable task should be able to wait before being scheduled. The
660 * maximum timeout may not exceed the default timeout of 30 seconds.
661 *
662 * Defaults to the maximum allowed timeout value of 30 seconds.
663 */
664 u32 timeout_ms;
665
666 /**
667 * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
668 * value of 32768 is used.
669 */
670 u32 exit_dump_len;
671
672 /**
673 * hotplug_seq - A sequence number that may be set by the scheduler to
674 * detect when a hotplug event has occurred during the loading process.
675 * If 0, no detection occurs. Otherwise, the scheduler will fail to
676 * load if the sequence number does not match @scx_hotplug_seq on the
677 * enable path.
678 */
679 u64 hotplug_seq;
680
681 /**
682 * name - BPF scheduler's name
683 *
684 * Must be a non-zero valid BPF object name including only isalnum(),
685 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
686 * BPF scheduler is enabled.
687 */
688 char name[SCX_OPS_NAME_LEN];
689};
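
/*
 * Illustrative sketch with assumed names, not part of this file: a BPF
 * scheduler registers an instance of the above table as a struct_ops map:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.select_cpu	= (void *)example_select_cpu,
 *		.enqueue	= (void *)example_enqueue,
 *		.dispatch	= (void *)example_dispatch,
 *		.init		= (void *)example_init,
 *		.name		= "example",
 *	};
 *
 * Attaching the map (e.g. with libbpf's bpf_map__attach_struct_ops()) enables
 * the scheduler and detaching it triggers ops.exit() with %SCX_EXIT_UNREG.
 */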
690
691enum scx_opi {
692 SCX_OPI_BEGIN = 0,
693 SCX_OPI_NORMAL_BEGIN = 0,
694 SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
695 SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
696 SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
697 SCX_OPI_END = SCX_OP_IDX(init),
698};
699
700enum scx_wake_flags {
701 /* expose select WF_* flags as enums */
702 SCX_WAKE_FORK = WF_FORK,
703 SCX_WAKE_TTWU = WF_TTWU,
704 SCX_WAKE_SYNC = WF_SYNC,
705};
706
707enum scx_enq_flags {
708 /* expose select ENQUEUE_* flags as enums */
709 SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
710 SCX_ENQ_HEAD = ENQUEUE_HEAD,
711 SCX_ENQ_CPU_SELECTED = ENQUEUE_RQ_SELECTED,
712
713 /* high 32bits are SCX specific */
714
715 /*
716 * Set the following to trigger preemption when calling
717 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
718 * current task is cleared to zero and the CPU is kicked into the
719 * scheduling path. Implies %SCX_ENQ_HEAD.
720 */
721 SCX_ENQ_PREEMPT = 1LLU << 32,
722
723 /*
724 * The task being enqueued was previously enqueued on the current CPU's
725 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
726 * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
727 * invoked in a ->cpu_release() callback, and the task is again
728 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
729 * task will not be scheduled on the CPU until at least the next invocation
730 * of the ->cpu_acquire() callback.
731 */
732 SCX_ENQ_REENQ = 1LLU << 40,
733
734 /*
735 * The task being enqueued is the only task available for the cpu. By
736 * default, ext core keeps executing such tasks but when
737 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
738 * %SCX_ENQ_LAST flag set.
739 *
740 * The BPF scheduler is responsible for triggering a follow-up
741 * scheduling event. Otherwise, execution may stall.
742 */
743 SCX_ENQ_LAST = 1LLU << 41,
744
745 /* high 8 bits are internal */
746 __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
747
748 SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
749 SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
750};
751
752enum scx_deq_flags {
753 /* expose select DEQUEUE_* flags as enums */
754 SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
755
756 /* high 32bits are SCX specific */
757
758 /*
759 * The generic core-sched layer decided to execute the task even though
760 * it hasn't been dispatched yet. Dequeue from the BPF side.
761 */
762 SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
763};
764
765enum scx_pick_idle_cpu_flags {
766 SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
767};
768
769enum scx_kick_flags {
770 /*
771 * Kick the target CPU if idle. Guarantees that the target CPU goes
772 * through at least one full scheduling cycle before going idle. If the
773 * target CPU can be determined to be currently not idle and going to go
774 * through a scheduling cycle before going idle, this kick is a noop.
775 */
776 SCX_KICK_IDLE = 1LLU << 0,
777
778 /*
779 * Preempt the current task and execute the dispatch path. If the
780 * current task of the target CPU is an SCX task, its ->scx.slice is
781 * cleared to zero before the scheduling path is invoked so that the
782 * task expires and the dispatch path is invoked.
783 */
784 SCX_KICK_PREEMPT = 1LLU << 1,
785
786 /*
787 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
788 * return after the target CPU finishes picking the next task.
789 */
790 SCX_KICK_WAIT = 1LLU << 2,
791};
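
/*
 * Illustrative sketch, not part of this file: after queueing a task on a
 * shared DSQ from ops.enqueue(), a scheduler may nudge an idle CPU so the
 * task doesn't wait for the next natural scheduling event:
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */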
792
793enum scx_tg_flags {
794 SCX_TG_ONLINE = 1U << 0,
795 SCX_TG_INITED = 1U << 1,
796};
797
798enum scx_ops_enable_state {
799 SCX_OPS_ENABLING,
800 SCX_OPS_ENABLED,
801 SCX_OPS_DISABLING,
802 SCX_OPS_DISABLED,
803};
804
805static const char *scx_ops_enable_state_str[] = {
806 [SCX_OPS_ENABLING] = "enabling",
807 [SCX_OPS_ENABLED] = "enabled",
808 [SCX_OPS_DISABLING] = "disabling",
809 [SCX_OPS_DISABLED] = "disabled",
810};
811
812/*
813 * sched_ext_entity->ops_state
814 *
815 * Used to track the task ownership between the SCX core and the BPF scheduler.
816 * State transitions look as follows:
817 *
818 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
819 * ^ | |
820 * | v v
821 * \-------------------------------/
822 *
823 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
824 * sites for explanations on the conditions being waited upon and why they are
825 * safe. Transitions out of them into NONE or QUEUED must store_release and the
826 * waiters should load_acquire.
827 *
828 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
829 * any given task can be dispatched by the BPF scheduler at all times and thus
830 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
831 * to try to dispatch any task anytime regardless of its state as the SCX core
832 * can safely reject invalid dispatches.
833 */
834enum scx_ops_state {
835 SCX_OPSS_NONE, /* owned by the SCX core */
836 SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
837 SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
838 SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
839
840 /*
841 * QSEQ brands each QUEUED instance so that, when dispatch races
842 * dequeue/requeue, the dispatcher can tell whether it still has a claim
843 * on the task being dispatched.
844 *
845 * As some 32bit archs can't do 64bit store_release/load_acquire,
846 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
847 * 32bit machines. The dispatch race window QSEQ protects is very narrow
848 * and runs with IRQ disabled. 30 bits should be sufficient.
849 */
850 SCX_OPSS_QSEQ_SHIFT = 2,
851};
852
853/* Use macros to ensure that the type is unsigned long for the masks */
854#define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
855#define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
856
857/*
858 * During exit, a task may schedule after losing its PIDs. When disabling the
859 * BPF scheduler, we need to be able to iterate tasks in every state to
860 * guarantee system safety. Maintain a dedicated task list which contains every
861 * task between its fork and eventual free.
862 */
863static DEFINE_SPINLOCK(scx_tasks_lock);
864static LIST_HEAD(scx_tasks);
865
866/* ops enable/disable */
867static struct kthread_worker *scx_ops_helper;
868static DEFINE_MUTEX(scx_ops_enable_mutex);
869DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
870DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
871static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
872static unsigned long scx_in_softlockup;
873static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0);
874static int scx_ops_bypass_depth;
875static bool scx_ops_init_task_enabled;
876static bool scx_switching_all;
877DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
878
879static struct sched_ext_ops scx_ops;
880static bool scx_warned_zero_slice;
881
882static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
883static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
884static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
885static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
886
887#ifdef CONFIG_SMP
888static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
889static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
890#endif
891
892static struct static_key_false scx_has_op[SCX_OPI_END] =
893 { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
894
895static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
896static struct scx_exit_info *scx_exit_info;
897
898static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
899static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
900
901/*
902 * A monotonically increasing sequence number that is incremented every time a
903 * scheduler is enabled. This can be used to check if any custom sched_ext
904 * scheduler has ever been used in the system.
905 */
906static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
907
908/*
909 * The maximum amount of time in jiffies that a task may be runnable without
910 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
911 * scx_ops_error().
912 */
913static unsigned long scx_watchdog_timeout;
914
915/*
916 * The last time the delayed work was run. This delayed work relies on
917 * ksoftirqd being able to run to service timer interrupts, so it's possible
918 * that this work itself could get wedged. To account for this, we check that
919 * it's not stalled in the timer tick, and trigger an error if it is.
920 */
921static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
922
923static struct delayed_work scx_watchdog_work;
924
925/* idle tracking */
926#ifdef CONFIG_SMP
927#ifdef CONFIG_CPUMASK_OFFSTACK
928#define CL_ALIGNED_IF_ONSTACK
929#else
930#define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
931#endif
932
933static struct {
934 cpumask_var_t cpu;
935 cpumask_var_t smt;
936} idle_masks CL_ALIGNED_IF_ONSTACK;
937
938#endif /* CONFIG_SMP */
939
940/* for %SCX_KICK_WAIT */
941static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
942
943/*
944 * Direct dispatch marker.
945 *
946 * Non-NULL values are used for direct dispatch from enqueue path. A valid
947 * pointer points to the task currently being enqueued. An ERR_PTR value is used
948 * to indicate that direct dispatch has already happened.
949 */
950static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
951
952/*
953 * Dispatch queues.
954 *
955 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
956 * to avoid live-locking in bypass mode where all tasks are dispatched to
957 * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
958 * sufficient, it can be further split.
959 */
960static struct scx_dispatch_q **global_dsqs;
961
962static const struct rhashtable_params dsq_hash_params = {
963 .key_len = 8,
964 .key_offset = offsetof(struct scx_dispatch_q, id),
965 .head_offset = offsetof(struct scx_dispatch_q, hash_node),
966};
967
968static struct rhashtable dsq_hash;
969static LLIST_HEAD(dsqs_to_free);
970
971/* dispatch buf */
972struct scx_dsp_buf_ent {
973 struct task_struct *task;
974 unsigned long qseq;
975 u64 dsq_id;
976 u64 enq_flags;
977};
978
979static u32 scx_dsp_max_batch;
980
981struct scx_dsp_ctx {
982 struct rq *rq;
983 u32 cursor;
984 u32 nr_tasks;
985 struct scx_dsp_buf_ent buf[];
986};
987
988static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
989
990/* string formatting from BPF */
991struct scx_bstr_buf {
992 u64 data[MAX_BPRINTF_VARARGS];
993 char line[SCX_EXIT_MSG_LEN];
994};
995
996static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
997static struct scx_bstr_buf scx_exit_bstr_buf;
998
999/* ops debug dump */
1000struct scx_dump_data {
1001 s32 cpu;
1002 bool first;
1003 s32 cursor;
1004 struct seq_buf *s;
1005 const char *prefix;
1006 struct scx_bstr_buf buf;
1007};
1008
1009static struct scx_dump_data scx_dump_data = {
1010 .cpu = -1,
1011};
1012
1013/* /sys/kernel/sched_ext interface */
1014static struct kset *scx_kset;
1015static struct kobject *scx_root_kobj;
1016
1017#define CREATE_TRACE_POINTS
1018#include <trace/events/sched_ext.h>
1019
1020static void process_ddsp_deferred_locals(struct rq *rq);
1021static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1022static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
1023 s64 exit_code,
1024 const char *fmt, ...);
1025
1026#define scx_ops_error_kind(err, fmt, args...) \
1027 scx_ops_exit_kind((err), 0, fmt, ##args)
1028
1029#define scx_ops_exit(code, fmt, args...) \
1030 scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1031
1032#define scx_ops_error(fmt, args...) \
1033 scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1034
1035#define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1036
1037static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1038{
1039 if (time_after(at, now))
1040 return jiffies_to_msecs(at - now);
1041 else
1042 return -(long)jiffies_to_msecs(now - at);
1043}
1044
1045/* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1046static u32 higher_bits(u32 flags)
1047{
1048 return ~((1 << fls(flags)) - 1);
1049}
1050
1051/* return the mask with only the highest bit set */
1052static u32 highest_bit(u32 flags)
1053{
1054 int bit = fls(flags);
1055 return ((u64)1 << bit) >> 1;
1056}
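
/*
 * Worked example for the two helpers above: fls(0x05) == 3, so
 * higher_bits(0x05) == 0xfffffff8 (bits 3..31) and highest_bit(0x05) == 0x04.
 */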
1057
1058static bool u32_before(u32 a, u32 b)
1059{
1060 return (s32)(a - b) < 0;
1061}
1062
1063static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1064{
1065 return global_dsqs[cpu_to_node(task_cpu(p))];
1066}
1067
1068static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1069{
1070 return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1071}
1072
1073/*
1074 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1075 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1076 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1077 * whether it's running from an allowed context.
1078 *
1079 * @mask is constant, always inline to cull the mask calculations.
1080 */
1081static __always_inline void scx_kf_allow(u32 mask)
1082{
1083 /* nesting is allowed only in increasing scx_kf_mask order */
1084 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1085 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1086 current->scx.kf_mask, mask);
1087 current->scx.kf_mask |= mask;
1088 barrier();
1089}
1090
1091static void scx_kf_disallow(u32 mask)
1092{
1093 barrier();
1094 current->scx.kf_mask &= ~mask;
1095}
1096
1097#define SCX_CALL_OP(mask, op, args...) \
1098do { \
1099 if (mask) { \
1100 scx_kf_allow(mask); \
1101 scx_ops.op(args); \
1102 scx_kf_disallow(mask); \
1103 } else { \
1104 scx_ops.op(args); \
1105 } \
1106} while (0)
1107
1108#define SCX_CALL_OP_RET(mask, op, args...) \
1109({ \
1110 __typeof__(scx_ops.op(args)) __ret; \
1111 if (mask) { \
1112 scx_kf_allow(mask); \
1113 __ret = scx_ops.op(args); \
1114 scx_kf_disallow(mask); \
1115 } else { \
1116 __ret = scx_ops.op(args); \
1117 } \
1118 __ret; \
1119})
1120
1121/*
1122 * Some kfuncs are allowed only on the tasks that are subjects of the
1123 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1124 * restrictions, the following SCX_CALL_OP_*() variants should be used when
1125 * invoking scx_ops operations that take task arguments. These can only be used
1126 * for non-nesting operations due to the way the tasks are tracked.
1127 *
1128 * kfuncs which can only operate on such tasks can in turn use
1129 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1130 * the specific task.
1131 */
1132#define SCX_CALL_OP_TASK(mask, op, task, args...) \
1133do { \
1134 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1135 current->scx.kf_tasks[0] = task; \
1136 SCX_CALL_OP(mask, op, task, ##args); \
1137 current->scx.kf_tasks[0] = NULL; \
1138} while (0)
1139
1140#define SCX_CALL_OP_TASK_RET(mask, op, task, args...) \
1141({ \
1142 __typeof__(scx_ops.op(task, ##args)) __ret; \
1143 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1144 current->scx.kf_tasks[0] = task; \
1145 __ret = SCX_CALL_OP_RET(mask, op, task, ##args); \
1146 current->scx.kf_tasks[0] = NULL; \
1147 __ret; \
1148})
1149
1150#define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...) \
1151({ \
1152 __typeof__(scx_ops.op(task0, task1, ##args)) __ret; \
1153 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1154 current->scx.kf_tasks[0] = task0; \
1155 current->scx.kf_tasks[1] = task1; \
1156 __ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args); \
1157 current->scx.kf_tasks[0] = NULL; \
1158 current->scx.kf_tasks[1] = NULL; \
1159 __ret; \
1160})
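
/*
 * Illustrative usage: the enqueue path invokes ops.enqueue() roughly as
 * below, which records @p in kf_tasks[] so that kfuncs restricted to the
 * operation's argument tasks can verify their subject:
 *
 *	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
 */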
1161
1162/* @mask is constant, always inline to cull unnecessary branches */
1163static __always_inline bool scx_kf_allowed(u32 mask)
1164{
1165 if (unlikely(!(current->scx.kf_mask & mask))) {
1166 scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1167 mask, current->scx.kf_mask);
1168 return false;
1169 }
1170
1171 /*
1172 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1173 * DISPATCH must not be called if we're running DEQUEUE which is nested
1174 * inside ops.dispatch(). We don't need to check boundaries for any
1175 * blocking kfuncs as the verifier ensures they're only called from
1176 * sleepable progs.
1177 */
1178 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1179 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1180 scx_ops_error("cpu_release kfunc called from a nested operation");
1181 return false;
1182 }
1183
1184 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1185 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1186 scx_ops_error("dispatch kfunc called from a nested operation");
1187 return false;
1188 }
1189
1190 return true;
1191}
1192
1193/* see SCX_CALL_OP_TASK() */
1194static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1195 struct task_struct *p)
1196{
1197 if (!scx_kf_allowed(mask))
1198 return false;
1199
1200 if (unlikely((p != current->scx.kf_tasks[0] &&
1201 p != current->scx.kf_tasks[1]))) {
1202 scx_ops_error("called on a task not being operated on");
1203 return false;
1204 }
1205
1206 return true;
1207}
1208
1209static bool scx_kf_allowed_if_unlocked(void)
1210{
1211 return !current->scx.kf_mask;
1212}
1213
1214/**
1215 * nldsq_next_task - Iterate to the next task in a non-local DSQ
1216 * @dsq: user dsq being iterated
1217 * @cur: current position, %NULL to start iteration
1218 * @rev: walk backwards
1219 *
1220 * Returns %NULL when iteration is finished.
1221 */
1222static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1223 struct task_struct *cur, bool rev)
1224{
1225 struct list_head *list_node;
1226 struct scx_dsq_list_node *dsq_lnode;
1227
1228 lockdep_assert_held(&dsq->lock);
1229
1230 if (cur)
1231 list_node = &cur->scx.dsq_list.node;
1232 else
1233 list_node = &dsq->list;
1234
1235 /* find the next task, need to skip BPF iteration cursors */
1236 do {
1237 if (rev)
1238 list_node = list_node->prev;
1239 else
1240 list_node = list_node->next;
1241
1242 if (list_node == &dsq->list)
1243 return NULL;
1244
1245 dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1246 node);
1247 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1248
1249 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1250}
1251
1252#define nldsq_for_each_task(p, dsq) \
1253 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
1254 (p) = nldsq_next_task((dsq), (p), false))
1255
1256
1257/*
1258 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1259 * dispatch order. BPF-visible iterator is opaque and larger to allow future
1260 * changes without breaking backward compatibility. Can be used with
1261 * bpf_for_each(). See bpf_iter_scx_dsq_*().
1262 */
1263enum scx_dsq_iter_flags {
1264 /* iterate in the reverse dispatch order */
1265 SCX_DSQ_ITER_REV = 1U << 16,
1266
1267 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
1268 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
1269
1270 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
1271 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
1272 __SCX_DSQ_ITER_HAS_SLICE |
1273 __SCX_DSQ_ITER_HAS_VTIME,
1274};
1275
1276struct bpf_iter_scx_dsq_kern {
1277 struct scx_dsq_list_node cursor;
1278 struct scx_dispatch_q *dsq;
1279 u64 slice;
1280 u64 vtime;
1281} __attribute__((aligned(8)));
1282
1283struct bpf_iter_scx_dsq {
1284 u64 __opaque[6];
1285} __attribute__((aligned(8)));
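
/*
 * Illustrative sketch with an assumed DSQ id, not part of this file: from
 * BPF, the iterator is typically used through bpf_for_each() to walk a DSQ
 * in dispatch order:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, EXAMPLE_DSQ_ID, 0)
 *		bpf_printk("queued: %s[%d]", p->comm, p->pid);
 */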
1286
1287
1288/*
1289 * SCX task iterator.
1290 */
1291struct scx_task_iter {
1292 struct sched_ext_entity cursor;
1293 struct task_struct *locked;
1294 struct rq *rq;
1295 struct rq_flags rf;
1296 u32 cnt;
1297};
1298
1299/**
1300 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1301 * @iter: iterator to init
1302 *
1303 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1304 * must eventually be stopped with scx_task_iter_stop().
1305 *
1306 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1307 * between this and the first next() call or between any two next() calls. If
1308 * the locks are released between two next() calls, the caller is responsible
1309 * for ensuring that the task being iterated remains accessible either through
1310 * RCU read lock or obtaining a reference count.
1311 *
1312 * All tasks which existed when the iteration started are guaranteed to be
1313 * visited as long as they still exist.
1314 */
1315static void scx_task_iter_start(struct scx_task_iter *iter)
1316{
1317 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1318 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1319
1320 spin_lock_irq(&scx_tasks_lock);
1321
1322 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1323 list_add(&iter->cursor.tasks_node, &scx_tasks);
1324 iter->locked = NULL;
1325 iter->cnt = 0;
1326}
1327
1328static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1329{
1330 if (iter->locked) {
1331 task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1332 iter->locked = NULL;
1333 }
1334}
1335
1336/**
1337 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1338 * @iter: iterator to unlock
1339 *
1340 * If @iter is in the middle of a locked iteration, it may be locking the rq of
1341 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1342 * This function can be safely called anytime during an iteration.
1343 */
1344static void scx_task_iter_unlock(struct scx_task_iter *iter)
1345{
1346 __scx_task_iter_rq_unlock(iter);
1347 spin_unlock_irq(&scx_tasks_lock);
1348}
1349
1350/**
1351 * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1352 * @iter: iterator to re-lock
1353 *
1354 * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1355 * doesn't re-lock the rq lock. Must be called before other iterator operations.
1356 */
1357static void scx_task_iter_relock(struct scx_task_iter *iter)
1358{
1359 spin_lock_irq(&scx_tasks_lock);
1360}
1361
1362/**
1363 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1364 * @iter: iterator to exit
1365 *
1366 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1367 * which is released on return. If the iterator holds a task's rq lock, that rq
1368 * lock is also released. See scx_task_iter_start() for details.
1369 */
1370static void scx_task_iter_stop(struct scx_task_iter *iter)
1371{
1372 list_del_init(&iter->cursor.tasks_node);
1373 scx_task_iter_unlock(iter);
1374}
1375
1376/**
1377 * scx_task_iter_next - Next task
1378 * @iter: iterator to walk
1379 *
1380 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1381 * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
1382 * stalls by holding scx_tasks_lock for too long.
1383 */
1384static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1385{
1386 struct list_head *cursor = &iter->cursor.tasks_node;
1387 struct sched_ext_entity *pos;
1388
1389 if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
1390 scx_task_iter_unlock(iter);
1391 cond_resched();
1392 scx_task_iter_relock(iter);
1393 }
1394
1395 list_for_each_entry(pos, cursor, tasks_node) {
1396 if (&pos->tasks_node == &scx_tasks)
1397 return NULL;
1398 if (!(pos->flags & SCX_TASK_CURSOR)) {
1399 list_move(cursor, &pos->tasks_node);
1400 return container_of(pos, struct task_struct, scx);
1401 }
1402 }
1403
1404 /* can't happen, should always terminate at scx_tasks above */
1405 BUG();
1406}
1407
1408/**
1409 * scx_task_iter_next_locked - Next non-idle task with its rq locked
1410 * @iter: iterator to walk
1411 *
1412 * Visit the next non-idle task with its rq lock held. See
1413 * scx_task_iter_start() for details.
1416 */
1417static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1418{
1419 struct task_struct *p;
1420
1421 __scx_task_iter_rq_unlock(iter);
1422
1423 while ((p = scx_task_iter_next(iter))) {
1424 /*
1425 * scx_task_iter is used to prepare and move tasks into SCX
1426 * while loading the BPF scheduler and vice-versa while
1427 * unloading. The init_tasks ("swappers") should be excluded
1428 * from the iteration because:
1429 *
1430 * - It's unsafe to use __setscheduler_prio() on an init_task to
1431 * determine the sched_class to use as it won't preserve its
1432 * idle_sched_class.
1433 *
1434 * - ops.init/exit_task() can easily be confused if called with
1435 * init_tasks as they, e.g., share PID 0.
1436 *
1437 * As init_tasks are never scheduled through SCX, they can be
1438 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1439 * doesn't work here:
1440 *
1441 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1442 * yet been onlined.
1443 *
1444 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1445 * play_idle_precise() used by CONFIG_IDLE_INJECT.
1446 *
1447 * Test for idle_sched_class as only init_tasks are on it.
1448 */
1449 if (p->sched_class != &idle_sched_class)
1450 break;
1451 }
1452 if (!p)
1453 return NULL;
1454
1455 iter->rq = task_rq_lock(p, &iter->rf);
1456 iter->locked = p;
1457
1458 return p;
1459}
1460
1461static enum scx_ops_enable_state scx_ops_enable_state(void)
1462{
1463 return atomic_read(&scx_ops_enable_state_var);
1464}
1465
1466static enum scx_ops_enable_state
1467scx_ops_set_enable_state(enum scx_ops_enable_state to)
1468{
1469 return atomic_xchg(&scx_ops_enable_state_var, to);
1470}
1471
1472static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1473 enum scx_ops_enable_state from)
1474{
1475 int from_v = from;
1476
1477 return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1478}
1479
1480static bool scx_rq_bypassing(struct rq *rq)
1481{
1482 return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1483}
1484
1485/**
1486 * wait_ops_state - Busy-wait the specified ops state to end
1487 * @p: target task
1488 * @opss: state to wait the end of
1489 *
1490 * Busy-wait for @p to transition out of @opss. This can only be used when the
1491 * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This function also
1492 * has load_acquire semantics to ensure that the caller can see the updates made
1493 * in the enqueueing and dispatching paths.
1494 */
1495static void wait_ops_state(struct task_struct *p, unsigned long opss)
1496{
1497 do {
1498 cpu_relax();
1499 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1500}
1501
1502/**
1503 * ops_cpu_valid - Verify a cpu number
1504 * @cpu: cpu number which came from a BPF ops
1505 * @where: extra information reported on error
1506 *
1507 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1508 * Verify that it is in range and one of the possible cpus. If invalid, trigger
1509 * an ops error.
1510 */
1511static bool ops_cpu_valid(s32 cpu, const char *where)
1512{
1513 if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1514 return true;
1515 } else {
1516 scx_ops_error("invalid CPU %d%s%s", cpu,
1517 where ? " " : "", where ?: "");
1518 return false;
1519 }
1520}
1521
1522/**
1523 * ops_sanitize_err - Sanitize a -errno value
1524 * @ops_name: operation to blame on failure
1525 * @err: -errno value to sanitize
1526 *
1527 * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1528 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1529 * cause misbehaviors. For example, a large negative return from
1530 * ops.init_task() triggers an oops when passed up the call chain because the
1531 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1532 * handled as a pointer.
1533 */
1534static int ops_sanitize_err(const char *ops_name, s32 err)
1535{
1536 if (err < 0 && err >= -MAX_ERRNO)
1537 return err;
1538
1539 scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1540 return -EPROTO;
1541}
1542
1543static void run_deferred(struct rq *rq)
1544{
1545 process_ddsp_deferred_locals(rq);
1546}
1547
1548#ifdef CONFIG_SMP
1549static void deferred_bal_cb_workfn(struct rq *rq)
1550{
1551 run_deferred(rq);
1552}
1553#endif
1554
1555static void deferred_irq_workfn(struct irq_work *irq_work)
1556{
1557 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1558
1559 raw_spin_rq_lock(rq);
1560 run_deferred(rq);
1561 raw_spin_rq_unlock(rq);
1562}
1563
1564/**
1565 * schedule_deferred - Schedule execution of deferred actions on an rq
1566 * @rq: target rq
1567 *
1568 * Schedule execution of deferred actions on @rq. Must be called with @rq
1569 * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1570 * can unlock @rq to e.g. migrate tasks to other rqs.
1571 */
1572static void schedule_deferred(struct rq *rq)
1573{
1574 lockdep_assert_rq_held(rq);
1575
1576#ifdef CONFIG_SMP
1577 /*
1578 * If in the middle of waking up a task, task_woken_scx() will be called
1579 * afterwards which will then run the deferred actions, no need to
1580 * schedule anything.
1581 */
1582 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1583 return;
1584
1585 /*
1586 * If in balance, the balance callbacks will be called before rq lock is
1587 * released. Schedule one.
1588 */
1589 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1590 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1591 deferred_bal_cb_workfn);
1592 return;
1593 }
1594#endif
1595 /*
1596 * No scheduler hooks available. Queue an irq work. They are executed on
1597 * IRQ re-enable which may take a bit longer than the scheduler hooks.
1598 * The above WAKEUP and BALANCE paths should cover most of the cases and
1599 * the time to IRQ re-enable shouldn't be long.
1600 */
1601 irq_work_queue(&rq->scx.deferred_irq_work);
1602}
1603
1604/**
1605 * touch_core_sched - Update timestamp used for core-sched task ordering
1606 * @rq: rq to read clock from, must be locked
1607 * @p: task to update the timestamp for
1608 *
1609 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1610 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1611 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1612 * exhaustion).
1613 */
1614static void touch_core_sched(struct rq *rq, struct task_struct *p)
1615{
1616 lockdep_assert_rq_held(rq);
1617
1618#ifdef CONFIG_SCHED_CORE
1619 /*
1620 * It's okay to update the timestamp spuriously. Use
1621 * sched_core_disabled() which is cheaper than enabled().
1622 *
1623 * As this is used to determine ordering between tasks of sibling CPUs,
1624 * it may be better to use per-core dispatch sequence instead.
1625 */
1626 if (!sched_core_disabled())
1627 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1628#endif
1629}
1630
1631/**
1632 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1633 * @rq: rq to read clock from, must be locked
1634 * @p: task being dispatched
1635 *
1636 * If the BPF scheduler implements custom core-sched ordering via
1637 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1638 * ordering within each local DSQ. This function is called from dispatch paths
1639 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1640 */
1641static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1642{
1643 lockdep_assert_rq_held(rq);
1644
1645#ifdef CONFIG_SCHED_CORE
1646 if (SCX_HAS_OP(core_sched_before))
1647 touch_core_sched(rq, p);
1648#endif
1649}
1650
1651static void update_curr_scx(struct rq *rq)
1652{
1653 struct task_struct *curr = rq->curr;
1654 s64 delta_exec;
1655
1656 delta_exec = update_curr_common(rq);
1657 if (unlikely(delta_exec <= 0))
1658 return;
1659
1660 if (curr->scx.slice != SCX_SLICE_INF) {
1661 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1662 if (!curr->scx.slice)
1663 touch_core_sched(rq, curr);
1664 }
1665}
1666
1667static bool scx_dsq_priq_less(struct rb_node *node_a,
1668 const struct rb_node *node_b)
1669{
1670 const struct task_struct *a =
1671 container_of(node_a, struct task_struct, scx.dsq_priq);
1672 const struct task_struct *b =
1673 container_of(node_b, struct task_struct, scx.dsq_priq);
1674
1675 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1676}
1677
1678static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1679{
1680 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1681 WRITE_ONCE(dsq->nr, dsq->nr + delta);
1682}
1683
1684static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1685 u64 enq_flags)
1686{
1687 bool is_local = dsq->id == SCX_DSQ_LOCAL;
1688
1689 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1690 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1691 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1692
1693 if (!is_local) {
1694 raw_spin_lock(&dsq->lock);
1695 if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1696 scx_ops_error("attempting to dispatch to a destroyed dsq");
1697 /* fall back to the global dsq */
1698 raw_spin_unlock(&dsq->lock);
1699 dsq = find_global_dsq(p);
1700 raw_spin_lock(&dsq->lock);
1701 }
1702 }
1703
1704 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1705 (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1706 /*
1707 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1708		 * their FIFO queues. To avoid confusion and to keep
1709		 * FIFO-dispatched tasks from accidentally starving
1710		 * vtime-dispatched tasks, we disallow any built-in DSQ from
1711		 * doing vtime ordering of tasks.
1712 */
1713 scx_ops_error("cannot use vtime ordering for built-in DSQs");
1714 enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1715 }
1716
1717 if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1718 struct rb_node *rbp;
1719
1720 /*
1721 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1722 * linked to both the rbtree and list on PRIQs, this can only be
1723 * tested easily when adding the first task.
1724 */
1725 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1726 nldsq_next_task(dsq, NULL, false)))
1727 scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1728 dsq->id);
1729
1730 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1731 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1732
1733 /*
1734 * Find the previous task and insert after it on the list so
1735 * that @dsq->list is vtime ordered.
1736 */
1737 rbp = rb_prev(&p->scx.dsq_priq);
1738 if (rbp) {
1739 struct task_struct *prev =
1740 container_of(rbp, struct task_struct,
1741 scx.dsq_priq);
1742 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1743 } else {
1744 list_add(&p->scx.dsq_list.node, &dsq->list);
1745 }
1746 } else {
1747 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1748 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1749 scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1750 dsq->id);
1751
1752 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1753 list_add(&p->scx.dsq_list.node, &dsq->list);
1754 else
1755 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1756 }
1757
1758 /* seq records the order tasks are queued, used by BPF DSQ iterator */
1759 dsq->seq++;
1760 p->scx.dsq_seq = dsq->seq;
1761
1762 dsq_mod_nr(dsq, 1);
1763 p->scx.dsq = dsq;
1764
1765 /*
1766 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1767 * direct dispatch path, but we clear them here because the direct
1768 * dispatch verdict may be overridden on the enqueue path during e.g.
1769 * bypass.
1770 */
1771 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1772 p->scx.ddsp_enq_flags = 0;
1773
1774 /*
1775 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1776 * match waiters' load_acquire.
1777 */
1778 if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1779 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1780
1781 if (is_local) {
1782 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1783 bool preempt = false;
1784
1785 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1786 rq->curr->sched_class == &ext_sched_class) {
1787 rq->curr->scx.slice = 0;
1788 preempt = true;
1789 }
1790
1791 if (preempt || sched_class_above(&ext_sched_class,
1792 rq->curr->sched_class))
1793 resched_curr(rq);
1794 } else {
1795 raw_spin_unlock(&dsq->lock);
1796 }
1797}
1798
1799static void task_unlink_from_dsq(struct task_struct *p,
1800 struct scx_dispatch_q *dsq)
1801{
1802 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1803
1804 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1805 rb_erase(&p->scx.dsq_priq, &dsq->priq);
1806 RB_CLEAR_NODE(&p->scx.dsq_priq);
1807 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1808 }
1809
1810 list_del_init(&p->scx.dsq_list.node);
1811 dsq_mod_nr(dsq, -1);
1812}
1813
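/*
 * Undo dispatch_enqueue() - take @p off the DSQ it's queued on, or cancel a
 * pending deferred local dispatch. May race against dispatch_to_local_dsq();
 * see the handling of @p->scx.holding_cpu below.
 */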
1814static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1815{
1816 struct scx_dispatch_q *dsq = p->scx.dsq;
1817 bool is_local = dsq == &rq->scx.local_dsq;
1818
1819 if (!dsq) {
1820 /*
1821 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1822 * Unlinking is all that's needed to cancel.
1823 */
1824 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1825 list_del_init(&p->scx.dsq_list.node);
1826
1827 /*
1828 * When dispatching directly from the BPF scheduler to a local
1829 * DSQ, the task isn't associated with any DSQ but
1830 * @p->scx.holding_cpu may be set under the protection of
1831 * %SCX_OPSS_DISPATCHING.
1832 */
1833 if (p->scx.holding_cpu >= 0)
1834 p->scx.holding_cpu = -1;
1835
1836 return;
1837 }
1838
1839 if (!is_local)
1840 raw_spin_lock(&dsq->lock);
1841
1842 /*
1843 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1844 * change underneath us.
1845 */
1846 if (p->scx.holding_cpu < 0) {
1847 /* @p must still be on @dsq, dequeue */
1848 task_unlink_from_dsq(p, dsq);
1849 } else {
1850 /*
1851 * We're racing against dispatch_to_local_dsq() which already
1852 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1853 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1854 * the race.
1855 */
1856 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1857 p->scx.holding_cpu = -1;
1858 }
1859 p->scx.dsq = NULL;
1860
1861 if (!is_local)
1862 raw_spin_unlock(&dsq->lock);
1863}
1864
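/*
 * Resolve @dsq_id to the DSQ @p should be dispatched to from @rq. Invalid CPUs
 * and non-existent DSQ IDs trigger an ops error and fall back to a global DSQ.
 */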
1865static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
1866 struct task_struct *p)
1867{
1868 struct scx_dispatch_q *dsq;
1869
1870 if (dsq_id == SCX_DSQ_LOCAL)
1871 return &rq->scx.local_dsq;
1872
1873 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1874 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1875
1876 if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1877 return find_global_dsq(p);
1878
1879 return &cpu_rq(cpu)->scx.local_dsq;
1880 }
1881
1882 if (dsq_id == SCX_DSQ_GLOBAL)
1883 dsq = find_global_dsq(p);
1884 else
1885 dsq = find_user_dsq(dsq_id);
1886
1887 if (unlikely(!dsq)) {
1888 scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
1889 dsq_id, p->comm, p->pid);
1890 return find_global_dsq(p);
1891 }
1892
1893 return dsq;
1894}
1895
1896static void mark_direct_dispatch(struct task_struct *ddsp_task,
1897 struct task_struct *p, u64 dsq_id,
1898 u64 enq_flags)
1899{
1900 /*
1901 * Mark that dispatch already happened from ops.select_cpu() or
1902 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1903 * which can never match a valid task pointer.
1904 */
1905 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1906
1907 /* @p must match the task on the enqueue path */
1908 if (unlikely(p != ddsp_task)) {
1909 if (IS_ERR(ddsp_task))
1910 scx_ops_error("%s[%d] already direct-dispatched",
1911 p->comm, p->pid);
1912 else
1913 scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1914 ddsp_task->comm, ddsp_task->pid,
1915 p->comm, p->pid);
1916 return;
1917 }
1918
1919 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1920 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1921
1922 p->scx.ddsp_dsq_id = dsq_id;
1923 p->scx.ddsp_enq_flags = enq_flags;
1924}
1925
1926static void direct_dispatch(struct task_struct *p, u64 enq_flags)
1927{
1928 struct rq *rq = task_rq(p);
1929 struct scx_dispatch_q *dsq =
1930 find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
1931
1932 touch_core_sched_dispatch(rq, p);
1933
1934 p->scx.ddsp_enq_flags |= enq_flags;
1935
1936 /*
1937 * We are in the enqueue path with @rq locked and pinned, and thus can't
1938 * double lock a remote rq and enqueue to its local DSQ. For
1939 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1940 * the enqueue so that it's executed when @rq can be unlocked.
1941 */
1942 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1943 unsigned long opss;
1944
1945 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1946
1947		switch (opss) {
1948 case SCX_OPSS_NONE:
1949 break;
1950 case SCX_OPSS_QUEUEING:
1951 /*
1952 * As @p was never passed to the BPF side, _release is
1953 * not strictly necessary. Still do it for consistency.
1954 */
1955 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1956 break;
1957 default:
1958 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1959 p->comm, p->pid, opss);
1960 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1961 break;
1962 }
1963
1964 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1965 list_add_tail(&p->scx.dsq_list.node,
1966 &rq->scx.ddsp_deferred_locals);
1967 schedule_deferred(rq);
1968 return;
1969 }
1970
1971 dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1972}
1973
1974static bool scx_rq_online(struct rq *rq)
1975{
1976 /*
1977 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1978 * the online state as seen from the BPF scheduler. cpu_active() test
1979 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1980 * stay set until the current scheduling operation is complete even if
1981 * we aren't locking @rq.
1982 */
1983 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1984}
1985
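/*
 * Hand @p to the BPF scheduler through ops.enqueue(). Depending on the
 * situation (offline rq, bypass mode, direct dispatch verdict, etc.), @p may
 * instead go straight to a local or global DSQ - see the labels below.
 */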
1986static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1987 int sticky_cpu)
1988{
1989 struct task_struct **ddsp_taskp;
1990 unsigned long qseq;
1991
1992 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1993
1994 /* rq migration */
1995 if (sticky_cpu == cpu_of(rq))
1996 goto local_norefill;
1997
1998 /*
1999 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
2000 * is offline and are just running the hotplug path. Don't bother the
2001 * BPF scheduler.
2002 */
2003 if (!scx_rq_online(rq))
2004 goto local;
2005
2006 if (scx_rq_bypassing(rq))
2007 goto global;
2008
2009 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2010 goto direct;
2011
2012 /* see %SCX_OPS_ENQ_EXITING */
2013 if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
2014 unlikely(p->flags & PF_EXITING))
2015 goto local;
2016
2017 if (!SCX_HAS_OP(enqueue))
2018 goto global;
2019
2020 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
2021 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
2022
2023 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2024 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
2025
2026 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2027 WARN_ON_ONCE(*ddsp_taskp);
2028 *ddsp_taskp = p;
2029
2030 SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
2031
2032 *ddsp_taskp = NULL;
2033 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2034 goto direct;
2035
2036 /*
2037 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
2038 * dequeue may be waiting. The store_release matches their load_acquire.
2039 */
2040 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2041 return;
2042
2043direct:
2044 direct_dispatch(p, enq_flags);
2045 return;
2046
2047local:
2048 /*
2049 * For task-ordering, slice refill must be treated as implying the end
2050 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2051 * higher priority it becomes from scx_prio_less()'s POV.
2052 */
2053 touch_core_sched(rq, p);
2054 p->scx.slice = SCX_SLICE_DFL;
2055local_norefill:
2056 dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2057 return;
2058
2059global:
2060 touch_core_sched(rq, p); /* see the comment in local: */
2061 p->scx.slice = SCX_SLICE_DFL;
2062 dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2063}
2064
2065static bool task_runnable(const struct task_struct *p)
2066{
2067 return !list_empty(&p->scx.runnable_node);
2068}
2069
2070static void set_task_runnable(struct rq *rq, struct task_struct *p)
2071{
2072 lockdep_assert_rq_held(rq);
2073
2074 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2075 p->scx.runnable_at = jiffies;
2076 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2077 }
2078
2079 /*
2080 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2081	 * appended to the runnable_list.
2082 */
2083 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2084}
2085
2086static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2087{
2088 list_del_init(&p->scx.runnable_node);
2089 if (reset_runnable_at)
2090 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2091}
2092
2093static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2094{
2095 int sticky_cpu = p->scx.sticky_cpu;
2096
2097 if (enq_flags & ENQUEUE_WAKEUP)
2098 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2099
2100 enq_flags |= rq->scx.extra_enq_flags;
2101
2102 if (sticky_cpu >= 0)
2103 p->scx.sticky_cpu = -1;
2104
2105 /*
2106 * Restoring a running task will be immediately followed by
2107 * set_next_task_scx() which expects the task to not be on the BPF
2108 * scheduler as tasks can only start running through local DSQs. Force
2109 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2110 */
2111 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2112 sticky_cpu = cpu_of(rq);
2113
2114 if (p->scx.flags & SCX_TASK_QUEUED) {
2115 WARN_ON_ONCE(!task_runnable(p));
2116 goto out;
2117 }
2118
2119 set_task_runnable(rq, p);
2120 p->scx.flags |= SCX_TASK_QUEUED;
2121 rq->scx.nr_running++;
2122 add_nr_running(rq, 1);
2123
2124 if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2125 SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2126
2127 if (enq_flags & SCX_ENQ_WAKEUP)
2128 touch_core_sched(rq, p);
2129
2130 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2131out:
2132 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2133}
2134
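/*
 * Notify the BPF scheduler that @p is being dequeued and reclaim ownership of
 * @p from it, waiting for an in-flight dispatch to finish if necessary.
 */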
2135static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2136{
2137 unsigned long opss;
2138
2139 /* dequeue is always temporary, don't reset runnable_at */
2140 clr_task_runnable(p, false);
2141
2142 /* acquire ensures that we see the preceding updates on QUEUED */
2143 opss = atomic_long_read_acquire(&p->scx.ops_state);
2144
2145 switch (opss & SCX_OPSS_STATE_MASK) {
2146 case SCX_OPSS_NONE:
2147 break;
2148 case SCX_OPSS_QUEUEING:
2149 /*
2150 * QUEUEING is started and finished while holding @p's rq lock.
2151 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2152 */
2153 BUG();
2154 case SCX_OPSS_QUEUED:
2155 if (SCX_HAS_OP(dequeue))
2156 SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2157
2158 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2159 SCX_OPSS_NONE))
2160 break;
2161 fallthrough;
2162 case SCX_OPSS_DISPATCHING:
2163 /*
2164 * If @p is being dispatched from the BPF scheduler to a DSQ,
2165 * wait for the transfer to complete so that @p doesn't get
2166 * added to its DSQ after dequeueing is complete.
2167 *
2168 * As we're waiting on DISPATCHING with the rq locked, the
2169 * dispatching side shouldn't try to lock the rq while
2170 * DISPATCHING is set. See dispatch_to_local_dsq().
2171 *
2172 * DISPATCHING shouldn't have qseq set and control can reach
2173 * here with NONE @opss from the above QUEUED case block.
2174 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2175 */
2176 wait_ops_state(p, SCX_OPSS_DISPATCHING);
2177 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2178 break;
2179 }
2180}
2181
2182static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2183{
2184 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2185 WARN_ON_ONCE(task_runnable(p));
2186 return true;
2187 }
2188
2189 ops_dequeue(p, deq_flags);
2190
2191 /*
2192 * A currently running task which is going off @rq first gets dequeued
2193 * and then stops running. As we want running <-> stopping transitions
2194 * to be contained within runnable <-> quiescent transitions, trigger
2195 * ->stopping() early here instead of in put_prev_task_scx().
2196 *
2197 * @p may go through multiple stopping <-> running transitions between
2198 * here and put_prev_task_scx() if task attribute changes occur while
2199 * balance_scx() leaves @rq unlocked. However, they don't contain any
2200 * information meaningful to the BPF scheduler and can be suppressed by
2201 * skipping the callbacks if the task is !QUEUED.
2202 */
2203 if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2204 update_curr_scx(rq);
2205 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
2206 }
2207
2208 if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2209 SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
2210
2211 if (deq_flags & SCX_DEQ_SLEEP)
2212 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2213 else
2214 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2215
2216 p->scx.flags &= ~SCX_TASK_QUEUED;
2217 rq->scx.nr_running--;
2218 sub_nr_running(rq, 1);
2219
2220 dispatch_dequeue(rq, p);
2221 return true;
2222}
2223
2224static void yield_task_scx(struct rq *rq)
2225{
2226 struct task_struct *p = rq->curr;
2227
2228 if (SCX_HAS_OP(yield))
2229 SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
2230 else
2231 p->scx.slice = 0;
2232}
2233
2234static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2235{
2236 struct task_struct *from = rq->curr;
2237
2238 if (SCX_HAS_OP(yield))
2239 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
2240 else
2241 return false;
2242}
2243
2244static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2245 struct scx_dispatch_q *src_dsq,
2246 struct rq *dst_rq)
2247{
2248 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2249
2250 /* @dsq is locked and @p is on @dst_rq */
2251 lockdep_assert_held(&src_dsq->lock);
2252 lockdep_assert_rq_held(dst_rq);
2253
2254 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2255
2256 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2257 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2258 else
2259 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2260
2261 dsq_mod_nr(dst_dsq, 1);
2262 p->scx.dsq = dst_dsq;
2263}
2264
2265#ifdef CONFIG_SMP
2266/**
2267 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2268 * @p: task to move
2269 * @enq_flags: %SCX_ENQ_*
2270 * @src_rq: rq to move the task from, locked on entry, released on return
2271 * @dst_rq: rq to move the task into, locked on return
2272 *
2273 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2274 */
2275static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2276 struct rq *src_rq, struct rq *dst_rq)
2277{
2278 lockdep_assert_rq_held(src_rq);
2279
2280 /* the following marks @p MIGRATING which excludes dequeue */
2281 deactivate_task(src_rq, p, 0);
2282 set_task_cpu(p, cpu_of(dst_rq));
2283 p->scx.sticky_cpu = cpu_of(dst_rq);
2284
2285 raw_spin_rq_unlock(src_rq);
2286 raw_spin_rq_lock(dst_rq);
2287
2288 /*
2289 * We want to pass scx-specific enq_flags but activate_task() will
2290	 * truncate the upper 32 bits. As we own @rq, we can pass them through
2291 * @rq->scx.extra_enq_flags instead.
2292 */
2293 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2294 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2295 dst_rq->scx.extra_enq_flags = enq_flags;
2296 activate_task(dst_rq, p, 0);
2297 dst_rq->scx.extra_enq_flags = 0;
2298}
2299
2300/*
2301 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2302 * differences:
2303 *
2304 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2305 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2306 * this CPU?".
2307 *
2308 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2309 * must be allowed to finish on the CPU that it's currently on regardless of
2310 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2311 * BPF scheduler shouldn't attempt to migrate a task which has migration
2312 * disabled.
2313 *
2314 * - The BPF scheduler is bypassed while the rq is offline, so we can always say
2315 *   no to BPF-scheduler-initiated migrations while offline.
2316 *
2317 * The caller must ensure that @p and @rq are on different CPUs.
2318 */
2319static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2320 bool trigger_error)
2321{
2322 int cpu = cpu_of(rq);
2323
2324 SCHED_WARN_ON(task_cpu(p) == cpu);
2325
2326 /*
2327 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2328 * the pinned CPU in migrate_disable_switch() while @p is being switched
2329 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2330	 * updated and thus another CPU may see @p on a DSQ in between, leading to
2331 * @p passing the below task_allowed_on_cpu() check while migration is
2332 * disabled.
2333 *
2334 * Test the migration disabled state first as the race window is narrow
2335 * and the BPF scheduler failing to check migration disabled state can
2336 * easily be masked if task_allowed_on_cpu() is done first.
2337 */
2338 if (unlikely(is_migration_disabled(p))) {
2339 if (trigger_error)
2340 scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2341 p->comm, p->pid, task_cpu(p), cpu);
2342 return false;
2343 }
2344
2345 /*
2346 * We don't require the BPF scheduler to avoid dispatching to offline
2347 * CPUs mostly for convenience but also because CPUs can go offline
2348 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2349 * picked CPU is outside the allowed mask.
2350 */
2351 if (!task_allowed_on_cpu(p, cpu)) {
2352 if (trigger_error)
2353 scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2354 cpu, p->comm, p->pid);
2355 return false;
2356 }
2357
2358 if (!scx_rq_online(rq))
2359 return false;
2360
2361 return true;
2362}
2363
2364/**
2365 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2366 * @p: target task
2367 * @dsq: locked DSQ @p is currently on
2368 * @src_rq: rq @p is currently on, stable with @dsq locked
2369 *
2370 * Called with @dsq locked but no rqs locked. We want to move @p to a different
2371 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2372 * required when transferring into a local DSQ. Even when transferring into a
2373 * non-local DSQ, it's better to use the same mechanism to protect against
2374 * dequeues and maintain the invariant that @p->scx.dsq can only change while
2375 * @src_rq is locked, which e.g. scx_dump_task() depends on.
2376 *
2377 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2378 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2379 * this may race with dequeue, which can't drop the rq lock or fail, do a little
2380 * dancing from our side.
2381 *
2382 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2383 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2384 * would be cleared to -1. While other CPUs may have updated it to different
2385 * values afterwards, as this operation can't be preempted or recurse, the
2386 * holding_cpu can never become this CPU again before we're done. Thus, we can
2387 * tell whether we lost to dequeue by testing whether the holding_cpu still
2388 * points to this CPU. See dispatch_dequeue() for the counterpart.
2389 *
2390 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2391 * still valid. %false if lost to dequeue.
2392 */
2393static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2394 struct scx_dispatch_q *dsq,
2395 struct rq *src_rq)
2396{
2397 s32 cpu = raw_smp_processor_id();
2398
2399 lockdep_assert_held(&dsq->lock);
2400
2401 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2402 task_unlink_from_dsq(p, dsq);
2403 p->scx.holding_cpu = cpu;
2404
2405 raw_spin_unlock(&dsq->lock);
2406 raw_spin_rq_lock(src_rq);
2407
2408 /* task_rq couldn't have changed if we're still the holding cpu */
2409 return likely(p->scx.holding_cpu == cpu) &&
2410 !WARN_ON_ONCE(src_rq != task_rq(p));
2411}
2412
2413static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2414 struct scx_dispatch_q *dsq, struct rq *src_rq)
2415{
2416 raw_spin_rq_unlock(this_rq);
2417
2418 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2419 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2420 return true;
2421 } else {
2422 raw_spin_rq_unlock(src_rq);
2423 raw_spin_rq_lock(this_rq);
2424 return false;
2425 }
2426}
2427#else /* CONFIG_SMP */
2428static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2429static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
2430static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2431#endif /* CONFIG_SMP */
2432
2433/**
2434 * move_task_between_dsqs() - Move a task from one DSQ to another
2435 * @p: target task
2436 * @enq_flags: %SCX_ENQ_*
2437 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2438 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2439 *
2440 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2441 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2442 * will change. As @p's task_rq is locked, this function doesn't need to use the
2443 * holding_cpu mechanism.
2444 *
2445 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2446 * return value, is locked.
2447 */
2448static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
2449 struct scx_dispatch_q *src_dsq,
2450 struct scx_dispatch_q *dst_dsq)
2451{
2452 struct rq *src_rq = task_rq(p), *dst_rq;
2453
2454 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2455 lockdep_assert_held(&src_dsq->lock);
2456 lockdep_assert_rq_held(src_rq);
2457
2458 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2459 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2460 if (src_rq != dst_rq &&
2461 unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2462 dst_dsq = find_global_dsq(p);
2463 dst_rq = src_rq;
2464 }
2465 } else {
2466 /* no need to migrate if destination is a non-local DSQ */
2467 dst_rq = src_rq;
2468 }
2469
2470 /*
2471 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
2472 * CPU, @p will be migrated.
2473 */
2474 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2475 /* @p is going from a non-local DSQ to a local DSQ */
2476 if (src_rq == dst_rq) {
2477 task_unlink_from_dsq(p, src_dsq);
2478 move_local_task_to_local_dsq(p, enq_flags,
2479 src_dsq, dst_rq);
2480 raw_spin_unlock(&src_dsq->lock);
2481 } else {
2482 raw_spin_unlock(&src_dsq->lock);
2483 move_remote_task_to_local_dsq(p, enq_flags,
2484 src_rq, dst_rq);
2485 }
2486 } else {
2487 /*
2488 * @p is going from a non-local DSQ to a non-local DSQ. As
2489 * $src_dsq is already locked, do an abbreviated dequeue.
2490 */
2491 task_unlink_from_dsq(p, src_dsq);
2492 p->scx.dsq = NULL;
2493 raw_spin_unlock(&src_dsq->lock);
2494
2495 dispatch_enqueue(dst_dsq, p, enq_flags);
2496 }
2497
2498 return dst_rq;
2499}
2500
2501/*
2502 * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
2503 * banging on the same DSQ on a large NUMA system to the point where switching
2504 * to the bypass mode can take a long time. Inject artificial delays while the
2505 * bypass mode is switching to guarantee timely completion.
2506 */
2507static void scx_ops_breather(struct rq *rq)
2508{
2509 u64 until;
2510
2511 lockdep_assert_rq_held(rq);
2512
2513 if (likely(!atomic_read(&scx_ops_breather_depth)))
2514 return;
2515
2516 raw_spin_rq_unlock(rq);
2517
2518 until = ktime_get_ns() + NSEC_PER_MSEC;
2519
2520 do {
2521 int cnt = 1024;
2522 while (atomic_read(&scx_ops_breather_depth) && --cnt)
2523 cpu_relax();
2524 } while (atomic_read(&scx_ops_breather_depth) &&
2525 time_before64(ktime_get_ns(), until));
2526
2527 raw_spin_rq_lock(rq);
2528}
2529
2530static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
2531{
2532 struct task_struct *p;
2533retry:
2534 /*
2535 * This retry loop can repeatedly race against scx_ops_bypass()
2536 * dequeueing tasks from @dsq trying to put the system into the bypass
2537 * mode. On some multi-socket machines (e.g. 2x Intel 8480c), this can
2538 * live-lock the machine into soft lockups. Give a breather.
2539 */
2540 scx_ops_breather(rq);
2541
2542 /*
2543 * The caller can't expect to successfully consume a task if the task's
2544 * addition to @dsq isn't guaranteed to be visible somehow. Test
2545 * @dsq->list without locking and skip if it seems empty.
2546 */
2547 if (list_empty(&dsq->list))
2548 return false;
2549
2550 raw_spin_lock(&dsq->lock);
2551
2552 nldsq_for_each_task(p, dsq) {
2553 struct rq *task_rq = task_rq(p);
2554
2555 if (rq == task_rq) {
2556 task_unlink_from_dsq(p, dsq);
2557 move_local_task_to_local_dsq(p, 0, dsq, rq);
2558 raw_spin_unlock(&dsq->lock);
2559 return true;
2560 }
2561
2562 if (task_can_run_on_remote_rq(p, rq, false)) {
2563 if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2564 return true;
2565 goto retry;
2566 }
2567 }
2568
2569 raw_spin_unlock(&dsq->lock);
2570 return false;
2571}
2572
2573static bool consume_global_dsq(struct rq *rq)
2574{
2575 int node = cpu_to_node(cpu_of(rq));
2576
2577 return consume_dispatch_q(rq, global_dsqs[node]);
2578}
2579
2580/**
2581 * dispatch_to_local_dsq - Dispatch a task to a local dsq
2582 * @rq: current rq which is locked
2583 * @dst_dsq: destination DSQ
2584 * @p: task to dispatch
2585 * @enq_flags: %SCX_ENQ_*
2586 *
2587 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2588 * DSQ. This function performs all the synchronization dancing needed because
2589 * local DSQs are protected with rq locks.
2590 *
2591 * The caller must have exclusive ownership of @p (e.g. through
2592 * %SCX_OPSS_DISPATCHING).
2593 */
2594static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2595 struct task_struct *p, u64 enq_flags)
2596{
2597 struct rq *src_rq = task_rq(p);
2598 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2599#ifdef CONFIG_SMP
2600 struct rq *locked_rq = rq;
2601#endif
2602
2603 /*
2604 * We're synchronized against dequeue through DISPATCHING. As @p can't
2605 * be dequeued, its task_rq and cpus_allowed are stable too.
2606 *
2607 * If dispatching to @rq that @p is already on, no lock dancing needed.
2608 */
2609 if (rq == src_rq && rq == dst_rq) {
2610 dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2611 return;
2612 }
2613
2614#ifdef CONFIG_SMP
2615 if (src_rq != dst_rq &&
2616 unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2617 dispatch_enqueue(find_global_dsq(p), p,
2618 enq_flags | SCX_ENQ_CLEAR_OPSS);
2619 return;
2620 }
2621
2622 /*
2623 * @p is on a possibly remote @src_rq which we need to lock to move the
2624 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2625 * on DISPATCHING, so we can't grab @src_rq lock while holding
2626 * DISPATCHING.
2627 *
2628 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2629 * we're moving from a DSQ and use the same mechanism - mark the task
2630 * under transfer with holding_cpu, release DISPATCHING and then follow
2631 * the same protocol. See unlink_dsq_and_lock_src_rq().
2632 */
2633 p->scx.holding_cpu = raw_smp_processor_id();
2634
2635 /* store_release ensures that dequeue sees the above */
2636 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2637
2638 /* switch to @src_rq lock */
2639 if (locked_rq != src_rq) {
2640 raw_spin_rq_unlock(locked_rq);
2641 locked_rq = src_rq;
2642 raw_spin_rq_lock(src_rq);
2643 }
2644
2645 /* task_rq couldn't have changed if we're still the holding cpu */
2646 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2647 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2648 /*
2649 * If @p is staying on the same rq, there's no need to go
2650 * through the full deactivate/activate cycle. Optimize by
2651 * abbreviating move_remote_task_to_local_dsq().
2652 */
2653 if (src_rq == dst_rq) {
2654 p->scx.holding_cpu = -1;
2655 dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2656 } else {
2657 move_remote_task_to_local_dsq(p, enq_flags,
2658 src_rq, dst_rq);
2659 /* task has been moved to dst_rq, which is now locked */
2660 locked_rq = dst_rq;
2661 }
2662
2663 /* if the destination CPU is idle, wake it up */
2664 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2665 resched_curr(dst_rq);
2666 }
2667
2668 /* switch back to @rq lock */
2669 if (locked_rq != rq) {
2670 raw_spin_rq_unlock(locked_rq);
2671 raw_spin_rq_lock(rq);
2672 }
2673#else /* CONFIG_SMP */
2674	BUG();	/* control cannot reach here on UP */
2675#endif /* CONFIG_SMP */
2676}
2677
2678/**
2679 * finish_dispatch - Asynchronously finish dispatching a task
2680 * @rq: current rq which is locked
2681 * @p: task to finish dispatching
2682 * @qseq_at_dispatch: qseq when @p started getting dispatched
2683 * @dsq_id: destination DSQ ID
2684 * @enq_flags: %SCX_ENQ_*
2685 *
2686 * Dispatching to local DSQs may need to wait for queueing to complete or
2687 * require rq lock dancing. As we don't wanna do either while inside
2688 * require rq lock dancing. As we don't want to do either while inside
2689 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2690 * task and its qseq. Once ops.dispatch() returns, this function is called to
2691 * finish up.
2692 *
2693 * There is no guarantee that @p is still valid for dispatching or even that it
2694 * was valid in the first place. Make sure that the task is still owned by the
2695 * BPF scheduler and claim the ownership before dispatching.
2696 */
2697static void finish_dispatch(struct rq *rq, struct task_struct *p,
2698 unsigned long qseq_at_dispatch,
2699 u64 dsq_id, u64 enq_flags)
2700{
2701 struct scx_dispatch_q *dsq;
2702 unsigned long opss;
2703
2704 touch_core_sched_dispatch(rq, p);
2705retry:
2706 /*
2707 * No need for _acquire here. @p is accessed only after a successful
2708 * try_cmpxchg to DISPATCHING.
2709 */
2710 opss = atomic_long_read(&p->scx.ops_state);
2711
2712 switch (opss & SCX_OPSS_STATE_MASK) {
2713 case SCX_OPSS_DISPATCHING:
2714 case SCX_OPSS_NONE:
2715 /* someone else already got to it */
2716 return;
2717 case SCX_OPSS_QUEUED:
2718 /*
2719 * If qseq doesn't match, @p has gone through at least one
2720 * dispatch/dequeue and re-enqueue cycle between
2721 * scx_bpf_dsq_insert() and here and we have no claim on it.
2722 */
2723 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2724 return;
2725
2726 /*
2727 * While we know @p is accessible, we don't yet have a claim on
2728 * it - the BPF scheduler is allowed to dispatch tasks
2729 * spuriously and there can be a racing dequeue attempt. Let's
2730 * claim @p by atomically transitioning it from QUEUED to
2731 * DISPATCHING.
2732 */
2733 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2734 SCX_OPSS_DISPATCHING)))
2735 break;
2736 goto retry;
2737 case SCX_OPSS_QUEUEING:
2738 /*
2739 * do_enqueue_task() is in the process of transferring the task
2740 * to the BPF scheduler while holding @p's rq lock. As we aren't
2741 * holding any kernel or BPF resource that the enqueue path may
2742 * depend upon, it's safe to wait.
2743 */
2744 wait_ops_state(p, opss);
2745 goto retry;
2746 }
2747
2748 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2749
2750 dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2751
2752 if (dsq->id == SCX_DSQ_LOCAL)
2753 dispatch_to_local_dsq(rq, dsq, p, enq_flags);
2754 else
2755 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2756}
2757
2758static void flush_dispatch_buf(struct rq *rq)
2759{
2760 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2761 u32 u;
2762
2763 for (u = 0; u < dspc->cursor; u++) {
2764 struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2765
2766 finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
2767 ent->enq_flags);
2768 }
2769
2770 dspc->nr_tasks += dspc->cursor;
2771 dspc->cursor = 0;
2772}
2773
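/*
 * Ensure @rq's local DSQ has something to run, invoking ops.dispatch() as
 * needed. Returns %true if there is something to run (possibly @prev itself),
 * %false if the CPU may go idle. The rq lock may be dropped and reacquired
 * while flushing the dispatch buffer.
 */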
2774static int balance_one(struct rq *rq, struct task_struct *prev)
2775{
2776 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2777 bool prev_on_scx = prev->sched_class == &ext_sched_class;
2778 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2779 int nr_loops = SCX_DSP_MAX_LOOPS;
2780
2781 lockdep_assert_rq_held(rq);
2782 rq->scx.flags |= SCX_RQ_IN_BALANCE;
2783 rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
2784
2785 if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2786 unlikely(rq->scx.cpu_released)) {
2787 /*
2788 * If the previous sched_class for the current CPU was not SCX,
2789 * notify the BPF scheduler that it again has control of the
2790 * core. This callback complements ->cpu_release(), which is
2791 * emitted in switch_class().
2792 */
2793 if (SCX_HAS_OP(cpu_acquire))
2794 SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);
2795 rq->scx.cpu_released = false;
2796 }
2797
2798 if (prev_on_scx) {
2799 update_curr_scx(rq);
2800
2801 /*
2802 * If @prev is runnable & has slice left, it has priority and
2803 * fetching more just increases latency for the fetched tasks.
2804 * Tell pick_task_scx() to keep running @prev. If the BPF
2805 * scheduler wants to handle this explicitly, it should
2806 * implement ->cpu_release().
2807 *
2808 * See scx_ops_disable_workfn() for the explanation on the
2809 * bypassing test.
2810 */
2811 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2812 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2813 goto has_tasks;
2814 }
2815 }
2816
2817 /* if there already are tasks to run, nothing to do */
2818 if (rq->scx.local_dsq.nr)
2819 goto has_tasks;
2820
2821 if (consume_global_dsq(rq))
2822 goto has_tasks;
2823
2824 if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
2825 goto no_tasks;
2826
2827 dspc->rq = rq;
2828
2829 /*
2830 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2831 * the local DSQ might still end up empty after a successful
2832 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2833 * produced some tasks, retry. The BPF scheduler may depend on this
2834 * looping behavior to simplify its implementation.
2835 */
2836 do {
2837 dspc->nr_tasks = 0;
2838
2839 SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2840 prev_on_scx ? prev : NULL);
2841
2842 flush_dispatch_buf(rq);
2843
2844 if (prev_on_rq && prev->scx.slice) {
2845 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2846 goto has_tasks;
2847 }
2848 if (rq->scx.local_dsq.nr)
2849 goto has_tasks;
2850 if (consume_global_dsq(rq))
2851 goto has_tasks;
2852
2853 /*
2854 * ops.dispatch() can trap us in this loop by repeatedly
2855 * dispatching ineligible tasks. Break out once in a while to
2856		 * allow the watchdog to run. As IRQs can't be enabled in
2857 * balance(), we want to complete this scheduling cycle and then
2858 * start a new one. IOW, we want to call resched_curr() on the
2859 * next, most likely idle, task, not the current one. Use
2860 * scx_bpf_kick_cpu() for deferred kicking.
2861 */
2862 if (unlikely(!--nr_loops)) {
2863 scx_bpf_kick_cpu(cpu_of(rq), 0);
2864 break;
2865 }
2866 } while (dspc->nr_tasks);
2867
2868no_tasks:
2869 /*
2870 * Didn't find another task to run. Keep running @prev unless
2871 * %SCX_OPS_ENQ_LAST is in effect.
2872 */
2873 if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
2874 scx_rq_bypassing(rq))) {
2875 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2876 goto has_tasks;
2877 }
2878 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2879 return false;
2880
2881has_tasks:
2882 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2883 return true;
2884}
2885
2886static int balance_scx(struct rq *rq, struct task_struct *prev,
2887 struct rq_flags *rf)
2888{
2889 int ret;
2890
2891 rq_unpin_lock(rq, rf);
2892
2893 ret = balance_one(rq, prev);
2894
2895#ifdef CONFIG_SCHED_SMT
2896 /*
2897 * When core-sched is enabled, this ops.balance() call will be followed
2898 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
2899 * siblings too.
2900 */
2901 if (sched_core_enabled(rq)) {
2902 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2903 int scpu;
2904
2905 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2906 struct rq *srq = cpu_rq(scpu);
2907 struct task_struct *sprev = srq->curr;
2908
2909 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2910 update_rq_clock(srq);
2911 balance_one(srq, sprev);
2912 }
2913 }
2914#endif
2915 rq_repin_lock(rq, rf);
2916
2917 return ret;
2918}
2919
2920static void process_ddsp_deferred_locals(struct rq *rq)
2921{
2922 struct task_struct *p;
2923
2924 lockdep_assert_rq_held(rq);
2925
2926 /*
2927 * Now that @rq can be unlocked, execute the deferred enqueueing of
2928 * tasks directly dispatched to the local DSQs of other CPUs. See
2929 * direct_dispatch(). Keep popping from the head instead of using
2930	 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
2931 * temporarily.
2932 */
2933 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2934 struct task_struct, scx.dsq_list.node))) {
2935 struct scx_dispatch_q *dsq;
2936
2937 list_del_init(&p->scx.dsq_list.node);
2938
2939 dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2940 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2941 dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
2942 }
2943}
2944
2945static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2946{
2947 if (p->scx.flags & SCX_TASK_QUEUED) {
2948 /*
2949 * Core-sched might decide to execute @p before it is
2950 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2951 */
2952 ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2953 dispatch_dequeue(rq, p);
2954 }
2955
2956 p->se.exec_start = rq_clock_task(rq);
2957
2958 /* see dequeue_task_scx() on why we skip when !QUEUED */
2959 if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2960 SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2961
2962 clr_task_runnable(p, true);
2963
2964 /*
2965 * @p is getting newly scheduled or got kicked after someone updated its
2966 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2967 */
2968 if ((p->scx.slice == SCX_SLICE_INF) !=
2969 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2970 if (p->scx.slice == SCX_SLICE_INF)
2971 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2972 else
2973 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2974
2975 sched_update_tick_dependency(rq);
2976
2977 /*
2978 * For now, let's refresh the load_avgs just when transitioning
2979 * in and out of nohz. In the future, we might want to add a
2980 * mechanism which calls the following periodically on
2981 * tick-stopped CPUs.
2982 */
2983 update_other_load_avgs(rq);
2984 }
2985}
2986
2987static enum scx_cpu_preempt_reason
2988preempt_reason_from_class(const struct sched_class *class)
2989{
2990#ifdef CONFIG_SMP
2991 if (class == &stop_sched_class)
2992 return SCX_CPU_PREEMPT_STOP;
2993#endif
2994 if (class == &dl_sched_class)
2995 return SCX_CPU_PREEMPT_DL;
2996 if (class == &rt_sched_class)
2997 return SCX_CPU_PREEMPT_RT;
2998 return SCX_CPU_PREEMPT_UNKNOWN;
2999}
3000
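/*
 * @next from a different sched_class is about to start running on @rq. Bump
 * the pick_next_task sequence for CPU kick waiters and, if a class above SCX
 * is taking over, notify the BPF scheduler through ops.cpu_release().
 */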
3001static void switch_class(struct rq *rq, struct task_struct *next)
3002{
3003 const struct sched_class *next_class = next->sched_class;
3004
3005#ifdef CONFIG_SMP
3006 /*
3007 * Pairs with the smp_load_acquire() issued by a CPU in
3008 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
3009 * resched.
3010 */
3011 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
3012#endif
3013 if (!static_branch_unlikely(&scx_ops_cpu_preempt))
3014 return;
3015
3016 /*
3017 * The callback is conceptually meant to convey that the CPU is no
3018 * longer under the control of SCX. Therefore, don't invoke the callback
3019 * if the next class is below SCX (in which case the BPF scheduler has
3020 * actively decided not to schedule any tasks on the CPU).
3021 */
3022 if (sched_class_above(&ext_sched_class, next_class))
3023 return;
3024
3025 /*
3026 * At this point we know that SCX was preempted by a higher priority
3027 * sched_class, so invoke the ->cpu_release() callback if we have not
3028 * done so already. We only send the callback once between SCX being
3029 * preempted, and it regaining control of the CPU.
3030 *
3031 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
3032 * next time that balance_scx() is invoked.
3033 */
3034 if (!rq->scx.cpu_released) {
3035 if (SCX_HAS_OP(cpu_release)) {
3036 struct scx_cpu_release_args args = {
3037 .reason = preempt_reason_from_class(next_class),
3038 .task = next,
3039 };
3040
3041 SCX_CALL_OP(SCX_KF_CPU_RELEASE,
3042 cpu_release, cpu_of(rq), &args);
3043 }
3044 rq->scx.cpu_released = true;
3045 }
3046}
3047
3048static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3049 struct task_struct *next)
3050{
3051 update_curr_scx(rq);
3052
3053 /* see dequeue_task_scx() on why we skip when !QUEUED */
3054 if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3055 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
3056
3057 if (p->scx.flags & SCX_TASK_QUEUED) {
3058 set_task_runnable(rq, p);
3059
3060 /*
3061 * If @p has slice left and is being put, @p is getting
3062 * preempted by a higher priority scheduler class or core-sched
3063 * forcing a different task. Leave it at the head of the local
3064 * DSQ.
3065 */
3066 if (p->scx.slice && !scx_rq_bypassing(rq)) {
3067 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3068 goto switch_class;
3069 }
3070
3071 /*
3072 * If @p is runnable but we're about to enter a lower
3073 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3074 * ops.enqueue() that @p is the only one available for this cpu,
3075 * which should trigger an explicit follow-up scheduling event.
3076 */
3077 if (sched_class_above(&ext_sched_class, next->sched_class)) {
3078 WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
3079 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3080 } else {
3081 do_enqueue_task(rq, p, 0, -1);
3082 }
3083 }
3084
3085switch_class:
3086 if (next && next->sched_class != &ext_sched_class)
3087 switch_class(rq, next);
3088}
3089
3090static struct task_struct *first_local_task(struct rq *rq)
3091{
3092 return list_first_entry_or_null(&rq->scx.local_dsq.list,
3093 struct task_struct, scx.dsq_list.node);
3094}
3095
3096static struct task_struct *pick_task_scx(struct rq *rq)
3097{
3098 struct task_struct *prev = rq->curr;
3099 struct task_struct *p;
3100 bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3101 bool kick_idle = false;
3102
3103 /*
3104 * WORKAROUND:
3105 *
3106	 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
3107 * have gone through balance_scx(). Unfortunately, there currently is a
3108 * bug where fair could say yes on balance() but no on pick_task(),
3109 * which then ends up calling pick_task_scx() without preceding
3110 * balance_scx().
3111 *
3112 * Keep running @prev if possible and avoid stalling from entering idle
3113 * without balancing.
3114 *
3115 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
3116 * if pick_task_scx() is called without preceding balance_scx().
3117 */
3118 if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3119 if (prev->scx.flags & SCX_TASK_QUEUED) {
3120 keep_prev = true;
3121 } else {
3122 keep_prev = false;
3123 kick_idle = true;
3124 }
3125 } else if (unlikely(keep_prev &&
3126 prev->sched_class != &ext_sched_class)) {
3127 /*
3128 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
3129 * conditional on scx_enabled() and may have been skipped.
3130 */
3131 WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
3132 keep_prev = false;
3133 }
3134
3135 /*
3136 * If balance_scx() is telling us to keep running @prev, replenish slice
3137 * if necessary and keep running @prev. Otherwise, pop the first one
3138 * from the local DSQ.
3139 */
3140 if (keep_prev) {
3141 p = prev;
3142 if (!p->scx.slice)
3143 p->scx.slice = SCX_SLICE_DFL;
3144 } else {
3145 p = first_local_task(rq);
3146 if (!p) {
3147 if (kick_idle)
3148 scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
3149 return NULL;
3150 }
3151
3152 if (unlikely(!p->scx.slice)) {
3153 if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
3154 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3155 p->comm, p->pid, __func__);
3156 scx_warned_zero_slice = true;
3157 }
3158 p->scx.slice = SCX_SLICE_DFL;
3159 }
3160 }
3161
3162 return p;
3163}
3164
3165#ifdef CONFIG_SCHED_CORE
3166/**
3167 * scx_prio_less - Task ordering for core-sched
3168 * @a: task A
3169 * @b: task B
3170 *
3171 * Core-sched is implemented as an additional scheduling layer on top of the
3172 * usual sched_class'es and needs to find out the expected task ordering. For
3173 * SCX, core-sched calls this function to interrogate the task ordering.
3174 *
3175 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3176 * to implement the default task ordering. The older the timestamp, the higher
3177 * priority the task - the global FIFO ordering matching the default scheduling
3178 * behavior.
3179 *
3180 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3181 * implement FIFO ordering within each local DSQ. See pick_task_scx().
3182 */
3183bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3184 bool in_fi)
3185{
3186 /*
3187 * The const qualifiers are dropped from task_struct pointers when
3188 * calling ops.core_sched_before(). Accesses are controlled by the
3189 * verifier.
3190 */
3191 if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
3192 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3193 (struct task_struct *)a,
3194 (struct task_struct *)b);
3195 else
3196 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3197}
3198#endif /* CONFIG_SCHED_CORE */
3199
3200#ifdef CONFIG_SMP
3201
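/*
 * Try to claim @cpu from the idle masks. Returns %true if @cpu was idle and
 * has been claimed by the caller. The SMT sibling mask is cleared either way
 * as the core is no longer wholly idle.
 */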
3202static bool test_and_clear_cpu_idle(int cpu)
3203{
3204#ifdef CONFIG_SCHED_SMT
3205 /*
3206 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3207 * cluster is not wholly idle either way. This also prevents
3208 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3209 */
3210 if (sched_smt_active()) {
3211 const struct cpumask *smt = cpu_smt_mask(cpu);
3212
3213 /*
3214 * If offline, @cpu is not its own sibling and
3215 * scx_pick_idle_cpu() can get caught in an infinite loop as
3216 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3217 * is eventually cleared.
3218 */
3219 if (cpumask_intersects(smt, idle_masks.smt))
3220 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3221 else if (cpumask_test_cpu(cpu, idle_masks.smt))
3222 __cpumask_clear_cpu(cpu, idle_masks.smt);
3223 }
3224#endif
3225 return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
3226}
3227
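/*
 * Pick and claim an idle CPU from @cpus_allowed, preferring wholly idle SMT
 * cores. With %SCX_PICK_IDLE_CORE, only wholly idle cores are considered.
 * Returns the claimed CPU or -EBUSY if none is available.
 */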
3228static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
3229{
3230 int cpu;
3231
3232retry:
3233 if (sched_smt_active()) {
3234 cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
3235 if (cpu < nr_cpu_ids)
3236 goto found;
3237
3238 if (flags & SCX_PICK_IDLE_CORE)
3239 return -EBUSY;
3240 }
3241
3242 cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
3243 if (cpu >= nr_cpu_ids)
3244 return -EBUSY;
3245
3246found:
3247 if (test_and_clear_cpu_idle(cpu))
3248 return cpu;
3249 else
3250 goto retry;
3251}
3252
3253/*
3254 * Return the number of CPUs in the same LLC domain as @cpu (or zero if the LLC
3255 * domain is not defined).
3256 */
3257static unsigned int llc_weight(s32 cpu)
3258{
3259 struct sched_domain *sd;
3260
3261 sd = rcu_dereference(per_cpu(sd_llc, cpu));
3262 if (!sd)
3263 return 0;
3264
3265 return sd->span_weight;
3266}
3267
3268/*
3269 * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
3270 * domain is not defined).
3271 */
3272static struct cpumask *llc_span(s32 cpu)
3273{
3274 struct sched_domain *sd;
3275
3276 sd = rcu_dereference(per_cpu(sd_llc, cpu));
3277 if (!sd)
3278		return NULL;
3279
3280 return sched_domain_span(sd);
3281}
3282
3283/*
3284 * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
3285 * NUMA domain is not defined).
3286 */
3287static unsigned int numa_weight(s32 cpu)
3288{
3289 struct sched_domain *sd;
3290 struct sched_group *sg;
3291
3292 sd = rcu_dereference(per_cpu(sd_numa, cpu));
3293 if (!sd)
3294 return 0;
3295 sg = sd->groups;
3296 if (!sg)
3297 return 0;
3298
3299 return sg->group_weight;
3300}
3301
3302/*
3303 * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
3304 * domain is not defined).
3305 */
3306static struct cpumask *numa_span(s32 cpu)
3307{
3308 struct sched_domain *sd;
3309 struct sched_group *sg;
3310
3311 sd = rcu_dereference(per_cpu(sd_numa, cpu));
3312 if (!sd)
3313 return NULL;
3314 sg = sd->groups;
3315 if (!sg)
3316 return NULL;
3317
3318 return sched_group_span(sg);
3319}
3320
3321/*
3322 * Return true if the LLC domains do not perfectly overlap with the NUMA
3323 * domains, false otherwise.
3324 */
3325static bool llc_numa_mismatch(void)
3326{
3327 int cpu;
3328
3329 /*
3330 * We need to scan all online CPUs to verify whether their scheduling
3331 * domains overlap.
3332 *
3333 * While it is rare to encounter architectures with asymmetric NUMA
3334 * topologies, CPU hotplugging or virtualized environments can result
3335 * in asymmetric configurations.
3336 *
3337 * For example:
3338 *
3339 * NUMA 0:
3340 * - LLC 0: cpu0..cpu7
3341 * - LLC 1: cpu8..cpu15 [offline]
3342 *
3343 * NUMA 1:
3344 * - LLC 0: cpu16..cpu23
3345 * - LLC 1: cpu24..cpu31
3346 *
3347	 * In this case, if we only checked the first online CPU (cpu0), we
3348	 * might incorrectly conclude that the LLC and NUMA domains fully
3349	 * overlap, even though NUMA 1 in fact contains two distinct LLC
3350	 * domains.
3351 */
3352 for_each_online_cpu(cpu)
3353 if (llc_weight(cpu) != numa_weight(cpu))
3354 return true;
3355
3356 return false;
3357}
3358
3359/*
3360 * Initialize topology-aware scheduling.
3361 *
3362 * Detect if the system has multiple LLC or multiple NUMA domains and enable
3363 * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
3364 * selection policy.
3365 *
3366 * Assumption: the kernel's internal topology representation assumes that each
3367 * CPU belongs to a single LLC domain, and that each LLC domain is entirely
3368 * contained within a single NUMA node.
3369 */
3370static void update_selcpu_topology(void)
3371{
3372 bool enable_llc = false, enable_numa = false;
3373 unsigned int nr_cpus;
3374 s32 cpu = cpumask_first(cpu_online_mask);
3375
3376 /*
3377 * Enable LLC domain optimization only when there are multiple LLC
3378 * domains among the online CPUs. If all online CPUs are part of a
3379 * single LLC domain, the idle CPU selection logic can choose any
3380 * online CPU without bias.
3381 *
3382 * Note that it is sufficient to check the LLC domain of the first
3383 * online CPU to determine whether a single LLC domain includes all
3384 * CPUs.
3385 */
3386 rcu_read_lock();
3387 nr_cpus = llc_weight(cpu);
3388 if (nr_cpus > 0) {
3389 if (nr_cpus < num_online_cpus())
3390 enable_llc = true;
3391 pr_debug("sched_ext: LLC=%*pb weight=%u\n",
3392 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
3393 }
3394
3395 /*
3396 * Enable NUMA optimization only when there are multiple NUMA domains
3397	 * among the online CPUs and the NUMA domains don't perfectly overlap
3398 * with the LLC domains.
3399 *
3400 * If all CPUs belong to the same NUMA node and the same LLC domain,
3401 * enabling both NUMA and LLC optimizations is unnecessary, as checking
3402 * for an idle CPU in the same domain twice is redundant.
3403 */
3404 nr_cpus = numa_weight(cpu);
3405 if (nr_cpus > 0) {
3406 if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
3407 enable_numa = true;
3408 pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
3409 cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
3410 }
3411 rcu_read_unlock();
3412
3413 pr_debug("sched_ext: LLC idle selection %s\n",
3414 enable_llc ? "enabled" : "disabled");
3415 pr_debug("sched_ext: NUMA idle selection %s\n",
3416 enable_numa ? "enabled" : "disabled");
3417
3418 if (enable_llc)
3419 static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
3420 else
3421 static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
3422 if (enable_numa)
3423 static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
3424 else
3425 static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
3426}
3427
3428/*
3429 * Built-in CPU idle selection policy:
3430 *
3431 * 1. Prioritize full-idle cores:
3432 * - always prioritize CPUs from fully idle cores (both logical CPUs are
3433 * idle) to avoid interference caused by SMT.
3434 *
3435 * 2. Reuse the same CPU:
3436 * - prefer the last used CPU to take advantage of cached data (L1, L2) and
3437 * branch prediction optimizations.
3438 *
3439 * 3. Pick a CPU within the same LLC (Last-Level Cache):
3440 * - if the above conditions aren't met, pick a CPU that shares the same LLC
3441 * to maintain cache locality.
3442 *
3443 * 4. Pick a CPU within the same NUMA node, if enabled:
3444 * - choose a CPU from the same NUMA node to reduce memory access latency.
3445 *
3446 * Steps 3 and 4 are performed only if the system has, respectively, multiple
3447 * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
3448 * scx_selcpu_topo_numa).
3449 *
3450 * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
3451 * we never call ops.select_cpu() for them, see select_task_rq().
3452 */
3453static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
3454 u64 wake_flags, bool *found)
3455{
3456 const struct cpumask *llc_cpus = NULL;
3457 const struct cpumask *numa_cpus = NULL;
3458 s32 cpu;
3459
3460 *found = false;
3461
3462 /*
3463 * This is necessary to protect llc_cpus.
3464 */
3465 rcu_read_lock();
3466
3467 /*
3468 * Determine the scheduling domain only if the task is allowed to run
3469 * on all CPUs.
3470 *
3471 * This is done primarily for efficiency, as it avoids the overhead of
3472 * updating a cpumask every time we need to select an idle CPU (which
3473 * can be costly in large SMP systems), but it also aligns logically:
3474 * if a task's scheduling domain is restricted by user-space (through
3475 * CPU affinity), the task will simply use the flat scheduling domain
3476 * defined by user-space.
3477 */
3478 if (p->nr_cpus_allowed >= num_possible_cpus()) {
3479 if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
3480 numa_cpus = numa_span(prev_cpu);
3481
3482 if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
3483 llc_cpus = llc_span(prev_cpu);
3484 }
3485
3486 /*
3487 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
3488 */
3489 if (wake_flags & SCX_WAKE_SYNC) {
3490 cpu = smp_processor_id();
3491
3492 /*
3493 * If the waker's CPU is cache affine and prev_cpu is idle,
3494 * then avoid a migration.
3495 */
3496 if (cpus_share_cache(cpu, prev_cpu) &&
3497 test_and_clear_cpu_idle(prev_cpu)) {
3498 cpu = prev_cpu;
3499 goto cpu_found;
3500 }
3501
3502 /*
3503		 * If the waker's local DSQ is empty and the system is
3504		 * underutilized, try to wake up @p to the local DSQ of the waker.
3505 *
3506 * Checking only for an empty local DSQ is insufficient as it
3507 * could give the wakee an unfair advantage when the system is
3508 * oversaturated.
3509 *
3510 * Checking only for the presence of idle CPUs is also
3511 * insufficient as the local DSQ of the waker could have tasks
3512 * piled up on it even if there is an idle core elsewhere on
3513 * the system.
3514 */
3515 if (!cpumask_empty(idle_masks.cpu) &&
3516 !(current->flags & PF_EXITING) &&
3517 cpu_rq(cpu)->scx.local_dsq.nr == 0) {
3518 if (cpumask_test_cpu(cpu, p->cpus_ptr))
3519 goto cpu_found;
3520 }
3521 }
3522
3523 /*
3524	 * If the CPU has SMT, any wholly idle CPU is likely a better pick than
3525	 * a partially idle @prev_cpu.
3526 */
3527 if (sched_smt_active()) {
3528 /*
3529 * Keep using @prev_cpu if it's part of a fully idle core.
3530 */
3531 if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
3532 test_and_clear_cpu_idle(prev_cpu)) {
3533 cpu = prev_cpu;
3534 goto cpu_found;
3535 }
3536
3537 /*
3538 * Search for any fully idle core in the same LLC domain.
3539 */
3540 if (llc_cpus) {
3541 cpu = scx_pick_idle_cpu(llc_cpus, SCX_PICK_IDLE_CORE);
3542 if (cpu >= 0)
3543 goto cpu_found;
3544 }
3545
3546 /*
3547 * Search for any fully idle core in the same NUMA node.
3548 */
3549 if (numa_cpus) {
3550 cpu = scx_pick_idle_cpu(numa_cpus, SCX_PICK_IDLE_CORE);
3551 if (cpu >= 0)
3552 goto cpu_found;
3553 }
3554
3555 /*
3556		 * Search for any fully idle core usable by the task.
3557 */
3558 cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
3559 if (cpu >= 0)
3560 goto cpu_found;
3561 }
3562
3563 /*
3564 * Use @prev_cpu if it's idle.
3565 */
3566 if (test_and_clear_cpu_idle(prev_cpu)) {
3567 cpu = prev_cpu;
3568 goto cpu_found;
3569 }
3570
3571 /*
3572 * Search for any idle CPU in the same LLC domain.
3573 */
3574 if (llc_cpus) {
3575 cpu = scx_pick_idle_cpu(llc_cpus, 0);
3576 if (cpu >= 0)
3577 goto cpu_found;
3578 }
3579
3580 /*
3581 * Search for any idle CPU in the same NUMA node.
3582 */
3583 if (numa_cpus) {
3584 cpu = scx_pick_idle_cpu(numa_cpus, 0);
3585 if (cpu >= 0)
3586 goto cpu_found;
3587 }
3588
3589 /*
3590 * Search for any idle CPU usable by the task.
3591 */
3592 cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
3593 if (cpu >= 0)
3594 goto cpu_found;
3595
3596 rcu_read_unlock();
3597 return prev_cpu;
3598
3599cpu_found:
3600 rcu_read_unlock();
3601
3602 *found = true;
3603 return cpu;
3604}
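/*
 * Illustrative sketch (not part of this file): a BPF scheduler that wants to
 * reuse the default idle selection policy above can call it from its own
 * ops.select_cpu() through the scx_bpf_select_cpu_dfl() kfunc and
 * direct-dispatch when an idle CPU was reserved. The BPF_STRUCT_OPS macro and
 * the "example_" names follow the conventions of the in-tree example
 * schedulers and are assumptions here; scx_bpf_dispatch() may be named
 * scx_bpf_dsq_insert() in newer trees.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */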
3605
3606static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3607{
3608 /*
3609 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3610 * can be a good migration opportunity with low cache and memory
3611	 * footprint. Returning a CPU different from @prev_cpu triggers
3612 * immediate rq migration. However, for SCX, as the current rq
3613 * association doesn't dictate where the task is going to run, this
3614 * doesn't fit well. If necessary, we can later add a dedicated method
3615 * which can decide to preempt self to force it through the regular
3616 * scheduling path.
3617 */
3618 if (unlikely(wake_flags & WF_EXEC))
3619 return prev_cpu;
3620
3621 if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) {
3622 s32 cpu;
3623 struct task_struct **ddsp_taskp;
3624
3625 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3626 WARN_ON_ONCE(*ddsp_taskp);
3627 *ddsp_taskp = p;
3628
3629 cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3630 select_cpu, p, prev_cpu, wake_flags);
3631 *ddsp_taskp = NULL;
3632 if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3633 return cpu;
3634 else
3635 return prev_cpu;
3636 } else {
3637 bool found;
3638 s32 cpu;
3639
3640 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
3641 if (found) {
3642 p->scx.slice = SCX_SLICE_DFL;
3643 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3644 }
3645 return cpu;
3646 }
3647}
3648
3649static void task_woken_scx(struct rq *rq, struct task_struct *p)
3650{
3651 run_deferred(rq);
3652}
3653
3654static void set_cpus_allowed_scx(struct task_struct *p,
3655 struct affinity_context *ac)
3656{
3657 set_cpus_allowed_common(p, ac);
3658
3659 /*
3660 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3661 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3662 * scheduler the effective one.
3663 *
3664 * Fine-grained memory write control is enforced by BPF making the const
3665 * designation pointless. Cast it away when calling the operation.
3666 */
3667 if (SCX_HAS_OP(set_cpumask))
3668 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3669 (struct cpumask *)p->cpus_ptr);
3670}
3671
3672static void reset_idle_masks(void)
3673{
3674 /*
3675 * Consider all online cpus idle. Should converge to the actual state
3676 * quickly.
3677 */
3678 cpumask_copy(idle_masks.cpu, cpu_online_mask);
3679 cpumask_copy(idle_masks.smt, cpu_online_mask);
3680}
3681
3682static void update_builtin_idle(int cpu, bool idle)
3683{
3684 if (idle)
3685 cpumask_set_cpu(cpu, idle_masks.cpu);
3686 else
3687 cpumask_clear_cpu(cpu, idle_masks.cpu);
3688
3689#ifdef CONFIG_SCHED_SMT
3690 if (sched_smt_active()) {
3691 const struct cpumask *smt = cpu_smt_mask(cpu);
3692
3693 if (idle) {
3694 /*
3695 * idle_masks.smt handling is racy but that's fine as
3696 * it's only for optimization and self-correcting.
3697 */
3698 for_each_cpu(cpu, smt) {
3699 if (!cpumask_test_cpu(cpu, idle_masks.cpu))
3700 return;
3701 }
3702 cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3703 } else {
3704 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3705 }
3706 }
3707#endif
3708}
3709
3710/*
3711 * Update the idle state of a CPU to @idle.
3712 *
3713 * If @do_notify is true, ops.update_idle() is invoked to notify the scx
3714 * scheduler of an actual idle state transition (idle to busy or vice
3715 * versa). If @do_notify is false, only the idle state in the idle masks is
3716 * refreshed without invoking ops.update_idle().
3717 *
3718 * This distinction is necessary, because an idle CPU can be "reserved" and
3719 * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
3720 * busy even if no tasks are dispatched. In this case, the CPU may return
3721 * to idle without a true state transition. Refreshing the idle masks
3722 * without invoking ops.update_idle() ensures accurate idle state tracking
3723 * while avoiding unnecessary updates and maintaining balanced state
3724 * transitions.
3725 */
3726void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
3727{
3728 int cpu = cpu_of(rq);
3729
3730 lockdep_assert_rq_held(rq);
3731
3732 /*
3733 * Trigger ops.update_idle() only when transitioning from a task to
3734 * the idle thread and vice versa.
3735 *
3736 * Idle transitions are indicated by do_notify being set to true,
3737 * managed by put_prev_task_idle()/set_next_task_idle().
3738 */
3739 if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
3740 SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3741
3742 /*
3743 * Update the idle masks:
3744 * - for real idle transitions (do_notify == true)
3745 * - for idle-to-idle transitions (indicated by the previous task
3746 * being the idle thread, managed by pick_task_idle())
3747 *
3748 * Skip updating idle masks if the previous task is not the idle
3749 * thread, since set_next_task_idle() has already handled it when
3750 * transitioning from a task to the idle thread (calling this
3751 * function with do_notify == true).
3752 *
3753 * In this way we can avoid updating the idle masks twice,
3754 * unnecessarily.
3755 */
3756 if (static_branch_likely(&scx_builtin_idle_enabled))
3757 if (do_notify || is_idle_task(rq->curr))
3758 update_builtin_idle(cpu, idle);
3759}
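/*
 * Illustrative sketch (not part of this file): a minimal ops.update_idle()
 * callback on the BPF side, invoked from __scx_update_idle() above on real
 * idle state transitions. Implementing it normally replaces the built-in idle
 * tracking unless SCX_OPS_KEEP_BUILTIN_IDLE is set in ops.flags. The map name
 * below is an assumption.
 *
 *	void BPF_STRUCT_OPS(example_update_idle, s32 cpu, bool idle)
 *	{
 *		u32 key = cpu;
 *		u8 val = idle;
 *
 *		bpf_map_update_elem(&example_idle_state, &key, &val, BPF_ANY);
 *	}
 */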
3760
3761static void handle_hotplug(struct rq *rq, bool online)
3762{
3763 int cpu = cpu_of(rq);
3764
3765 atomic_long_inc(&scx_hotplug_seq);
3766
3767 if (scx_enabled())
3768 update_selcpu_topology();
3769
3770 if (online && SCX_HAS_OP(cpu_online))
3771 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3772 else if (!online && SCX_HAS_OP(cpu_offline))
3773 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3774 else
3775 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3776 "cpu %d going %s, exiting scheduler", cpu,
3777 online ? "online" : "offline");
3778}
3779
3780void scx_rq_activate(struct rq *rq)
3781{
3782 handle_hotplug(rq, true);
3783}
3784
3785void scx_rq_deactivate(struct rq *rq)
3786{
3787 handle_hotplug(rq, false);
3788}
3789
3790static void rq_online_scx(struct rq *rq)
3791{
3792 rq->scx.flags |= SCX_RQ_ONLINE;
3793}
3794
3795static void rq_offline_scx(struct rq *rq)
3796{
3797 rq->scx.flags &= ~SCX_RQ_ONLINE;
3798}
3799
3800#else /* CONFIG_SMP */
3801
3802static bool test_and_clear_cpu_idle(int cpu) { return false; }
3803static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3804static void reset_idle_masks(void) {}
3805
3806#endif /* CONFIG_SMP */
3807
3808static bool check_rq_for_timeouts(struct rq *rq)
3809{
3810 struct task_struct *p;
3811 struct rq_flags rf;
3812 bool timed_out = false;
3813
3814 rq_lock_irqsave(rq, &rf);
3815 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3816 unsigned long last_runnable = p->scx.runnable_at;
3817
3818 if (unlikely(time_after(jiffies,
3819 last_runnable + scx_watchdog_timeout))) {
3820 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3821
3822 scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3823 "%s[%d] failed to run for %u.%03us",
3824 p->comm, p->pid,
3825 dur_ms / 1000, dur_ms % 1000);
3826 timed_out = true;
3827 break;
3828 }
3829 }
3830 rq_unlock_irqrestore(rq, &rf);
3831
3832 return timed_out;
3833}
3834
3835static void scx_watchdog_workfn(struct work_struct *work)
3836{
3837 int cpu;
3838
3839 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3840
3841 for_each_online_cpu(cpu) {
3842 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3843 break;
3844
3845 cond_resched();
3846 }
3847 queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3848 scx_watchdog_timeout / 2);
3849}
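/*
 * Illustrative sketch (not part of this file): the stall timeout enforced by
 * the watchdog above is configured per scheduler through the timeout_ms field
 * of sched_ext_ops and capped at SCX_WATCHDOG_MAX_TIMEOUT. On the BPF side it
 * is just a field in the struct_ops definition; the scheduler name below is
 * an assumption.
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.enqueue	= (void *)example_enqueue,
 *		.dispatch	= (void *)example_dispatch,
 *		.timeout_ms	= 5000,
 *		.name		= "example",
 *	};
 */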
3850
3851void scx_tick(struct rq *rq)
3852{
3853 unsigned long last_check;
3854
3855 if (!scx_enabled())
3856 return;
3857
3858 last_check = READ_ONCE(scx_watchdog_timestamp);
3859 if (unlikely(time_after(jiffies,
3860 last_check + READ_ONCE(scx_watchdog_timeout)))) {
3861 u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3862
3863 scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3864 "watchdog failed to check in for %u.%03us",
3865 dur_ms / 1000, dur_ms % 1000);
3866 }
3867
3868 update_other_load_avgs(rq);
3869}
3870
3871static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3872{
3873 update_curr_scx(rq);
3874
3875 /*
3876 * While disabling, always resched and refresh core-sched timestamp as
3877 * we can't trust the slice management or ops.core_sched_before().
3878 */
3879 if (scx_rq_bypassing(rq)) {
3880 curr->scx.slice = 0;
3881 touch_core_sched(rq, curr);
3882 } else if (SCX_HAS_OP(tick)) {
3883 SCX_CALL_OP_TASK(SCX_KF_REST, tick, curr);
3884 }
3885
3886 if (!curr->scx.slice)
3887 resched_curr(rq);
3888}
3889
3890#ifdef CONFIG_EXT_GROUP_SCHED
3891static struct cgroup *tg_cgrp(struct task_group *tg)
3892{
3893 /*
3894 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3895 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3896 * root cgroup.
3897 */
3898 if (tg && tg->css.cgroup)
3899 return tg->css.cgroup;
3900 else
3901 return &cgrp_dfl_root.cgrp;
3902}
3903
3904#define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
3905
3906#else /* CONFIG_EXT_GROUP_SCHED */
3907
3908#define SCX_INIT_TASK_ARGS_CGROUP(tg)
3909
3910#endif /* CONFIG_EXT_GROUP_SCHED */
3911
3912static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3913{
3914 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3915}
3916
3917static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3918{
3919 enum scx_task_state prev_state = scx_get_task_state(p);
3920 bool warn = false;
3921
3922 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3923
3924 switch (state) {
3925 case SCX_TASK_NONE:
3926 break;
3927 case SCX_TASK_INIT:
3928 warn = prev_state != SCX_TASK_NONE;
3929 break;
3930 case SCX_TASK_READY:
3931 warn = prev_state == SCX_TASK_NONE;
3932 break;
3933 case SCX_TASK_ENABLED:
3934 warn = prev_state != SCX_TASK_READY;
3935 break;
3936 default:
3937 warn = true;
3938 return;
3939 }
3940
3941 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3942 prev_state, state, p->comm, p->pid);
3943
3944 p->scx.flags &= ~SCX_TASK_STATE_MASK;
3945 p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3946}
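/*
 * For reference, the transitions accepted above without triggering the
 * warning are:
 *
 *	NONE    -> INIT		scx_ops_init_task()
 *	INIT    -> READY	e.g. scx_post_fork()
 *	READY   -> ENABLED	scx_ops_enable_task()
 *	ENABLED -> READY	scx_ops_disable_task()
 *	any     -> NONE		scx_ops_exit_task()
 */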
3947
3948static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3949{
3950 int ret;
3951
3952 p->scx.disallow = false;
3953
3954 if (SCX_HAS_OP(init_task)) {
3955 struct scx_init_task_args args = {
3956 SCX_INIT_TASK_ARGS_CGROUP(tg)
3957 .fork = fork,
3958 };
3959
3960 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
3961 if (unlikely(ret)) {
3962 ret = ops_sanitize_err("init_task", ret);
3963 return ret;
3964 }
3965 }
3966
3967 scx_set_task_state(p, SCX_TASK_INIT);
3968
3969 if (p->scx.disallow) {
3970 if (!fork) {
3971 struct rq *rq;
3972 struct rq_flags rf;
3973
3974 rq = task_rq_lock(p, &rf);
3975
3976 /*
3977 * We're in the load path and @p->policy will be applied
3978 * right after. Reverting @p->policy here and rejecting
3979 * %SCHED_EXT transitions from scx_check_setscheduler()
3980 * guarantees that if ops.init_task() sets @p->disallow,
3981 * @p can never be in SCX.
3982 */
3983 if (p->policy == SCHED_EXT) {
3984 p->policy = SCHED_NORMAL;
3985 atomic_long_inc(&scx_nr_rejected);
3986 }
3987
3988 task_rq_unlock(rq, p, &rf);
3989 } else if (p->policy == SCHED_EXT) {
3990 scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
3991 p->comm, p->pid);
3992 }
3993 }
3994
3995 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3996 return 0;
3997}
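/*
 * Illustrative sketch (not part of this file): ops.init_task() on the BPF
 * side may set p->scx.disallow so that the task can never run on SCX, which
 * is what the disallow handling above enforces. The predicate name is an
 * assumption.
 *
 *	s32 BPF_STRUCT_OPS(example_init_task, struct task_struct *p,
 *			   struct scx_init_task_args *args)
 *	{
 *		if (example_should_disallow(p))
 *			p->scx.disallow = true;
 *		return 0;
 *	}
 */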
3998
3999static void scx_ops_enable_task(struct task_struct *p)
4000{
4001 u32 weight;
4002
4003 lockdep_assert_rq_held(task_rq(p));
4004
4005 /*
4006 * Set the weight before calling ops.enable() so that the scheduler
4007 * doesn't see a stale value if they inspect the task struct.
4008 */
4009 if (task_has_idle_policy(p))
4010 weight = WEIGHT_IDLEPRIO;
4011 else
4012 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
4013
4014 p->scx.weight = sched_weight_to_cgroup(weight);
4015
4016 if (SCX_HAS_OP(enable))
4017 SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
4018 scx_set_task_state(p, SCX_TASK_ENABLED);
4019
4020 if (SCX_HAS_OP(set_weight))
4021 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
4022}
4023
4024static void scx_ops_disable_task(struct task_struct *p)
4025{
4026 lockdep_assert_rq_held(task_rq(p));
4027 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
4028
4029 if (SCX_HAS_OP(disable))
4030 SCX_CALL_OP_TASK(SCX_KF_REST, disable, p);
4031 scx_set_task_state(p, SCX_TASK_READY);
4032}
4033
4034static void scx_ops_exit_task(struct task_struct *p)
4035{
4036 struct scx_exit_task_args args = {
4037 .cancelled = false,
4038 };
4039
4040 lockdep_assert_rq_held(task_rq(p));
4041
4042 switch (scx_get_task_state(p)) {
4043 case SCX_TASK_NONE:
4044 return;
4045 case SCX_TASK_INIT:
4046 args.cancelled = true;
4047 break;
4048 case SCX_TASK_READY:
4049 break;
4050 case SCX_TASK_ENABLED:
4051 scx_ops_disable_task(p);
4052 break;
4053 default:
4054 WARN_ON_ONCE(true);
4055 return;
4056 }
4057
4058 if (SCX_HAS_OP(exit_task))
4059 SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, p, &args);
4060 scx_set_task_state(p, SCX_TASK_NONE);
4061}
4062
4063void init_scx_entity(struct sched_ext_entity *scx)
4064{
4065 memset(scx, 0, sizeof(*scx));
4066 INIT_LIST_HEAD(&scx->dsq_list.node);
4067 RB_CLEAR_NODE(&scx->dsq_priq);
4068 scx->sticky_cpu = -1;
4069 scx->holding_cpu = -1;
4070 INIT_LIST_HEAD(&scx->runnable_node);
4071 scx->runnable_at = jiffies;
4072 scx->ddsp_dsq_id = SCX_DSQ_INVALID;
4073 scx->slice = SCX_SLICE_DFL;
4074}
4075
4076void scx_pre_fork(struct task_struct *p)
4077{
4078 /*
4079 * BPF scheduler enable/disable paths want to be able to iterate and
4080 * update all tasks which can become complex when racing forks. As
4081 * enable/disable are very cold paths, let's use a percpu_rwsem to
4082 * exclude forks.
4083 */
4084 percpu_down_read(&scx_fork_rwsem);
4085}
4086
4087int scx_fork(struct task_struct *p)
4088{
4089 percpu_rwsem_assert_held(&scx_fork_rwsem);
4090
4091 if (scx_ops_init_task_enabled)
4092 return scx_ops_init_task(p, task_group(p), true);
4093 else
4094 return 0;
4095}
4096
4097void scx_post_fork(struct task_struct *p)
4098{
4099 if (scx_ops_init_task_enabled) {
4100 scx_set_task_state(p, SCX_TASK_READY);
4101
4102 /*
4103 * Enable the task immediately if it's running on sched_ext.
4104 * Otherwise, it'll be enabled in switching_to_scx() if and
4105 * when it's ever configured to run with a SCHED_EXT policy.
4106 */
4107 if (p->sched_class == &ext_sched_class) {
4108 struct rq_flags rf;
4109 struct rq *rq;
4110
4111 rq = task_rq_lock(p, &rf);
4112 scx_ops_enable_task(p);
4113 task_rq_unlock(rq, p, &rf);
4114 }
4115 }
4116
4117 spin_lock_irq(&scx_tasks_lock);
4118 list_add_tail(&p->scx.tasks_node, &scx_tasks);
4119 spin_unlock_irq(&scx_tasks_lock);
4120
4121 percpu_up_read(&scx_fork_rwsem);
4122}
4123
4124void scx_cancel_fork(struct task_struct *p)
4125{
4126 if (scx_enabled()) {
4127 struct rq *rq;
4128 struct rq_flags rf;
4129
4130 rq = task_rq_lock(p, &rf);
4131 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
4132 scx_ops_exit_task(p);
4133 task_rq_unlock(rq, p, &rf);
4134 }
4135
4136 percpu_up_read(&scx_fork_rwsem);
4137}
4138
4139void sched_ext_free(struct task_struct *p)
4140{
4141 unsigned long flags;
4142
4143 spin_lock_irqsave(&scx_tasks_lock, flags);
4144 list_del_init(&p->scx.tasks_node);
4145 spin_unlock_irqrestore(&scx_tasks_lock, flags);
4146
4147 /*
4148 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
4149 * ENABLED transitions can't race us. Disable ops for @p.
4150 */
4151 if (scx_get_task_state(p) != SCX_TASK_NONE) {
4152 struct rq_flags rf;
4153 struct rq *rq;
4154
4155 rq = task_rq_lock(p, &rf);
4156 scx_ops_exit_task(p);
4157 task_rq_unlock(rq, p, &rf);
4158 }
4159}
4160
4161static void reweight_task_scx(struct rq *rq, struct task_struct *p,
4162 const struct load_weight *lw)
4163{
4164 lockdep_assert_rq_held(task_rq(p));
4165
4166 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
4167 if (SCX_HAS_OP(set_weight))
4168 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
4169}
4170
4171static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
4172{
4173}
4174
4175static void switching_to_scx(struct rq *rq, struct task_struct *p)
4176{
4177 scx_ops_enable_task(p);
4178
4179 /*
4180 * set_cpus_allowed_scx() is not called while @p is associated with a
4181 * different scheduler class. Keep the BPF scheduler up-to-date.
4182 */
4183 if (SCX_HAS_OP(set_cpumask))
4184 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
4185 (struct cpumask *)p->cpus_ptr);
4186}
4187
4188static void switched_from_scx(struct rq *rq, struct task_struct *p)
4189{
4190 scx_ops_disable_task(p);
4191}
4192
4193static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {}
4194static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
4195
4196int scx_check_setscheduler(struct task_struct *p, int policy)
4197{
4198 lockdep_assert_rq_held(task_rq(p));
4199
4200 /* if disallow, reject transitioning into SCX */
4201 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
4202 p->policy != policy && policy == SCHED_EXT)
4203 return -EACCES;
4204
4205 return 0;
4206}
4207
4208#ifdef CONFIG_NO_HZ_FULL
4209bool scx_can_stop_tick(struct rq *rq)
4210{
4211 struct task_struct *p = rq->curr;
4212
4213 if (scx_rq_bypassing(rq))
4214 return false;
4215
4216 if (p->sched_class != &ext_sched_class)
4217 return true;
4218
4219 /*
4220 * @rq can dispatch from different DSQs, so we can't tell whether it
4221 * needs the tick or not by looking at nr_running. Allow stopping ticks
4222 * iff the BPF scheduler indicated so. See set_next_task_scx().
4223 */
4224 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4225}
4226#endif
4227
4228#ifdef CONFIG_EXT_GROUP_SCHED
4229
4230DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
4231static bool scx_cgroup_enabled;
4232static bool cgroup_warned_missing_weight;
4233static bool cgroup_warned_missing_idle;
4234
4235static void scx_cgroup_warn_missing_weight(struct task_group *tg)
4236{
4237 if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
4238 cgroup_warned_missing_weight)
4239 return;
4240
4241 if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
4242 return;
4243
4244 pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
4245 scx_ops.name);
4246 cgroup_warned_missing_weight = true;
4247}
4248
4249static void scx_cgroup_warn_missing_idle(struct task_group *tg)
4250{
4251 if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
4252 return;
4253
4254 if (!tg->idle)
4255 return;
4256
4257 pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
4258 scx_ops.name);
4259 cgroup_warned_missing_idle = true;
4260}
4261
4262int scx_tg_online(struct task_group *tg)
4263{
4264 int ret = 0;
4265
4266 WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4267
4268 percpu_down_read(&scx_cgroup_rwsem);
4269
4270 scx_cgroup_warn_missing_weight(tg);
4271
4272 if (scx_cgroup_enabled) {
4273 if (SCX_HAS_OP(cgroup_init)) {
4274 struct scx_cgroup_init_args args =
4275 { .weight = tg->scx_weight };
4276
4277 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4278 tg->css.cgroup, &args);
4279 if (ret)
4280 ret = ops_sanitize_err("cgroup_init", ret);
4281 }
4282 if (ret == 0)
4283 tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4284 } else {
4285 tg->scx_flags |= SCX_TG_ONLINE;
4286 }
4287
4288 percpu_up_read(&scx_cgroup_rwsem);
4289 return ret;
4290}
4291
4292void scx_tg_offline(struct task_group *tg)
4293{
4294 WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
4295
4296 percpu_down_read(&scx_cgroup_rwsem);
4297
4298 if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
4299 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
4300 tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4301
4302 percpu_up_read(&scx_cgroup_rwsem);
4303}
4304
4305int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4306{
4307 struct cgroup_subsys_state *css;
4308 struct task_struct *p;
4309 int ret;
4310
4311 /* released in scx_finish/cancel_attach() */
4312 percpu_down_read(&scx_cgroup_rwsem);
4313
4314 if (!scx_cgroup_enabled)
4315 return 0;
4316
4317 cgroup_taskset_for_each(p, css, tset) {
4318 struct cgroup *from = tg_cgrp(task_group(p));
4319 struct cgroup *to = tg_cgrp(css_tg(css));
4320
4321 WARN_ON_ONCE(p->scx.cgrp_moving_from);
4322
4323 /*
4324 * sched_move_task() omits identity migrations. Let's match the
4325 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4326 * always match one-to-one.
4327 */
4328 if (from == to)
4329 continue;
4330
4331 if (SCX_HAS_OP(cgroup_prep_move)) {
4332 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
4333 p, from, css->cgroup);
4334 if (ret)
4335 goto err;
4336 }
4337
4338 p->scx.cgrp_moving_from = from;
4339 }
4340
4341 return 0;
4342
4343err:
4344 cgroup_taskset_for_each(p, css, tset) {
4345 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4346 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4347 p->scx.cgrp_moving_from, css->cgroup);
4348 p->scx.cgrp_moving_from = NULL;
4349 }
4350
4351 percpu_up_read(&scx_cgroup_rwsem);
4352 return ops_sanitize_err("cgroup_prep_move", ret);
4353}
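/*
 * Illustrative sketch (not part of this file): because identity migrations
 * are skipped above, ops.cgroup_prep_move() and ops.cgroup_move() always pair
 * one-to-one, so a BPF scheduler can set up per-move state in the former and
 * commit it in the latter. The callback bodies and helper names are
 * assumptions.
 *
 *	s32 BPF_STRUCT_OPS(example_cgroup_prep_move, struct task_struct *p,
 *			   struct cgroup *from, struct cgroup *to)
 *	{
 *		return example_reserve_cgroup_slot(to);
 *	}
 *
 *	void BPF_STRUCT_OPS(example_cgroup_move, struct task_struct *p,
 *			    struct cgroup *from, struct cgroup *to)
 *	{
 *		example_commit_cgroup_move(p, from, to);
 *	}
 */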
4354
4355void scx_cgroup_move_task(struct task_struct *p)
4356{
4357 if (!scx_cgroup_enabled)
4358 return;
4359
4360 /*
4361 * @p must have ops.cgroup_prep_move() called on it and thus
4362 * cgrp_moving_from set.
4363 */
4364 if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4365 SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
4366 p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
4367 p->scx.cgrp_moving_from = NULL;
4368}
4369
4370void scx_cgroup_finish_attach(void)
4371{
4372 percpu_up_read(&scx_cgroup_rwsem);
4373}
4374
4375void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4376{
4377 struct cgroup_subsys_state *css;
4378 struct task_struct *p;
4379
4380 if (!scx_cgroup_enabled)
4381 goto out_unlock;
4382
4383 cgroup_taskset_for_each(p, css, tset) {
4384 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4385 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4386 p->scx.cgrp_moving_from, css->cgroup);
4387 p->scx.cgrp_moving_from = NULL;
4388 }
4389out_unlock:
4390 percpu_up_read(&scx_cgroup_rwsem);
4391}
4392
4393void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4394{
4395 percpu_down_read(&scx_cgroup_rwsem);
4396
4397 if (scx_cgroup_enabled && tg->scx_weight != weight) {
4398 if (SCX_HAS_OP(cgroup_set_weight))
4399 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
4400 tg_cgrp(tg), weight);
4401 tg->scx_weight = weight;
4402 }
4403
4404 percpu_up_read(&scx_cgroup_rwsem);
4405}
4406
4407void scx_group_set_idle(struct task_group *tg, bool idle)
4408{
4409 percpu_down_read(&scx_cgroup_rwsem);
4410 scx_cgroup_warn_missing_idle(tg);
4411 percpu_up_read(&scx_cgroup_rwsem);
4412}
4413
4414static void scx_cgroup_lock(void)
4415{
4416 percpu_down_write(&scx_cgroup_rwsem);
4417}
4418
4419static void scx_cgroup_unlock(void)
4420{
4421 percpu_up_write(&scx_cgroup_rwsem);
4422}
4423
4424#else /* CONFIG_EXT_GROUP_SCHED */
4425
4426static inline void scx_cgroup_lock(void) {}
4427static inline void scx_cgroup_unlock(void) {}
4428
4429#endif /* CONFIG_EXT_GROUP_SCHED */
4430
4431/*
4432 * Omitted operations:
4433 *
4434 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
4435 * isn't tied to the CPU at that point. Preemption is implemented by resetting
4436 * the victim task's slice to 0 and triggering reschedule on the target CPU.
4437 *
4438 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4439 *
4440 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4441 * their current sched_class. Call them directly from sched core instead.
4442 */
4443DEFINE_SCHED_CLASS(ext) = {
4444 .enqueue_task = enqueue_task_scx,
4445 .dequeue_task = dequeue_task_scx,
4446 .yield_task = yield_task_scx,
4447 .yield_to_task = yield_to_task_scx,
4448
4449 .wakeup_preempt = wakeup_preempt_scx,
4450
4451 .balance = balance_scx,
4452 .pick_task = pick_task_scx,
4453
4454 .put_prev_task = put_prev_task_scx,
4455 .set_next_task = set_next_task_scx,
4456
4457#ifdef CONFIG_SMP
4458 .select_task_rq = select_task_rq_scx,
4459 .task_woken = task_woken_scx,
4460 .set_cpus_allowed = set_cpus_allowed_scx,
4461
4462 .rq_online = rq_online_scx,
4463 .rq_offline = rq_offline_scx,
4464#endif
4465
4466 .task_tick = task_tick_scx,
4467
4468 .switching_to = switching_to_scx,
4469 .switched_from = switched_from_scx,
4470 .switched_to = switched_to_scx,
4471 .reweight_task = reweight_task_scx,
4472 .prio_changed = prio_changed_scx,
4473
4474 .update_curr = update_curr_scx,
4475
4476#ifdef CONFIG_UCLAMP_TASK
4477 .uclamp_enabled = 1,
4478#endif
4479};
4480
4481static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4482{
4483 memset(dsq, 0, sizeof(*dsq));
4484
4485 raw_spin_lock_init(&dsq->lock);
4486 INIT_LIST_HEAD(&dsq->list);
4487 dsq->id = dsq_id;
4488}
4489
4490static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
4491{
4492 struct scx_dispatch_q *dsq;
4493 int ret;
4494
4495 if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
4496 return ERR_PTR(-EINVAL);
4497
4498 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4499 if (!dsq)
4500 return ERR_PTR(-ENOMEM);
4501
4502 init_dsq(dsq, dsq_id);
4503
4504 ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
4505 dsq_hash_params);
4506 if (ret) {
4507 kfree(dsq);
4508 return ERR_PTR(ret);
4509 }
4510 return dsq;
4511}
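/*
 * Illustrative sketch (not part of this file): user DSQs are created from the
 * BPF side with the scx_bpf_create_dsq() kfunc, typically in ops.init(), and
 * may not use IDs with SCX_DSQ_FLAG_BUILTIN set as enforced above. The DSQ ID
 * below is an assumption.
 *
 *	#define EXAMPLE_DSQ_ID	0
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(EXAMPLE_DSQ_ID, -1);
 *	}
 */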
4512
4513static void free_dsq_irq_workfn(struct irq_work *irq_work)
4514{
4515 struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4516 struct scx_dispatch_q *dsq, *tmp_dsq;
4517
4518 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4519 kfree_rcu(dsq, rcu);
4520}
4521
4522static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4523
4524static void destroy_dsq(u64 dsq_id)
4525{
4526 struct scx_dispatch_q *dsq;
4527 unsigned long flags;
4528
4529 rcu_read_lock();
4530
4531 dsq = find_user_dsq(dsq_id);
4532 if (!dsq)
4533 goto out_unlock_rcu;
4534
4535 raw_spin_lock_irqsave(&dsq->lock, flags);
4536
4537 if (dsq->nr) {
4538 scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4539 dsq->id, dsq->nr);
4540 goto out_unlock_dsq;
4541 }
4542
4543 if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
4544 goto out_unlock_dsq;
4545
4546 /*
4547 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4548 * queueing more tasks. As this function can be called from anywhere,
4549 * freeing is bounced through an irq work to avoid nesting RCU
4550 * operations inside scheduler locks.
4551 */
4552 dsq->id = SCX_DSQ_INVALID;
4553 llist_add(&dsq->free_node, &dsqs_to_free);
4554 irq_work_queue(&free_dsq_irq_work);
4555
4556out_unlock_dsq:
4557 raw_spin_unlock_irqrestore(&dsq->lock, flags);
4558out_unlock_rcu:
4559 rcu_read_unlock();
4560}
4561
4562#ifdef CONFIG_EXT_GROUP_SCHED
4563static void scx_cgroup_exit(void)
4564{
4565 struct cgroup_subsys_state *css;
4566
4567 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4568
4569 scx_cgroup_enabled = false;
4570
4571 /*
4572 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4573 * cgroups and exit all the inited ones, all online cgroups are exited.
4574 */
4575 rcu_read_lock();
4576 css_for_each_descendant_post(css, &root_task_group.css) {
4577 struct task_group *tg = css_tg(css);
4578
4579 if (!(tg->scx_flags & SCX_TG_INITED))
4580 continue;
4581 tg->scx_flags &= ~SCX_TG_INITED;
4582
4583 if (!scx_ops.cgroup_exit)
4584 continue;
4585
4586 if (WARN_ON_ONCE(!css_tryget(css)))
4587 continue;
4588 rcu_read_unlock();
4589
4590 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
4591
4592 rcu_read_lock();
4593 css_put(css);
4594 }
4595 rcu_read_unlock();
4596}
4597
4598static int scx_cgroup_init(void)
4599{
4600 struct cgroup_subsys_state *css;
4601 int ret;
4602
4603 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4604
4605 cgroup_warned_missing_weight = false;
4606 cgroup_warned_missing_idle = false;
4607
4608 /*
4609	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4610	 * cgroups and init each one, all online cgroups are initialized.
4611 */
4612 rcu_read_lock();
4613 css_for_each_descendant_pre(css, &root_task_group.css) {
4614 struct task_group *tg = css_tg(css);
4615 struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4616
4617 scx_cgroup_warn_missing_weight(tg);
4618 scx_cgroup_warn_missing_idle(tg);
4619
4620 if ((tg->scx_flags &
4621 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4622 continue;
4623
4624 if (!scx_ops.cgroup_init) {
4625 tg->scx_flags |= SCX_TG_INITED;
4626 continue;
4627 }
4628
4629 if (WARN_ON_ONCE(!css_tryget(css)))
4630 continue;
4631 rcu_read_unlock();
4632
4633 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4634 css->cgroup, &args);
4635 if (ret) {
4636 css_put(css);
4637 scx_ops_error("ops.cgroup_init() failed (%d)", ret);
4638 return ret;
4639 }
4640 tg->scx_flags |= SCX_TG_INITED;
4641
4642 rcu_read_lock();
4643 css_put(css);
4644 }
4645 rcu_read_unlock();
4646
4647 WARN_ON_ONCE(scx_cgroup_enabled);
4648 scx_cgroup_enabled = true;
4649
4650 return 0;
4651}
4652
4653#else
4654static void scx_cgroup_exit(void) {}
4655static int scx_cgroup_init(void) { return 0; }
4656#endif
4657
4658
4659/********************************************************************************
4660 * Sysfs interface and ops enable/disable.
4661 */
4662
4663#define SCX_ATTR(_name) \
4664 static struct kobj_attribute scx_attr_##_name = { \
4665 .attr = { .name = __stringify(_name), .mode = 0444 }, \
4666 .show = scx_attr_##_name##_show, \
4667 }
4668
4669static ssize_t scx_attr_state_show(struct kobject *kobj,
4670 struct kobj_attribute *ka, char *buf)
4671{
4672 return sysfs_emit(buf, "%s\n",
4673 scx_ops_enable_state_str[scx_ops_enable_state()]);
4674}
4675SCX_ATTR(state);
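/*
 * For reference, SCX_ATTR(state) above expands to roughly:
 *
 *	static struct kobj_attribute scx_attr_state = {
 *		.attr = { .name = "state", .mode = 0444 },
 *		.show = scx_attr_state_show,
 *	};
 */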
4676
4677static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4678 struct kobj_attribute *ka, char *buf)
4679{
4680 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4681}
4682SCX_ATTR(switch_all);
4683
4684static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4685 struct kobj_attribute *ka, char *buf)
4686{
4687 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4688}
4689SCX_ATTR(nr_rejected);
4690
4691static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4692 struct kobj_attribute *ka, char *buf)
4693{
4694 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4695}
4696SCX_ATTR(hotplug_seq);
4697
4698static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4699 struct kobj_attribute *ka, char *buf)
4700{
4701 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4702}
4703SCX_ATTR(enable_seq);
4704
4705static struct attribute *scx_global_attrs[] = {
4706 &scx_attr_state.attr,
4707 &scx_attr_switch_all.attr,
4708 &scx_attr_nr_rejected.attr,
4709 &scx_attr_hotplug_seq.attr,
4710 &scx_attr_enable_seq.attr,
4711 NULL,
4712};
4713
4714static const struct attribute_group scx_global_attr_group = {
4715 .attrs = scx_global_attrs,
4716};
4717
4718static void scx_kobj_release(struct kobject *kobj)
4719{
4720 kfree(kobj);
4721}
4722
4723static ssize_t scx_attr_ops_show(struct kobject *kobj,
4724 struct kobj_attribute *ka, char *buf)
4725{
4726 return sysfs_emit(buf, "%s\n", scx_ops.name);
4727}
4728SCX_ATTR(ops);
4729
4730static struct attribute *scx_sched_attrs[] = {
4731 &scx_attr_ops.attr,
4732 NULL,
4733};
4734ATTRIBUTE_GROUPS(scx_sched);
4735
4736static const struct kobj_type scx_ktype = {
4737 .release = scx_kobj_release,
4738 .sysfs_ops = &kobj_sysfs_ops,
4739 .default_groups = scx_sched_groups,
4740};
4741
4742static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4743{
4744 return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4745}
4746
4747static const struct kset_uevent_ops scx_uevent_ops = {
4748 .uevent = scx_uevent,
4749};
4750
4751/*
4752 * Used by sched_fork() and __setscheduler_prio() to pick the matching
4753 * sched_class. dl/rt are already handled.
4754 */
4755bool task_should_scx(int policy)
4756{
4757 if (!scx_enabled() ||
4758 unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4759 return false;
4760 if (READ_ONCE(scx_switching_all))
4761 return true;
4762 return policy == SCHED_EXT;
4763}
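/*
 * Illustrative sketch (not part of this file): from user space, a task runs
 * on SCX either because the loaded scheduler switched all tasks over or
 * because it explicitly sets the SCHED_EXT policy, e.g.:
 *
 *	struct sched_param param = { .sched_priority = 0 };
 *
 *	if (sched_setscheduler(0, SCHED_EXT, &param))
 *		perror("sched_setscheduler");
 *
 * SCHED_EXT comes from the kernel UAPI headers and may not be exposed by the
 * libc headers yet.
 */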
4764
4765/**
4766 * scx_softlockup - sched_ext softlockup handler
4767 *
4768 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4769 * live-lock the system by making many CPUs target the same DSQ to the point
4770 * where soft-lockup detection triggers. This function is called from
4771 * soft-lockup watchdog when the triggering point is close and tries to unjam
4772 * the system by enabling the breather and aborting the BPF scheduler.
4773 */
4774void scx_softlockup(u32 dur_s)
4775{
4776 switch (scx_ops_enable_state()) {
4777 case SCX_OPS_ENABLING:
4778 case SCX_OPS_ENABLED:
4779 break;
4780 default:
4781 return;
4782 }
4783
4784 /* allow only one instance, cleared at the end of scx_ops_bypass() */
4785 if (test_and_set_bit(0, &scx_in_softlockup))
4786 return;
4787
4788 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4789 smp_processor_id(), dur_s, scx_ops.name);
4790
4791 /*
4792 * Some CPUs may be trapped in the dispatch paths. Enable breather
4793	 * immediately; otherwise, we might not even be able to get to
4794 * scx_ops_bypass().
4795 */
4796 atomic_inc(&scx_ops_breather_depth);
4797
4798 scx_ops_error("soft lockup - CPU#%d stuck for %us",
4799 smp_processor_id(), dur_s);
4800}
4801
4802static void scx_clear_softlockup(void)
4803{
4804 if (test_and_clear_bit(0, &scx_in_softlockup))
4805 atomic_dec(&scx_ops_breather_depth);
4806}
4807
4808/**
4809 * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4810 *
4811 * Bypassing guarantees that all runnable tasks make forward progress without
4812 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4813 * be held by tasks that the BPF scheduler is forgetting to run, which
4814 * unfortunately also excludes toggling the static branches.
4815 *
4816 * Let's work around by overriding a couple of ops and modifying behaviors based on
4817 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4818 * to force global FIFO scheduling.
4819 *
4820 * - ops.select_cpu() is ignored and the default select_cpu() is used.
4821 *
4822 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4823 * %SCX_OPS_ENQ_LAST is also ignored.
4824 *
4825 * - ops.dispatch() is ignored.
4826 *
4827 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4828 * can't be trusted. Whenever a tick triggers, the running task is rotated to
4829 * the tail of the queue with core_sched_at touched.
4830 *
4831 * - pick_next_task() suppresses zero slice warning.
4832 *
4833 * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4834 * operations.
4835 *
4836 * - scx_prio_less() reverts to the default core_sched_at order.
4837 */
4838static void scx_ops_bypass(bool bypass)
4839{
4840 static DEFINE_RAW_SPINLOCK(bypass_lock);
4841 int cpu;
4842 unsigned long flags;
4843
4844 raw_spin_lock_irqsave(&bypass_lock, flags);
4845 if (bypass) {
4846 scx_ops_bypass_depth++;
4847 WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
4848 if (scx_ops_bypass_depth != 1)
4849 goto unlock;
4850 } else {
4851 scx_ops_bypass_depth--;
4852 WARN_ON_ONCE(scx_ops_bypass_depth < 0);
4853 if (scx_ops_bypass_depth != 0)
4854 goto unlock;
4855 }
4856
4857 atomic_inc(&scx_ops_breather_depth);
4858
4859 /*
4860 * No task property is changing. We just need to make sure all currently
4861 * queued tasks are re-queued according to the new scx_rq_bypassing()
4862 * state. As an optimization, walk each rq's runnable_list instead of
4863 * the scx_tasks list.
4864 *
4865 * This function can't trust the scheduler and thus can't use
4866 * cpus_read_lock(). Walk all possible CPUs instead of online.
4867 */
4868 for_each_possible_cpu(cpu) {
4869 struct rq *rq = cpu_rq(cpu);
4870 struct task_struct *p, *n;
4871
4872 raw_spin_rq_lock(rq);
4873
4874 if (bypass) {
4875 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4876 rq->scx.flags |= SCX_RQ_BYPASSING;
4877 } else {
4878 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4879 rq->scx.flags &= ~SCX_RQ_BYPASSING;
4880 }
4881
4882 /*
4883 * We need to guarantee that no tasks are on the BPF scheduler
4884 * while bypassing. Either we see enabled or the enable path
4885 * sees scx_rq_bypassing() before moving tasks to SCX.
4886 */
4887 if (!scx_enabled()) {
4888 raw_spin_rq_unlock(rq);
4889 continue;
4890 }
4891
4892 /*
4893 * The use of list_for_each_entry_safe_reverse() is required
4894 * because each task is going to be removed from and added back
4895 * to the runnable_list during iteration. Because they're added
4896 * to the tail of the list, safe reverse iteration can still
4897 * visit all nodes.
4898 */
4899 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4900 scx.runnable_node) {
4901 struct sched_enq_and_set_ctx ctx;
4902
4903 /* cycling deq/enq is enough, see the function comment */
4904 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4905 sched_enq_and_set_task(&ctx);
4906 }
4907
4908 /* resched to restore ticks and idle state */
4909 if (cpu_online(cpu) || cpu == smp_processor_id())
4910 resched_curr(rq);
4911
4912 raw_spin_rq_unlock(rq);
4913 }
4914
4915 atomic_dec(&scx_ops_breather_depth);
4916unlock:
4917 raw_spin_unlock_irqrestore(&bypass_lock, flags);
4918 scx_clear_softlockup();
4919}
4920
4921static void free_exit_info(struct scx_exit_info *ei)
4922{
4923 kfree(ei->dump);
4924 kfree(ei->msg);
4925 kfree(ei->bt);
4926 kfree(ei);
4927}
4928
4929static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4930{
4931 struct scx_exit_info *ei;
4932
4933 ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4934 if (!ei)
4935 return NULL;
4936
4937 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4938 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4939 ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
4940
4941 if (!ei->bt || !ei->msg || !ei->dump) {
4942 free_exit_info(ei);
4943 return NULL;
4944 }
4945
4946 return ei;
4947}
4948
4949static const char *scx_exit_reason(enum scx_exit_kind kind)
4950{
4951 switch (kind) {
4952 case SCX_EXIT_UNREG:
4953 return "unregistered from user space";
4954 case SCX_EXIT_UNREG_BPF:
4955 return "unregistered from BPF";
4956 case SCX_EXIT_UNREG_KERN:
4957 return "unregistered from the main kernel";
4958 case SCX_EXIT_SYSRQ:
4959 return "disabled by sysrq-S";
4960 case SCX_EXIT_ERROR:
4961 return "runtime error";
4962 case SCX_EXIT_ERROR_BPF:
4963 return "scx_bpf_error";
4964 case SCX_EXIT_ERROR_STALL:
4965 return "runnable task stall";
4966 default:
4967 return "<UNKNOWN>";
4968 }
4969}
4970
4971static void scx_ops_disable_workfn(struct kthread_work *work)
4972{
4973 struct scx_exit_info *ei = scx_exit_info;
4974 struct scx_task_iter sti;
4975 struct task_struct *p;
4976 struct rhashtable_iter rht_iter;
4977 struct scx_dispatch_q *dsq;
4978 int i, kind;
4979
4980 kind = atomic_read(&scx_exit_kind);
4981 while (true) {
4982 /*
4983 * NONE indicates that a new scx_ops has been registered since
4984 * disable was scheduled - don't kill the new ops. DONE
4985 * indicates that the ops has already been disabled.
4986 */
4987 if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
4988 return;
4989 if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
4990 break;
4991 }
4992 ei->kind = kind;
4993 ei->reason = scx_exit_reason(ei->kind);
4994
4995 /* guarantee forward progress by bypassing scx_ops */
4996 scx_ops_bypass(true);
4997
4998 switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
4999 case SCX_OPS_DISABLING:
5000 WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
5001 break;
5002 case SCX_OPS_DISABLED:
5003 pr_warn("sched_ext: ops error detected without ops (%s)\n",
5004 scx_exit_info->msg);
5005 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
5006 SCX_OPS_DISABLING);
5007 goto done;
5008 default:
5009 break;
5010 }
5011
5012 /*
5013 * Here, every runnable task is guaranteed to make forward progress and
5014 * we can safely use blocking synchronization constructs. Actually
5015 * disable ops.
5016 */
5017 mutex_lock(&scx_ops_enable_mutex);
5018
5019 static_branch_disable(&__scx_switched_all);
5020 WRITE_ONCE(scx_switching_all, false);
5021
5022 /*
5023 * Shut down cgroup support before tasks so that the cgroup attach path
5024 * doesn't race against scx_ops_exit_task().
5025 */
5026 scx_cgroup_lock();
5027 scx_cgroup_exit();
5028 scx_cgroup_unlock();
5029
5030 /*
5031 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
5032 * must be switched out and exited synchronously.
5033 */
5034 percpu_down_write(&scx_fork_rwsem);
5035
5036 scx_ops_init_task_enabled = false;
5037
5038 scx_task_iter_start(&sti);
5039 while ((p = scx_task_iter_next_locked(&sti))) {
5040 const struct sched_class *old_class = p->sched_class;
5041 const struct sched_class *new_class =
5042 __setscheduler_class(p->policy, p->prio);
5043 struct sched_enq_and_set_ctx ctx;
5044
5045 if (old_class != new_class && p->se.sched_delayed)
5046 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5047
5048 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5049
5050 p->sched_class = new_class;
5051 check_class_changing(task_rq(p), p, old_class);
5052
5053 sched_enq_and_set_task(&ctx);
5054
5055 check_class_changed(task_rq(p), p, old_class, p->prio);
5056 scx_ops_exit_task(p);
5057 }
5058 scx_task_iter_stop(&sti);
5059 percpu_up_write(&scx_fork_rwsem);
5060
5061 /* no task is on scx, turn off all the switches and flush in-progress calls */
5062 static_branch_disable(&__scx_ops_enabled);
5063 for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
5064 static_branch_disable(&scx_has_op[i]);
5065 static_branch_disable(&scx_ops_enq_last);
5066 static_branch_disable(&scx_ops_enq_exiting);
5067 static_branch_disable(&scx_ops_cpu_preempt);
5068 static_branch_disable(&scx_builtin_idle_enabled);
5069 synchronize_rcu();
5070
5071 if (ei->kind >= SCX_EXIT_ERROR) {
5072 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5073 scx_ops.name, ei->reason);
5074
5075 if (ei->msg[0] != '\0')
5076 pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
5077#ifdef CONFIG_STACKTRACE
5078 stack_trace_print(ei->bt, ei->bt_len, 2);
5079#endif
5080 } else {
5081 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5082 scx_ops.name, ei->reason);
5083 }
5084
5085 if (scx_ops.exit)
5086 SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
5087
5088 cancel_delayed_work_sync(&scx_watchdog_work);
5089
5090 /*
5091 * Delete the kobject from the hierarchy eagerly in addition to just
5092 * dropping a reference. Otherwise, if the object is deleted
5093 * asynchronously, sysfs could observe an object of the same name still
5094 * in the hierarchy when another scheduler is loaded.
5095 */
5096 kobject_del(scx_root_kobj);
5097 kobject_put(scx_root_kobj);
5098 scx_root_kobj = NULL;
5099
5100 memset(&scx_ops, 0, sizeof(scx_ops));
5101
5102 rhashtable_walk_enter(&dsq_hash, &rht_iter);
5103 do {
5104 rhashtable_walk_start(&rht_iter);
5105
5106 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
5107 destroy_dsq(dsq->id);
5108
5109 rhashtable_walk_stop(&rht_iter);
5110 } while (dsq == ERR_PTR(-EAGAIN));
5111 rhashtable_walk_exit(&rht_iter);
5112
5113 free_percpu(scx_dsp_ctx);
5114 scx_dsp_ctx = NULL;
5115 scx_dsp_max_batch = 0;
5116
5117 free_exit_info(scx_exit_info);
5118 scx_exit_info = NULL;
5119
5120 mutex_unlock(&scx_ops_enable_mutex);
5121
5122 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
5123 SCX_OPS_DISABLING);
5124done:
5125 scx_ops_bypass(false);
5126}
5127
5128static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
5129
5130static void schedule_scx_ops_disable_work(void)
5131{
5132 struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
5133
5134 /*
5135 * We may be called spuriously before the first bpf_sched_ext_reg(). If
5136 * scx_ops_helper isn't set up yet, there's nothing to do.
5137 */
5138 if (helper)
5139 kthread_queue_work(helper, &scx_ops_disable_work);
5140}
5141
5142static void scx_ops_disable(enum scx_exit_kind kind)
5143{
5144 int none = SCX_EXIT_NONE;
5145
5146 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5147 kind = SCX_EXIT_ERROR;
5148
5149 atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
5150
5151 schedule_scx_ops_disable_work();
5152}
5153
5154static void dump_newline(struct seq_buf *s)
5155{
5156 trace_sched_ext_dump("");
5157
5158 /* @s may be zero sized and seq_buf triggers WARN if so */
5159 if (s->size)
5160 seq_buf_putc(s, '\n');
5161}
5162
5163static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
5164{
5165 va_list args;
5166
5167#ifdef CONFIG_TRACEPOINTS
5168 if (trace_sched_ext_dump_enabled()) {
5169 /* protected by scx_dump_state()::dump_lock */
5170 static char line_buf[SCX_EXIT_MSG_LEN];
5171
5172 va_start(args, fmt);
5173 vscnprintf(line_buf, sizeof(line_buf), fmt, args);
5174 va_end(args);
5175
5176 trace_sched_ext_dump(line_buf);
5177 }
5178#endif
5179 /* @s may be zero sized and seq_buf triggers WARN if so */
5180 if (s->size) {
5181 va_start(args, fmt);
5182 seq_buf_vprintf(s, fmt, args);
5183 va_end(args);
5184
5185 seq_buf_putc(s, '\n');
5186 }
5187}
5188
5189static void dump_stack_trace(struct seq_buf *s, const char *prefix,
5190 const unsigned long *bt, unsigned int len)
5191{
5192 unsigned int i;
5193
5194 for (i = 0; i < len; i++)
5195 dump_line(s, "%s%pS", prefix, (void *)bt[i]);
5196}
5197
5198static void ops_dump_init(struct seq_buf *s, const char *prefix)
5199{
5200 struct scx_dump_data *dd = &scx_dump_data;
5201
5202 lockdep_assert_irqs_disabled();
5203
5204 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
5205 dd->first = true;
5206 dd->cursor = 0;
5207 dd->s = s;
5208 dd->prefix = prefix;
5209}
5210
5211static void ops_dump_flush(void)
5212{
5213 struct scx_dump_data *dd = &scx_dump_data;
5214 char *line = dd->buf.line;
5215
5216 if (!dd->cursor)
5217 return;
5218
5219 /*
5220	 * There's something to flush. If this is the first line, insert a
5221	 * blank line to distinguish the ops dump.
5222 */
5223 if (dd->first) {
5224 dump_newline(dd->s);
5225 dd->first = false;
5226 }
5227
5228 /*
5229 * There may be multiple lines in $line. Scan and emit each line
5230 * separately.
5231 */
5232 while (true) {
5233 char *end = line;
5234 char c;
5235
5236 while (*end != '\n' && *end != '\0')
5237 end++;
5238
5239 /*
5240		 * If $line overflowed, it may not have a newline at the end.
5241 * Always emit with a newline.
5242 */
5243 c = *end;
5244 *end = '\0';
5245 dump_line(dd->s, "%s%s", dd->prefix, line);
5246 if (c == '\0')
5247 break;
5248
5249 /* move to the next line */
5250 end++;
5251 if (*end == '\0')
5252 break;
5253 line = end;
5254 }
5255
5256 dd->cursor = 0;
5257}
5258
5259static void ops_dump_exit(void)
5260{
5261 ops_dump_flush();
5262 scx_dump_data.cpu = -1;
5263}
5264
5265static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
5266 struct task_struct *p, char marker)
5267{
5268 static unsigned long bt[SCX_EXIT_BT_LEN];
5269 char dsq_id_buf[19] = "(n/a)";
5270 unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
5271 unsigned int bt_len = 0;
5272
5273 if (p->scx.dsq)
5274 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
5275 (unsigned long long)p->scx.dsq->id);
5276
5277 dump_newline(s);
5278 dump_line(s, " %c%c %s[%d] %+ldms",
5279 marker, task_state_to_char(p), p->comm, p->pid,
5280 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
5281 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
5282 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
5283 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
5284 ops_state >> SCX_OPSS_QSEQ_SHIFT);
5285 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
5286 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
5287 p->scx.dsq_vtime);
5288 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
5289
5290 if (SCX_HAS_OP(dump_task)) {
5291 ops_dump_init(s, " ");
5292 SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
5293 ops_dump_exit();
5294 }
5295
5296#ifdef CONFIG_STACKTRACE
5297 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
5298#endif
5299 if (bt_len) {
5300 dump_newline(s);
5301 dump_stack_trace(s, " ", bt, bt_len);
5302 }
5303}
5304
5305static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
5306{
5307 static DEFINE_SPINLOCK(dump_lock);
5308 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
5309 struct scx_dump_ctx dctx = {
5310 .kind = ei->kind,
5311 .exit_code = ei->exit_code,
5312 .reason = ei->reason,
5313 .at_ns = ktime_get_ns(),
5314 .at_jiffies = jiffies,
5315 };
5316 struct seq_buf s;
5317 unsigned long flags;
5318 char *buf;
5319 int cpu;
5320
5321 spin_lock_irqsave(&dump_lock, flags);
5322
5323 seq_buf_init(&s, ei->dump, dump_len);
5324
5325 if (ei->kind == SCX_EXIT_NONE) {
5326 dump_line(&s, "Debug dump triggered by %s", ei->reason);
5327 } else {
5328 dump_line(&s, "%s[%d] triggered exit kind %d:",
5329 current->comm, current->pid, ei->kind);
5330 dump_line(&s, " %s (%s)", ei->reason, ei->msg);
5331 dump_newline(&s);
5332 dump_line(&s, "Backtrace:");
5333 dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
5334 }
5335
5336 if (SCX_HAS_OP(dump)) {
5337 ops_dump_init(&s, "");
5338 SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
5339 ops_dump_exit();
5340 }
5341
5342 dump_newline(&s);
5343 dump_line(&s, "CPU states");
5344 dump_line(&s, "----------");
5345
5346 for_each_possible_cpu(cpu) {
5347 struct rq *rq = cpu_rq(cpu);
5348 struct rq_flags rf;
5349 struct task_struct *p;
5350 struct seq_buf ns;
5351 size_t avail, used;
5352 bool idle;
5353
5354 rq_lock(rq, &rf);
5355
5356 idle = list_empty(&rq->scx.runnable_list) &&
5357 rq->curr->sched_class == &idle_sched_class;
5358
5359 if (idle && !SCX_HAS_OP(dump_cpu))
5360 goto next;
5361
5362 /*
5363 * We don't yet know whether ops.dump_cpu() will produce output
5364 * and we may want to skip the default CPU dump if it doesn't.
5365 * Use a nested seq_buf to generate the standard dump so that we
5366 * can decide whether to commit later.
5367 */
5368 avail = seq_buf_get_buf(&s, &buf);
5369 seq_buf_init(&ns, buf, avail);
5370
5371 dump_newline(&ns);
5372 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
5373 cpu, rq->scx.nr_running, rq->scx.flags,
5374 rq->scx.cpu_released, rq->scx.ops_qseq,
5375 rq->scx.pnt_seq);
5376 dump_line(&ns, " curr=%s[%d] class=%ps",
5377 rq->curr->comm, rq->curr->pid,
5378 rq->curr->sched_class);
5379 if (!cpumask_empty(rq->scx.cpus_to_kick))
5380 dump_line(&ns, " cpus_to_kick : %*pb",
5381 cpumask_pr_args(rq->scx.cpus_to_kick));
5382 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
5383 dump_line(&ns, " idle_to_kick : %*pb",
5384 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
5385 if (!cpumask_empty(rq->scx.cpus_to_preempt))
5386 dump_line(&ns, " cpus_to_preempt: %*pb",
5387 cpumask_pr_args(rq->scx.cpus_to_preempt));
5388 if (!cpumask_empty(rq->scx.cpus_to_wait))
5389 dump_line(&ns, " cpus_to_wait : %*pb",
5390 cpumask_pr_args(rq->scx.cpus_to_wait));
5391
5392 used = seq_buf_used(&ns);
5393 if (SCX_HAS_OP(dump_cpu)) {
5394 ops_dump_init(&ns, " ");
5395 SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
5396 ops_dump_exit();
5397 }
5398
5399 /*
5400 * If idle && nothing generated by ops.dump_cpu(), there's
5401 * nothing interesting. Skip.
5402 */
5403 if (idle && used == seq_buf_used(&ns))
5404 goto next;
5405
5406 /*
5407 * $s may already have overflowed when $ns was created. If so,
5408 * calling commit on it will trigger BUG.
5409 */
5410 if (avail) {
5411 seq_buf_commit(&s, seq_buf_used(&ns));
5412 if (seq_buf_has_overflowed(&ns))
5413 seq_buf_set_overflow(&s);
5414 }
5415
5416 if (rq->curr->sched_class == &ext_sched_class)
5417 scx_dump_task(&s, &dctx, rq->curr, '*');
5418
5419 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
5420 scx_dump_task(&s, &dctx, p, ' ');
5421 next:
5422 rq_unlock(rq, &rf);
5423 }
5424
5425 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
5426 memcpy(ei->dump + dump_len - sizeof(trunc_marker),
5427 trunc_marker, sizeof(trunc_marker));
5428
5429 spin_unlock_irqrestore(&dump_lock, flags);
5430}
5431
5432static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
5433{
5434 struct scx_exit_info *ei = scx_exit_info;
5435
5436 if (ei->kind >= SCX_EXIT_ERROR)
5437 scx_dump_state(ei, scx_ops.exit_dump_len);
5438
5439 schedule_scx_ops_disable_work();
5440}
5441
5442static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
5443
5444static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
5445 s64 exit_code,
5446 const char *fmt, ...)
5447{
5448 struct scx_exit_info *ei = scx_exit_info;
5449 int none = SCX_EXIT_NONE;
5450 va_list args;
5451
5452 if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
5453 return;
5454
5455 ei->exit_code = exit_code;
5456#ifdef CONFIG_STACKTRACE
5457 if (kind >= SCX_EXIT_ERROR)
5458 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
5459#endif
5460 va_start(args, fmt);
5461 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
5462 va_end(args);
5463
5464 /*
5465 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
5466 * in scx_ops_disable_workfn().
5467 */
5468 ei->kind = kind;
5469 ei->reason = scx_exit_reason(ei->kind);
5470
5471 irq_work_queue(&scx_ops_error_irq_work);
5472}
5473
5474static struct kthread_worker *scx_create_rt_helper(const char *name)
5475{
5476 struct kthread_worker *helper;
5477
5478 helper = kthread_create_worker(0, name);
5479 if (helper)
5480 sched_set_fifo(helper->task);
5481 return helper;
5482}
5483
5484static void check_hotplug_seq(const struct sched_ext_ops *ops)
5485{
5486 unsigned long long global_hotplug_seq;
5487
5488 /*
5489	 * If a hotplug event has occurred between when the scheduler was
5490	 * initialized and when we were able to attach, exit and notify user
5491 * space about it.
5492 */
5493 if (ops->hotplug_seq) {
5494 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
5495 if (ops->hotplug_seq != global_hotplug_seq) {
5496 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
5497 "expected hotplug seq %llu did not match actual %llu",
5498 ops->hotplug_seq, global_hotplug_seq);
5499 }
5500 }
5501}
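
/*
 * Editor's sketch (not part of the original source): a loader can guard
 * against the race described in check_hotplug_seq() by sampling the global
 * hotplug sequence number before loading and storing it in ops->hotplug_seq.
 * The sysfs path and read_u64() helper below are illustrative assumptions.
 *
 *	// hypothetical user-space loader snippet
 *	unsigned long long seq = read_u64("/sys/kernel/sched_ext/hotplug_seq");
 *	skel->struct_ops.sketch_ops->hotplug_seq = seq;
 *	// attach the struct_ops; if a CPU is hot(un)plugged in between, the
 *	// kernel exits the scheduler with
 *	// SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG.
 */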
5502
5503static int validate_ops(const struct sched_ext_ops *ops)
5504{
5505 /*
5506 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5507 * ops.enqueue() callback isn't implemented.
5508 */
5509 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5510 scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5511 return -EINVAL;
5512 }
5513
5514 return 0;
5515}
5516
5517static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5518{
5519 struct scx_task_iter sti;
5520 struct task_struct *p;
5521 unsigned long timeout;
5522 int i, cpu, node, ret;
5523
5524 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5525 cpu_possible_mask)) {
5526 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5527 return -EINVAL;
5528 }
5529
5530 mutex_lock(&scx_ops_enable_mutex);
5531
5532 if (!scx_ops_helper) {
5533 WRITE_ONCE(scx_ops_helper,
5534 scx_create_rt_helper("sched_ext_ops_helper"));
5535 if (!scx_ops_helper) {
5536 ret = -ENOMEM;
5537 goto err_unlock;
5538 }
5539 }
5540
5541 if (!global_dsqs) {
5542 struct scx_dispatch_q **dsqs;
5543
5544 dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
5545 if (!dsqs) {
5546 ret = -ENOMEM;
5547 goto err_unlock;
5548 }
5549
5550 for_each_node_state(node, N_POSSIBLE) {
5551 struct scx_dispatch_q *dsq;
5552
5553 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5554 if (!dsq) {
5555 for_each_node_state(node, N_POSSIBLE)
5556 kfree(dsqs[node]);
5557 kfree(dsqs);
5558 ret = -ENOMEM;
5559 goto err_unlock;
5560 }
5561
5562 init_dsq(dsq, SCX_DSQ_GLOBAL);
5563 dsqs[node] = dsq;
5564 }
5565
5566 global_dsqs = dsqs;
5567 }
5568
5569 if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
5570 ret = -EBUSY;
5571 goto err_unlock;
5572 }
5573
5574 scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
5575 if (!scx_root_kobj) {
5576 ret = -ENOMEM;
5577 goto err_unlock;
5578 }
5579
5580 scx_root_kobj->kset = scx_kset;
5581 ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
5582 if (ret < 0)
5583 goto err;
5584
5585 scx_exit_info = alloc_exit_info(ops->exit_dump_len);
5586 if (!scx_exit_info) {
5587 ret = -ENOMEM;
5588 goto err_del;
5589 }
5590
5591 /*
5592 * Set scx_ops, transition to ENABLING and clear exit info to arm the
5593 * disable path. Failure triggers full disabling from here on.
5594 */
5595 scx_ops = *ops;
5596
5597 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
5598 SCX_OPS_DISABLED);
5599
5600 atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
5601 scx_warned_zero_slice = false;
5602
5603 atomic_long_set(&scx_nr_rejected, 0);
5604
5605 for_each_possible_cpu(cpu)
5606 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5607
5608 /*
5609 * Keep CPUs stable during enable so that the BPF scheduler can track
5610 * online CPUs by watching ->on/offline_cpu() after ->init().
5611 */
5612 cpus_read_lock();
5613
5614 if (scx_ops.init) {
5615 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
5616 if (ret) {
5617 ret = ops_sanitize_err("init", ret);
5618 cpus_read_unlock();
5619 scx_ops_error("ops.init() failed (%d)", ret);
5620 goto err_disable;
5621 }
5622 }
5623
5624 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5625 if (((void (**)(void))ops)[i])
5626 static_branch_enable_cpuslocked(&scx_has_op[i]);
5627
5628 check_hotplug_seq(ops);
5629#ifdef CONFIG_SMP
5630 update_selcpu_topology();
5631#endif
5632 cpus_read_unlock();
5633
5634 ret = validate_ops(ops);
5635 if (ret)
5636 goto err_disable;
5637
5638 WARN_ON_ONCE(scx_dsp_ctx);
5639 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5640 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5641 scx_dsp_max_batch),
5642 __alignof__(struct scx_dsp_ctx));
5643 if (!scx_dsp_ctx) {
5644 ret = -ENOMEM;
5645 goto err_disable;
5646 }
5647
5648 if (ops->timeout_ms)
5649 timeout = msecs_to_jiffies(ops->timeout_ms);
5650 else
5651 timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5652
5653 WRITE_ONCE(scx_watchdog_timeout, timeout);
5654 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5655 queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5656 scx_watchdog_timeout / 2);
5657
5658 /*
5659 * Once __scx_ops_enabled is set, %current can be switched to SCX
5660 * anytime. This can lead to stalls as some BPF schedulers (e.g.
5661 * userspace scheduling) may not function correctly before all tasks are
5662 * switched. Init in bypass mode to guarantee forward progress.
5663 */
5664 scx_ops_bypass(true);
5665
5666 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5667 if (((void (**)(void))ops)[i])
5668 static_branch_enable(&scx_has_op[i]);
5669
5670 if (ops->flags & SCX_OPS_ENQ_LAST)
5671 static_branch_enable(&scx_ops_enq_last);
5672
5673 if (ops->flags & SCX_OPS_ENQ_EXITING)
5674 static_branch_enable(&scx_ops_enq_exiting);
5675 if (scx_ops.cpu_acquire || scx_ops.cpu_release)
5676 static_branch_enable(&scx_ops_cpu_preempt);
5677
5678 if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
5679 reset_idle_masks();
5680 static_branch_enable(&scx_builtin_idle_enabled);
5681 } else {
5682 static_branch_disable(&scx_builtin_idle_enabled);
5683 }
5684
5685 /*
5686 * Lock out forks, cgroup on/offlining and moves before opening the
5687 * floodgate so that they don't wander into the operations prematurely.
5688 */
5689 percpu_down_write(&scx_fork_rwsem);
5690
5691 WARN_ON_ONCE(scx_ops_init_task_enabled);
5692 scx_ops_init_task_enabled = true;
5693
5694 /*
5695 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5696 * preventing new tasks from being added. No need to exclude tasks
5697 * leaving as sched_ext_free() can handle both prepped and enabled
5698 * tasks. Prep all tasks first and then enable them with preemption
5699 * disabled.
5700 *
5701 * All cgroups should be initialized before scx_ops_init_task() so that
5702 * the BPF scheduler can reliably track each task's cgroup membership
5703 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
5704 * migrations while tasks are being initialized so that
5705 * scx_cgroup_can_attach() never sees uninitialized tasks.
5706 */
5707 scx_cgroup_lock();
5708 ret = scx_cgroup_init();
5709 if (ret)
5710 goto err_disable_unlock_all;
5711
5712 scx_task_iter_start(&sti);
5713 while ((p = scx_task_iter_next_locked(&sti))) {
5714 /*
5715		 * @p may already be dead, have lost all its usage counts and
5716		 * be waiting for an RCU grace period before being freed. @p can't
5717 * be initialized for SCX in such cases and should be ignored.
5718 */
5719 if (!tryget_task_struct(p))
5720 continue;
5721
5722 scx_task_iter_unlock(&sti);
5723
5724 ret = scx_ops_init_task(p, task_group(p), false);
5725 if (ret) {
5726 put_task_struct(p);
5727 scx_task_iter_relock(&sti);
5728 scx_task_iter_stop(&sti);
5729 scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
5730 ret, p->comm, p->pid);
5731 goto err_disable_unlock_all;
5732 }
5733
5734 scx_set_task_state(p, SCX_TASK_READY);
5735
5736 put_task_struct(p);
5737 scx_task_iter_relock(&sti);
5738 }
5739 scx_task_iter_stop(&sti);
5740 scx_cgroup_unlock();
5741 percpu_up_write(&scx_fork_rwsem);
5742
5743 /*
5744 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5745 * all eligible tasks.
5746 */
5747 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5748 static_branch_enable(&__scx_ops_enabled);
5749
5750 /*
5751 * We're fully committed and can't fail. The task READY -> ENABLED
5752 * transitions here are synchronized against sched_ext_free() through
5753 * scx_tasks_lock.
5754 */
5755 percpu_down_write(&scx_fork_rwsem);
5756 scx_task_iter_start(&sti);
5757 while ((p = scx_task_iter_next_locked(&sti))) {
5758 const struct sched_class *old_class = p->sched_class;
5759 const struct sched_class *new_class =
5760 __setscheduler_class(p->policy, p->prio);
5761 struct sched_enq_and_set_ctx ctx;
5762
5763 if (old_class != new_class && p->se.sched_delayed)
5764 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5765
5766 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5767
5768 p->scx.slice = SCX_SLICE_DFL;
5769 p->sched_class = new_class;
5770 check_class_changing(task_rq(p), p, old_class);
5771
5772 sched_enq_and_set_task(&ctx);
5773
5774 check_class_changed(task_rq(p), p, old_class, p->prio);
5775 }
5776 scx_task_iter_stop(&sti);
5777 percpu_up_write(&scx_fork_rwsem);
5778
5779 scx_ops_bypass(false);
5780
5781 if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5782 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5783 goto err_disable;
5784 }
5785
5786 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5787 static_branch_enable(&__scx_switched_all);
5788
5789 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5790 scx_ops.name, scx_switched_all() ? "" : " (partial)");
5791 kobject_uevent(scx_root_kobj, KOBJ_ADD);
5792 mutex_unlock(&scx_ops_enable_mutex);
5793
5794 atomic_long_inc(&scx_enable_seq);
5795
5796 return 0;
5797
5798err_del:
5799 kobject_del(scx_root_kobj);
5800err:
5801 kobject_put(scx_root_kobj);
5802 scx_root_kobj = NULL;
5803 if (scx_exit_info) {
5804 free_exit_info(scx_exit_info);
5805 scx_exit_info = NULL;
5806 }
5807err_unlock:
5808 mutex_unlock(&scx_ops_enable_mutex);
5809 return ret;
5810
5811err_disable_unlock_all:
5812 scx_cgroup_unlock();
5813 percpu_up_write(&scx_fork_rwsem);
5814 scx_ops_bypass(false);
5815err_disable:
5816 mutex_unlock(&scx_ops_enable_mutex);
5817 /*
5818 * Returning an error code here would not pass all the error information
5819	 * to userspace. Record errno using scx_ops_error() for cases where
5820	 * scx_ops_error() wasn't already invoked, and exit indicating success so
5821	 * that the error is reported through ops.exit() with all the details.
5822 *
5823	 * Flush scx_ops_disable_work to ensure that the error is reported before
5824 * init completion.
5825 */
5826 scx_ops_error("scx_ops_enable() failed (%d)", ret);
5827 kthread_flush_work(&scx_ops_disable_work);
5828 return 0;
5829}
5830
5831
5832/********************************************************************************
5833 * bpf_struct_ops plumbing.
5834 */
5835#include <linux/bpf_verifier.h>
5836#include <linux/bpf.h>
5837#include <linux/btf.h>
5838
5839static const struct btf_type *task_struct_type;
5840
5841static bool bpf_scx_is_valid_access(int off, int size,
5842 enum bpf_access_type type,
5843 const struct bpf_prog *prog,
5844 struct bpf_insn_access_aux *info)
5845{
5846 if (type != BPF_READ)
5847 return false;
5848 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5849 return false;
5850 if (off % size != 0)
5851 return false;
5852
5853 return btf_ctx_access(off, size, type, prog, info);
5854}
5855
5856static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5857 const struct bpf_reg_state *reg, int off,
5858 int size)
5859{
5860 const struct btf_type *t;
5861
5862 t = btf_type_by_id(reg->btf, reg->btf_id);
5863 if (t == task_struct_type) {
5864 if (off >= offsetof(struct task_struct, scx.slice) &&
5865 off + size <= offsetofend(struct task_struct, scx.slice))
5866 return SCALAR_VALUE;
5867 if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5868 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5869 return SCALAR_VALUE;
5870 if (off >= offsetof(struct task_struct, scx.disallow) &&
5871 off + size <= offsetofend(struct task_struct, scx.disallow))
5872 return SCALAR_VALUE;
5873 }
5874
5875 return -EACCES;
5876}
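
/*
 * Editor's sketch (not part of the original source): the whitelist above is
 * what allows a BPF scheduler to poke these three task_struct fields
 * directly. is_unsupported() and last_runtime() are hypothetical helpers.
 *
 *	// inside a hypothetical ops.init_task() implementation
 *	if (is_unsupported(p))
 *		p->scx.disallow = true;		// keep @p off sched_ext
 *
 *	// inside a hypothetical ops.stopping() implementation
 *	p->scx.dsq_vtime += last_runtime(p);	// maintain a vtime for
 *						// scx_bpf_dsq_insert_vtime()
 */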
5877
5878static const struct bpf_func_proto *
5879bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5880{
5881 switch (func_id) {
5882 case BPF_FUNC_task_storage_get:
5883 return &bpf_task_storage_get_proto;
5884 case BPF_FUNC_task_storage_delete:
5885 return &bpf_task_storage_delete_proto;
5886 default:
5887 return bpf_base_func_proto(func_id, prog);
5888 }
5889}
5890
5891static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5892 .get_func_proto = bpf_scx_get_func_proto,
5893 .is_valid_access = bpf_scx_is_valid_access,
5894 .btf_struct_access = bpf_scx_btf_struct_access,
5895};
5896
5897static int bpf_scx_init_member(const struct btf_type *t,
5898 const struct btf_member *member,
5899 void *kdata, const void *udata)
5900{
5901 const struct sched_ext_ops *uops = udata;
5902 struct sched_ext_ops *ops = kdata;
5903 u32 moff = __btf_member_bit_offset(t, member) / 8;
5904 int ret;
5905
5906 switch (moff) {
5907 case offsetof(struct sched_ext_ops, dispatch_max_batch):
5908 if (*(u32 *)(udata + moff) > INT_MAX)
5909 return -E2BIG;
5910 ops->dispatch_max_batch = *(u32 *)(udata + moff);
5911 return 1;
5912 case offsetof(struct sched_ext_ops, flags):
5913 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5914 return -EINVAL;
5915 ops->flags = *(u64 *)(udata + moff);
5916 return 1;
5917 case offsetof(struct sched_ext_ops, name):
5918 ret = bpf_obj_name_cpy(ops->name, uops->name,
5919 sizeof(ops->name));
5920 if (ret < 0)
5921 return ret;
5922 if (ret == 0)
5923 return -EINVAL;
5924 return 1;
5925 case offsetof(struct sched_ext_ops, timeout_ms):
5926 if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5927 SCX_WATCHDOG_MAX_TIMEOUT)
5928 return -E2BIG;
5929 ops->timeout_ms = *(u32 *)(udata + moff);
5930 return 1;
5931 case offsetof(struct sched_ext_ops, exit_dump_len):
5932 ops->exit_dump_len =
5933 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5934 return 1;
5935 case offsetof(struct sched_ext_ops, hotplug_seq):
5936 ops->hotplug_seq = *(u64 *)(udata + moff);
5937 return 1;
5938 }
5939
5940 return 0;
5941}
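
/*
 * Editor's sketch (not part of the original source): the members handled
 * above correspond to plain data fields a BPF scheduler sets in its
 * struct_ops map. SEC() comes from the BPF-side libbpf headers and the
 * callback names are hypothetical.
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops sketch_ops = {
 *		.enqueue	= (void *)sketch_enqueue,
 *		.dispatch	= (void *)sketch_dispatch,
 *		.flags		= SCX_OPS_ENQ_LAST,
 *		.timeout_ms	= 5000,		// must fit in SCX_WATCHDOG_MAX_TIMEOUT
 *		.exit_dump_len	= 65536,	// 0 would mean SCX_EXIT_DUMP_DFL_LEN
 *		.name		= "sketch",
 *	};
 */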
5942
5943static int bpf_scx_check_member(const struct btf_type *t,
5944 const struct btf_member *member,
5945 const struct bpf_prog *prog)
5946{
5947 u32 moff = __btf_member_bit_offset(t, member) / 8;
5948
5949 switch (moff) {
5950 case offsetof(struct sched_ext_ops, init_task):
5951#ifdef CONFIG_EXT_GROUP_SCHED
5952 case offsetof(struct sched_ext_ops, cgroup_init):
5953 case offsetof(struct sched_ext_ops, cgroup_exit):
5954 case offsetof(struct sched_ext_ops, cgroup_prep_move):
5955#endif
5956 case offsetof(struct sched_ext_ops, cpu_online):
5957 case offsetof(struct sched_ext_ops, cpu_offline):
5958 case offsetof(struct sched_ext_ops, init):
5959 case offsetof(struct sched_ext_ops, exit):
5960 break;
5961 default:
5962 if (prog->sleepable)
5963 return -EINVAL;
5964 }
5965
5966 return 0;
5967}
5968
5969static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5970{
5971 return scx_ops_enable(kdata, link);
5972}
5973
5974static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5975{
5976 scx_ops_disable(SCX_EXIT_UNREG);
5977 kthread_flush_work(&scx_ops_disable_work);
5978}
5979
5980static int bpf_scx_init(struct btf *btf)
5981{
5982 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
5983
5984 return 0;
5985}
5986
5987static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5988{
5989 /*
5990 * sched_ext does not support updating the actively-loaded BPF
5991 * scheduler, as registering a BPF scheduler can always fail if the
5992 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5993	 * scheduler returns an error code from e.g. ops.init() or ops.init_task().
5994	 * Similarly, we can always race with unregistration happening
5995 */
5996 return -EOPNOTSUPP;
5997}
5998
5999static int bpf_scx_validate(void *kdata)
6000{
6001 return 0;
6002}
6003
6004static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
6005static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
6006static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
6007static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
6008static void sched_ext_ops__tick(struct task_struct *p) {}
6009static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
6010static void sched_ext_ops__running(struct task_struct *p) {}
6011static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
6012static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
6013static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
6014static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
6015static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
6016static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
6017static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
6018static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
6019static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
6020static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
6021static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
6022static void sched_ext_ops__enable(struct task_struct *p) {}
6023static void sched_ext_ops__disable(struct task_struct *p) {}
6024#ifdef CONFIG_EXT_GROUP_SCHED
6025static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
6026static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
6027static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
6028static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
6029static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
6030static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
6031#endif
6032static void sched_ext_ops__cpu_online(s32 cpu) {}
6033static void sched_ext_ops__cpu_offline(s32 cpu) {}
6034static s32 sched_ext_ops__init(void) { return -EINVAL; }
6035static void sched_ext_ops__exit(struct scx_exit_info *info) {}
6036static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
6037static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
6038static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
6039
6040static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
6041 .select_cpu = sched_ext_ops__select_cpu,
6042 .enqueue = sched_ext_ops__enqueue,
6043 .dequeue = sched_ext_ops__dequeue,
6044 .dispatch = sched_ext_ops__dispatch,
6045 .tick = sched_ext_ops__tick,
6046 .runnable = sched_ext_ops__runnable,
6047 .running = sched_ext_ops__running,
6048 .stopping = sched_ext_ops__stopping,
6049 .quiescent = sched_ext_ops__quiescent,
6050 .yield = sched_ext_ops__yield,
6051 .core_sched_before = sched_ext_ops__core_sched_before,
6052 .set_weight = sched_ext_ops__set_weight,
6053 .set_cpumask = sched_ext_ops__set_cpumask,
6054 .update_idle = sched_ext_ops__update_idle,
6055 .cpu_acquire = sched_ext_ops__cpu_acquire,
6056 .cpu_release = sched_ext_ops__cpu_release,
6057 .init_task = sched_ext_ops__init_task,
6058 .exit_task = sched_ext_ops__exit_task,
6059 .enable = sched_ext_ops__enable,
6060 .disable = sched_ext_ops__disable,
6061#ifdef CONFIG_EXT_GROUP_SCHED
6062 .cgroup_init = sched_ext_ops__cgroup_init,
6063 .cgroup_exit = sched_ext_ops__cgroup_exit,
6064 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move,
6065 .cgroup_move = sched_ext_ops__cgroup_move,
6066 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move,
6067 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight,
6068#endif
6069 .cpu_online = sched_ext_ops__cpu_online,
6070 .cpu_offline = sched_ext_ops__cpu_offline,
6071 .init = sched_ext_ops__init,
6072 .exit = sched_ext_ops__exit,
6073 .dump = sched_ext_ops__dump,
6074 .dump_cpu = sched_ext_ops__dump_cpu,
6075 .dump_task = sched_ext_ops__dump_task,
6076};
6077
6078static struct bpf_struct_ops bpf_sched_ext_ops = {
6079 .verifier_ops = &bpf_scx_verifier_ops,
6080 .reg = bpf_scx_reg,
6081 .unreg = bpf_scx_unreg,
6082 .check_member = bpf_scx_check_member,
6083 .init_member = bpf_scx_init_member,
6084 .init = bpf_scx_init,
6085 .update = bpf_scx_update,
6086 .validate = bpf_scx_validate,
6087 .name = "sched_ext_ops",
6088 .owner = THIS_MODULE,
6089 .cfi_stubs = &__bpf_ops_sched_ext_ops
6090};
6091
6092
6093/********************************************************************************
6094 * System integration and init.
6095 */
6096
6097static void sysrq_handle_sched_ext_reset(u8 key)
6098{
6099 if (scx_ops_helper)
6100 scx_ops_disable(SCX_EXIT_SYSRQ);
6101 else
6102 pr_info("sched_ext: BPF scheduler not yet used\n");
6103}
6104
6105static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
6106 .handler = sysrq_handle_sched_ext_reset,
6107 .help_msg = "reset-sched-ext(S)",
6108 .action_msg = "Disable sched_ext and revert all tasks to CFS",
6109 .enable_mask = SYSRQ_ENABLE_RTNICE,
6110};
6111
6112static void sysrq_handle_sched_ext_dump(u8 key)
6113{
6114 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
6115
6116 if (scx_enabled())
6117 scx_dump_state(&ei, 0);
6118}
6119
6120static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
6121 .handler = sysrq_handle_sched_ext_dump,
6122 .help_msg = "dump-sched-ext(D)",
6123 .action_msg = "Trigger sched_ext debug dump",
6124 .enable_mask = SYSRQ_ENABLE_RTNICE,
6125};
6126
6127static bool can_skip_idle_kick(struct rq *rq)
6128{
6129 lockdep_assert_rq_held(rq);
6130
6131 /*
6132 * We can skip idle kicking if @rq is going to go through at least one
6133 * full SCX scheduling cycle before going idle. Just checking whether
6134 * curr is not idle is insufficient because we could be racing
6135 * balance_one() trying to pull the next task from a remote rq, which
6136 * may fail, and @rq may become idle afterwards.
6137 *
6138 * The race window is small and we don't and can't guarantee that @rq is
6139 * only kicked while idle anyway. Skip only when sure.
6140 */
6141 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
6142}
6143
6144static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
6145{
6146 struct rq *rq = cpu_rq(cpu);
6147 struct scx_rq *this_scx = &this_rq->scx;
6148 bool should_wait = false;
6149 unsigned long flags;
6150
6151 raw_spin_rq_lock_irqsave(rq, flags);
6152
6153 /*
6154 * During CPU hotplug, a CPU may depend on kicking itself to make
6155 * forward progress. Allow kicking self regardless of online state.
6156 */
6157 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
6158 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
6159 if (rq->curr->sched_class == &ext_sched_class)
6160 rq->curr->scx.slice = 0;
6161 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6162 }
6163
6164 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
6165 pseqs[cpu] = rq->scx.pnt_seq;
6166 should_wait = true;
6167 }
6168
6169 resched_curr(rq);
6170 } else {
6171 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6172 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6173 }
6174
6175 raw_spin_rq_unlock_irqrestore(rq, flags);
6176
6177 return should_wait;
6178}
6179
6180static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
6181{
6182 struct rq *rq = cpu_rq(cpu);
6183 unsigned long flags;
6184
6185 raw_spin_rq_lock_irqsave(rq, flags);
6186
6187 if (!can_skip_idle_kick(rq) &&
6188 (cpu_online(cpu) || cpu == cpu_of(this_rq)))
6189 resched_curr(rq);
6190
6191 raw_spin_rq_unlock_irqrestore(rq, flags);
6192}
6193
6194static void kick_cpus_irq_workfn(struct irq_work *irq_work)
6195{
6196 struct rq *this_rq = this_rq();
6197 struct scx_rq *this_scx = &this_rq->scx;
6198 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
6199 bool should_wait = false;
6200 s32 cpu;
6201
6202 for_each_cpu(cpu, this_scx->cpus_to_kick) {
6203 should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
6204 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
6205 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6206 }
6207
6208 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
6209 kick_one_cpu_if_idle(cpu, this_rq);
6210 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6211 }
6212
6213 if (!should_wait)
6214 return;
6215
6216 for_each_cpu(cpu, this_scx->cpus_to_wait) {
6217 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
6218
6219 if (cpu != cpu_of(this_rq)) {
6220 /*
6221 * Pairs with smp_store_release() issued by this CPU in
6222 * switch_class() on the resched path.
6223 *
6224 * We busy-wait here to guarantee that no other task can
6225 * be scheduled on our core before the target CPU has
6226 * entered the resched path.
6227 */
6228 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
6229 cpu_relax();
6230 }
6231
6232 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6233 }
6234}
6235
6236/**
6237 * print_scx_info - print out sched_ext scheduler state
6238 * @log_lvl: the log level to use when printing
6239 * @p: target task
6240 *
6241 * If a sched_ext scheduler is enabled, print the name and state of the
6242 * scheduler. If @p is on sched_ext, print further information about the task.
6243 *
6244 * This function can be safely called on any task as long as the task_struct
6245 * itself is accessible. While safe, this function isn't synchronized and may
6246 * print out mixed-up or garbage values of limited length.
6247 */
6248void print_scx_info(const char *log_lvl, struct task_struct *p)
6249{
6250 enum scx_ops_enable_state state = scx_ops_enable_state();
6251 const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
6252 char runnable_at_buf[22] = "?";
6253 struct sched_class *class;
6254 unsigned long runnable_at;
6255
6256 if (state == SCX_OPS_DISABLED)
6257 return;
6258
6259 /*
6260 * Carefully check if the task was running on sched_ext, and then
6261 * carefully copy the time it's been runnable, and its state.
6262 */
6263 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
6264 class != &ext_sched_class) {
6265 printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
6266 scx_ops_enable_state_str[state], all);
6267 return;
6268 }
6269
6270 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
6271 sizeof(runnable_at)))
6272 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
6273 jiffies_delta_msecs(runnable_at, jiffies));
6274
6275 /* print everything onto one line to conserve console space */
6276 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
6277 log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
6278 runnable_at_buf);
6279}
6280
6281static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
6282{
6283 /*
6284 * SCX schedulers often have userspace components which are sometimes
6285	 * involved in critical scheduling paths. PM operations involve freezing
6286 * userspace which can lead to scheduling misbehaviors including stalls.
6287 * Let's bypass while PM operations are in progress.
6288 */
6289 switch (event) {
6290 case PM_HIBERNATION_PREPARE:
6291 case PM_SUSPEND_PREPARE:
6292 case PM_RESTORE_PREPARE:
6293 scx_ops_bypass(true);
6294 break;
6295 case PM_POST_HIBERNATION:
6296 case PM_POST_SUSPEND:
6297 case PM_POST_RESTORE:
6298 scx_ops_bypass(false);
6299 break;
6300 }
6301
6302 return NOTIFY_OK;
6303}
6304
6305static struct notifier_block scx_pm_notifier = {
6306 .notifier_call = scx_pm_handler,
6307};
6308
6309void __init init_sched_ext_class(void)
6310{
6311 s32 cpu, v;
6312
6313 /*
6314 * The following is to prevent the compiler from optimizing out the enum
6315 * definitions so that BPF scheduler implementations can use them
6316 * through the generated vmlinux.h.
6317 */
6318 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
6319 SCX_TG_ONLINE);
6320
6321 BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
6322#ifdef CONFIG_SMP
6323 BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
6324 BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
6325#endif
6326 scx_kick_cpus_pnt_seqs =
6327 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
6328 __alignof__(scx_kick_cpus_pnt_seqs[0]));
6329 BUG_ON(!scx_kick_cpus_pnt_seqs);
6330
6331 for_each_possible_cpu(cpu) {
6332 struct rq *rq = cpu_rq(cpu);
6333
6334 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
6335 INIT_LIST_HEAD(&rq->scx.runnable_list);
6336 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
6337
6338 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
6339 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
6340 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
6341 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
6342 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
6343 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
6344
6345 if (cpu_online(cpu))
6346 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
6347 }
6348
6349 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
6350 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
6351 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
6352}
6353
6354
6355/********************************************************************************
6356 * Helpers that can be called from the BPF scheduler.
6357 */
6358#include <linux/btf_ids.h>
6359
6360__bpf_kfunc_start_defs();
6361
6362/**
6363 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
6364 * @p: task_struct to select a CPU for
6365 * @prev_cpu: CPU @p was on previously
6366 * @wake_flags: %SCX_WAKE_* flags
6367 * @is_idle: out parameter indicating whether the returned CPU is idle
6368 *
6369 * Can only be called from ops.select_cpu() if the built-in CPU selection is
6370 * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
6371 * @p, @prev_cpu and @wake_flags match ops.select_cpu().
6372 *
6373 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
6374 * currently idle and thus a good candidate for direct dispatching.
6375 */
6376__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
6377 u64 wake_flags, bool *is_idle)
6378{
6379 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6380 scx_ops_error("built-in idle tracking is disabled");
6381 goto prev_cpu;
6382 }
6383
6384 if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
6385 goto prev_cpu;
6386
6387#ifdef CONFIG_SMP
6388 return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
6389#endif
6390
6391prev_cpu:
6392 *is_idle = false;
6393 return prev_cpu;
6394}
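
/*
 * Editor's sketch (not part of the original source): typical usage from a
 * hypothetical ops.select_cpu() implementation - use the default picker and
 * direct-dispatch when it found an idle CPU.
 *
 *	s32 sketch_select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */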
6395
6396__bpf_kfunc_end_defs();
6397
6398BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
6399BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
6400BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
6401
6402static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
6403 .owner = THIS_MODULE,
6404 .set = &scx_kfunc_ids_select_cpu,
6405};
6406
6407static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
6408{
6409 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
6410 return false;
6411
6412 lockdep_assert_irqs_disabled();
6413
6414 if (unlikely(!p)) {
6415 scx_ops_error("called with NULL task");
6416 return false;
6417 }
6418
6419 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
6420 scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
6421 return false;
6422 }
6423
6424 return true;
6425}
6426
6427static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
6428 u64 enq_flags)
6429{
6430 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6431 struct task_struct *ddsp_task;
6432
6433 ddsp_task = __this_cpu_read(direct_dispatch_task);
6434 if (ddsp_task) {
6435 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
6436 return;
6437 }
6438
6439 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
6440 scx_ops_error("dispatch buffer overflow");
6441 return;
6442 }
6443
6444 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
6445 .task = p,
6446 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
6447 .dsq_id = dsq_id,
6448 .enq_flags = enq_flags,
6449 };
6450}
6451
6452__bpf_kfunc_start_defs();
6453
6454/**
6455 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
6456 * @p: task_struct to insert
6457 * @dsq_id: DSQ to insert into
6458 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6459 * @enq_flags: SCX_ENQ_*
6460 *
6461 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
6462 * call this function spuriously. Can be called from ops.enqueue(),
6463 * ops.select_cpu(), and ops.dispatch().
6464 *
6465 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6466 * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
6467 * used to target the local DSQ of a CPU other than the enqueueing one. Use
6468 * ops.select_cpu() to be on the target CPU in the first place.
6469 *
6470 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
6471 * will be directly inserted into the corresponding dispatch queue after
6472 * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
6473 * inserted into the local DSQ of the CPU returned by ops.select_cpu().
6474 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6475 * task is inserted.
6476 *
6477 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6478 * and this function can be called up to ops.dispatch_max_batch times to insert
6479 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of
6480 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
6481 *
6482 * This function doesn't have any locking restrictions and may be called under
6483 * BPF locks (in the future when BPF introduces more flexible locking).
6484 *
6485 * @p is allowed to run for @slice. The scheduling path is triggered on slice
6486 * exhaustion. If zero, the current residual slice is maintained. If
6487 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6488 * scx_bpf_kick_cpu() to trigger scheduling.
6489 */
6490__bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
6491 u64 enq_flags)
6492{
6493 if (!scx_dsq_insert_preamble(p, enq_flags))
6494 return;
6495
6496 if (slice)
6497 p->scx.slice = slice;
6498 else
6499 p->scx.slice = p->scx.slice ?: 1;
6500
6501 scx_dsq_insert_commit(p, dsq_id, enq_flags);
6502}
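
/*
 * Editor's sketch (not part of the original source): the simplest possible
 * ops.enqueue() just forwards every task to the global FIFO.
 *
 *	void sketch_enqueue(struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 * Passing 0 for @slice would instead preserve whatever residual slice @p
 * already has, per the comment above.
 */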
6503
6504/* for backward compatibility, will be removed in v6.15 */
6505__bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6506 u64 enq_flags)
6507{
6508 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
6509 scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
6510}
6511
6512/**
6513 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
6514 * @p: task_struct to insert
6515 * @dsq_id: DSQ to insert into
6516 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6517 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6518 * @enq_flags: SCX_ENQ_*
6519 *
6520 * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
6521 * Tasks queued into the priority queue are ordered by @vtime. All other aspects
6522 * are identical to scx_bpf_dsq_insert().
6523 *
6524 * @vtime ordering is according to time_before64() which considers wrapping. A
6525 * numerically larger vtime may indicate an earlier position in the ordering and
6526 * vice-versa.
6527 *
6528 * A DSQ can only be used as a FIFO or priority queue at any given time and this
6529 * function must not be called on a DSQ which already has one or more FIFO tasks
6530 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6531 * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6532 */
6533__bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6534 u64 slice, u64 vtime, u64 enq_flags)
6535{
6536 if (!scx_dsq_insert_preamble(p, enq_flags))
6537 return;
6538
6539 if (slice)
6540 p->scx.slice = slice;
6541 else
6542 p->scx.slice = p->scx.slice ?: 1;
6543
6544 p->scx.dsq_vtime = vtime;
6545
6546 scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6547}
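
/*
 * Editor's sketch (not part of the original source): a weighted-fair style
 * enqueue into a custom DSQ ordered by vtime. SHARED_DSQ is a hypothetical
 * scheduler-defined DSQ ID; see scx_bpf_create_dsq() for creating it.
 *
 *	void sketch_enqueue(struct task_struct *p, u64 enq_flags)
 *	{
 *		u64 vtime = p->scx.dsq_vtime;	// advanced in ops.stopping()
 *
 *		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
 *					 enq_flags);
 *	}
 */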
6548
6549/* for backward compatibility, will be removed in v6.15 */
6550__bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6551 u64 slice, u64 vtime, u64 enq_flags)
6552{
6553 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
6554 scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
6555}
6556
6557__bpf_kfunc_end_defs();
6558
6559BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6560BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6561BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6562BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6563BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6564BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6565
6566static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6567 .owner = THIS_MODULE,
6568 .set = &scx_kfunc_ids_enqueue_dispatch,
6569};
6570
6571static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6572 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6573{
6574 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6575 struct rq *this_rq, *src_rq, *locked_rq;
6576 bool dispatched = false;
6577 bool in_balance;
6578 unsigned long flags;
6579
6580 if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6581 return false;
6582
6583 /*
6584 * Can be called from either ops.dispatch() locking this_rq() or any
6585	 * context where no rq lock is held. In the latter case, lock @p's task_rq,
6586	 * which we'll likely need anyway.
6587 */
6588 src_rq = task_rq(p);
6589
6590 local_irq_save(flags);
6591 this_rq = this_rq();
6592 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6593
6594 if (in_balance) {
6595 if (this_rq != src_rq) {
6596 raw_spin_rq_unlock(this_rq);
6597 raw_spin_rq_lock(src_rq);
6598 }
6599 } else {
6600 raw_spin_rq_lock(src_rq);
6601 }
6602
6603 /*
6604 * If the BPF scheduler keeps calling this function repeatedly, it can
6605	 * cause live-lock conditions similar to consume_dispatch_q(). Insert a
6606 * breather if necessary.
6607 */
6608 scx_ops_breather(src_rq);
6609
6610 locked_rq = src_rq;
6611 raw_spin_lock(&src_dsq->lock);
6612
6613 /*
6614 * Did someone else get to it? @p could have already left $src_dsq, got
6615 * re-enqueud, or be in the process of being consumed by someone else.
6616	 * re-enqueued, or be in the process of being consumed by someone else.
6617 if (unlikely(p->scx.dsq != src_dsq ||
6618 u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6619 p->scx.holding_cpu >= 0) ||
6620 WARN_ON_ONCE(src_rq != task_rq(p))) {
6621 raw_spin_unlock(&src_dsq->lock);
6622 goto out;
6623 }
6624
6625 /* @p is still on $src_dsq and stable, determine the destination */
6626 dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6627
6628 /*
6629 * Apply vtime and slice updates before moving so that the new time is
6630 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6631 * this is safe as we're locking it.
6632 */
6633 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6634 p->scx.dsq_vtime = kit->vtime;
6635 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6636 p->scx.slice = kit->slice;
6637
6638 /* execute move */
6639 locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
6640 dispatched = true;
6641out:
6642 if (in_balance) {
6643 if (this_rq != locked_rq) {
6644 raw_spin_rq_unlock(locked_rq);
6645 raw_spin_rq_lock(this_rq);
6646 }
6647 } else {
6648 raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6649 }
6650
6651 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6652 __SCX_DSQ_ITER_HAS_VTIME);
6653 return dispatched;
6654}
6655
6656__bpf_kfunc_start_defs();
6657
6658/**
6659 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6660 *
6661 * Can only be called from ops.dispatch().
6662 */
6663__bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6664{
6665 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6666 return 0;
6667
6668 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6669}
6670
6671/**
6672 * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6673 *
6674 * Cancel the latest dispatch. Can be called multiple times to cancel further
6675 * dispatches. Can only be called from ops.dispatch().
6676 */
6677__bpf_kfunc void scx_bpf_dispatch_cancel(void)
6678{
6679 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6680
6681 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6682 return;
6683
6684 if (dspc->cursor > 0)
6685 dspc->cursor--;
6686 else
6687 scx_ops_error("dispatch buffer underflow");
6688}
6689
6690/**
6691 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6692 * @dsq_id: DSQ to move task from
6693 *
6694 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6695 * local DSQ for execution. Can only be called from ops.dispatch().
6696 *
6697 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6698 * before trying to move from the specified DSQ. It may also grab rq locks and
6699 * thus can't be called under any BPF locks.
6700 *
6701 * Returns %true if a task has been moved, %false if there isn't any task to
6702 * move.
6703 */
6704__bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6705{
6706 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6707 struct scx_dispatch_q *dsq;
6708
6709 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6710 return false;
6711
6712 flush_dispatch_buf(dspc->rq);
6713
6714 dsq = find_user_dsq(dsq_id);
6715 if (unlikely(!dsq)) {
6716 scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6717 return false;
6718 }
6719
6720 if (consume_dispatch_q(dspc->rq, dsq)) {
6721 /*
6722 * A successfully consumed task can be dequeued before it starts
6723 * running while the CPU is trying to migrate other dispatched
6724 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6725 * local DSQ.
6726 */
6727 dspc->nr_tasks++;
6728 return true;
6729 } else {
6730 return false;
6731 }
6732}
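
/*
 * Editor's sketch (not part of the original source): the matching
 * ops.dispatch() for a single shared DSQ simply pulls the next queued task
 * onto the dispatching CPU's local DSQ. SHARED_DSQ is hypothetical.
 *
 *	void sketch_dispatch(s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */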
6733
6734/* for backward compatibility, will be removed in v6.15 */
6735__bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6736{
6737 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()");
6738 return scx_bpf_dsq_move_to_local(dsq_id);
6739}
6740
6741/**
6742 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6743 * @it__iter: DSQ iterator in progress
6744 * @slice: duration the moved task can run for in nsecs
6745 *
6746 * Override the slice of the next task that will be moved from @it__iter using
6747 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6748 * slice duration is kept.
6749 */
6750__bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6751 u64 slice)
6752{
6753 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6754
6755 kit->slice = slice;
6756 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6757}
6758
6759/* for backward compatibility, will be removed in v6.15 */
6760__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6761 struct bpf_iter_scx_dsq *it__iter, u64 slice)
6762{
6763 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6764 scx_bpf_dsq_move_set_slice(it__iter, slice);
6765}
6766
6767/**
6768 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6769 * @it__iter: DSQ iterator in progress
6770 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6771 *
6772 * Override the vtime of the next task that will be moved from @it__iter using
6773 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
6774 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6775 * override is ignored and cleared.
6776 */
6777__bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6778 u64 vtime)
6779{
6780 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6781
6782 kit->vtime = vtime;
6783 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6784}
6785
6786/* for backward compatibility, will be removed in v6.15 */
6787__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6788 struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6789{
6790 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6791 scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6792}
6793
6794/**
6795 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6796 * @it__iter: DSQ iterator in progress
6797 * @p: task to transfer
6798 * @dsq_id: DSQ to move @p to
6799 * @enq_flags: SCX_ENQ_*
6800 *
6801 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6802 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6803 * be the destination.
6804 *
6805 * For the transfer to be successful, @p must still be on the DSQ and have been
6806 * queued before the DSQ iteration started. This function doesn't care whether
6807 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6808 * been queued before the iteration started.
6809 *
6810 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6811 *
6812 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6813 * lock (e.g. BPF timers or SYSCALL programs).
6814 *
6815 * Returns %true if @p has been consumed, %false if @p had already been consumed
6816 * or dequeued.
6817 */
6818__bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6819 struct task_struct *p, u64 dsq_id,
6820 u64 enq_flags)
6821{
6822 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6823 p, dsq_id, enq_flags);
6824}
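
/*
 * Editor's sketch (not part of the original source): moving a specific task
 * found through DSQ iteration, optionally refreshing its slice first.
 * SHARED_DSQ and wants_this_cpu() are hypothetical.
 *
 *	struct bpf_iter_scx_dsq it;
 *	struct task_struct *p;
 *
 *	if (!bpf_iter_scx_dsq_new(&it, SHARED_DSQ, 0)) {
 *		while ((p = bpf_iter_scx_dsq_next(&it))) {
 *			if (!wants_this_cpu(p))
 *				continue;
 *			scx_bpf_dsq_move_set_slice(&it, SCX_SLICE_DFL);
 *			if (scx_bpf_dsq_move(&it, p, SCX_DSQ_LOCAL, 0))
 *				break;
 *		}
 *		bpf_iter_scx_dsq_destroy(&it);
 *	}
 */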
6825
6826/* for backward compatibility, will be removed in v6.15 */
6827__bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6828 struct task_struct *p, u64 dsq_id,
6829 u64 enq_flags)
6830{
6831 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6832 return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
6833}
6834
6835/**
6836 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6837 * @it__iter: DSQ iterator in progress
6838 * @p: task to transfer
6839 * @dsq_id: DSQ to move @p to
6840 * @enq_flags: SCX_ENQ_*
6841 *
6842 * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6843 * priority queue of the DSQ specified by @dsq_id. The destination must be a
6844 * user DSQ as only user DSQs support priority queueing.
6845 *
6846 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6847 * and scx_bpf_dsq_move_set_vtime() to update.
6848 *
6849 * All other aspects are identical to scx_bpf_dsq_move(). See
6850 * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6851 */
6852__bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6853 struct task_struct *p, u64 dsq_id,
6854 u64 enq_flags)
6855{
6856 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6857 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6858}
6859
6860/* for backward compatibility, will be removed in v6.15 */
6861__bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6862 struct task_struct *p, u64 dsq_id,
6863 u64 enq_flags)
6864{
6865	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()");
6866 return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
6867}
6868
6869__bpf_kfunc_end_defs();
6870
6871BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6872BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6873BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6874BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6875BTF_ID_FLAGS(func, scx_bpf_consume)
6876BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6877BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6878BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6879BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6880BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6881BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6882BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6883BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6884BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6885
6886static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6887 .owner = THIS_MODULE,
6888 .set = &scx_kfunc_ids_dispatch,
6889};
6890
6891__bpf_kfunc_start_defs();
6892
6893/**
6894 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6895 *
6896 * Iterate over all of the tasks currently enqueued on the local DSQ of the
6897 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6898 * processed tasks. Can only be called from ops.cpu_release().
6899 */
6900__bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6901{
6902 LIST_HEAD(tasks);
6903 u32 nr_enqueued = 0;
6904 struct rq *rq;
6905 struct task_struct *p, *n;
6906
6907 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6908 return 0;
6909
6910 rq = cpu_rq(smp_processor_id());
6911 lockdep_assert_rq_held(rq);
6912
6913 /*
6914 * The BPF scheduler may choose to dispatch tasks back to
6915 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6916 * first to avoid processing the same tasks repeatedly.
6917 */
6918 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6919 scx.dsq_list.node) {
6920 /*
6921 * If @p is being migrated, @p's current CPU may not agree with
6922 * its allowed CPUs and the migration_cpu_stop is about to
6923 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6924 *
6925 * While racing sched property changes may also dequeue and
6926 * re-enqueue a migrating task while its current CPU and allowed
6927 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6928 * the current local DSQ for running tasks and thus are not
6929 * visible to the BPF scheduler.
6930 */
6931 if (p->migration_pending)
6932 continue;
6933
6934 dispatch_dequeue(rq, p);
6935 list_add_tail(&p->scx.dsq_list.node, &tasks);
6936 }
6937
6938 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6939 list_del_init(&p->scx.dsq_list.node);
6940 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6941 nr_enqueued++;
6942 }
6943
6944 return nr_enqueued;
6945}
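
/*
 * Editor's sketch (not part of the original source): when a higher priority
 * sched class takes the CPU, hand the tasks already sitting on its local DSQ
 * back to the BPF scheduler so they can be placed elsewhere.
 *
 *	void sketch_cpu_release(s32 cpu, struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */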
6946
6947__bpf_kfunc_end_defs();
6948
6949BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6950BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6951BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6952
6953static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6954 .owner = THIS_MODULE,
6955 .set = &scx_kfunc_ids_cpu_release,
6956};
6957
6958__bpf_kfunc_start_defs();
6959
6960/**
6961 * scx_bpf_create_dsq - Create a custom DSQ
6962 * @dsq_id: DSQ to create
6963 * @node: NUMA node to allocate from
6964 *
6965 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6966 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6967 */
6968__bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6969{
6970 if (unlikely(node >= (int)nr_node_ids ||
6971 (node < 0 && node != NUMA_NO_NODE)))
6972 return -EINVAL;
6973 return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
6974}
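
/*
 * Editor's sketch (not part of the original source): custom DSQs are
 * typically created once from the sleepable ops.init() callback. SHARED_DSQ
 * is a hypothetical scheduler-defined ID.
 *
 *	s32 sketch_init(void)
 *	{
 *		return scx_bpf_create_dsq(SHARED_DSQ, NUMA_NO_NODE);
 *	}
 */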
6975
6976__bpf_kfunc_end_defs();
6977
6978BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6979BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6980BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6981BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6982BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6983BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6984BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6985BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6986BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6987BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6988BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6989
6990static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6991 .owner = THIS_MODULE,
6992 .set = &scx_kfunc_ids_unlocked,
6993};
6994
6995__bpf_kfunc_start_defs();
6996
6997/**
6998 * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6999 * @cpu: cpu to kick
7000 * @flags: %SCX_KICK_* flags
7001 *
7002 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
7003 * trigger rescheduling on a busy CPU. This can be called from any online
7004 * scx_ops operation and the actual kicking is performed asynchronously through
7005 * an irq work.
7006 */
7007__bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
7008{
7009 struct rq *this_rq;
7010 unsigned long irq_flags;
7011
7012 if (!ops_cpu_valid(cpu, NULL))
7013 return;
7014
7015 local_irq_save(irq_flags);
7016
7017 this_rq = this_rq();
7018
7019 /*
7020 * While bypassing for PM ops, IRQ handling may not be online which can
7021 * lead to irq_work_queue() malfunction such as infinite busy wait for
7022 * IRQ status update. Suppress kicking.
7023 */
7024 if (scx_rq_bypassing(this_rq))
7025 goto out;
7026
7027 /*
7028 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
7029 * rq locks. We can probably be smarter and avoid bouncing if called
7030 * from ops which don't hold a rq lock.
7031 */
7032 if (flags & SCX_KICK_IDLE) {
7033 struct rq *target_rq = cpu_rq(cpu);
7034
7035 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
7036 scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
7037
7038 if (raw_spin_rq_trylock(target_rq)) {
7039 if (can_skip_idle_kick(target_rq)) {
7040 raw_spin_rq_unlock(target_rq);
7041 goto out;
7042 }
7043 raw_spin_rq_unlock(target_rq);
7044 }
7045 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
7046 } else {
7047 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
7048
7049 if (flags & SCX_KICK_PREEMPT)
7050 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
7051 if (flags & SCX_KICK_WAIT)
7052 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
7053 }
7054
7055 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
7056out:
7057 local_irq_restore(irq_flags);
7058}
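
/*
 * Editor's sketch (not part of the original source): after queueing work on
 * a shared DSQ from ops.enqueue(), nudge the task's previous CPU in case it
 * is sitting idle. SHARED_DSQ is hypothetical and scx_bpf_task_cpu() is
 * assumed to be available to the BPF scheduler.
 *
 *	void sketch_enqueue(struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *		scx_bpf_kick_cpu(scx_bpf_task_cpu(p), SCX_KICK_IDLE);
 *	}
 */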
7059
7060/**
7061 * scx_bpf_dsq_nr_queued - Return the number of queued tasks
7062 * @dsq_id: id of the DSQ
7063 *
7064 * Return the number of tasks in the DSQ matching @dsq_id. If not found,
7065 * -%ENOENT is returned.
7066 */
7067__bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
7068{
7069 struct scx_dispatch_q *dsq;
7070 s32 ret;
7071
7072 preempt_disable();
7073
7074 if (dsq_id == SCX_DSQ_LOCAL) {
7075 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
7076 goto out;
7077 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
7078 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
7079
7080 if (ops_cpu_valid(cpu, NULL)) {
7081 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
7082 goto out;
7083 }
7084 } else {
7085 dsq = find_user_dsq(dsq_id);
7086 if (dsq) {
7087 ret = READ_ONCE(dsq->nr);
7088 goto out;
7089 }
7090 }
7091 ret = -ENOENT;
7092out:
7093 preempt_enable();
7094 return ret;
7095}
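/*
 * Illustrative sketch (BPF-side code, not compiled as part of this file):
 * checking the backlog of a custom DSQ and of a remote CPU's local DSQ.
 * EXAMPLE_DSQ and @cpu are hypothetical:
 *
 *	s32 shared = scx_bpf_dsq_nr_queued(EXAMPLE_DSQ);
 *	s32 local = scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu);
 *
 *	if (shared > 0 || local > 0)
 *		scx_bpf_kick_cpu(cpu, 0);
 */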
7096
7097/**
7098 * scx_bpf_destroy_dsq - Destroy a custom DSQ
7099 * @dsq_id: DSQ to destroy
7100 *
7101 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
7102 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
7103 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
7104 * which doesn't exist. Can be called from any online scx_ops operation.
7105 */
7106__bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
7107{
7108 destroy_dsq(dsq_id);
7109}
7110
7111/**
7112 * bpf_iter_scx_dsq_new - Create a DSQ iterator
7113 * @it: iterator to initialize
7114 * @dsq_id: DSQ to iterate
7115 * @flags: %SCX_DSQ_ITER_*
7116 *
7117 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
7118 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
7119 * tasks which are already queued when this function is invoked.
7120 */
7121__bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
7122 u64 flags)
7123{
7124 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7125
7126 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
7127 sizeof(struct bpf_iter_scx_dsq));
7128 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
7129 __alignof__(struct bpf_iter_scx_dsq));
7130
7131 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
7132 return -EINVAL;
7133
7134 kit->dsq = find_user_dsq(dsq_id);
7135 if (!kit->dsq)
7136 return -ENOENT;
7137
7138 INIT_LIST_HEAD(&kit->cursor.node);
7139 kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
7140 kit->cursor.priv = READ_ONCE(kit->dsq->seq);
7141
7142 return 0;
7143}
7144
7145/**
7146 * bpf_iter_scx_dsq_next - Progress a DSQ iterator
7147 * @it: iterator to progress
7148 *
7149 * Return the next task. See bpf_iter_scx_dsq_new().
7150 */
7151__bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
7152{
7153 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7154 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
7155 struct task_struct *p;
7156 unsigned long flags;
7157
7158 if (!kit->dsq)
7159 return NULL;
7160
7161 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7162
7163 if (list_empty(&kit->cursor.node))
7164 p = NULL;
7165 else
7166 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
7167
7168 /*
7169 * Only tasks which were queued before the iteration started are
7170 * visible. This bounds BPF iterations and guarantees that vtime never
7171 * jumps in the other direction while iterating.
7172 */
7173 do {
7174 p = nldsq_next_task(kit->dsq, p, rev);
7175 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
7176
7177 if (p) {
7178 if (rev)
7179 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
7180 else
7181 list_move(&kit->cursor.node, &p->scx.dsq_list.node);
7182 } else {
7183 list_del_init(&kit->cursor.node);
7184 }
7185
7186 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7187
7188 return p;
7189}
7190
7191/**
7192 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
7193 * @it: iterator to destroy
7194 *
7195 * Undo bpf_iter_scx_dsq_new().
7196 */
7197__bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
7198{
7199 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7200
7201 if (!kit->dsq)
7202 return;
7203
7204 if (!list_empty(&kit->cursor.node)) {
7205 unsigned long flags;
7206
7207 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7208 list_del_init(&kit->cursor.node);
7209 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7210 }
7211 kit->dsq = NULL;
7212}
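/*
 * Illustrative sketch (BPF-side code, not compiled as part of this file): the
 * three iterator kfuncs above back the bpf_for_each(scx_dsq, ...) pattern,
 * assuming the bpf_for_each() helper from the BPF tooling headers. EXAMPLE_DSQ
 * is hypothetical:
 *
 *	struct task_struct *p;
 *	u32 nr = 0;
 *
 *	bpf_for_each(scx_dsq, p, EXAMPLE_DSQ, 0)
 *		nr++;
 */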
7213
7214__bpf_kfunc_end_defs();
7215
7216static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
7217 char *fmt, unsigned long long *data, u32 data__sz)
7218{
7219 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
7220 s32 ret;
7221
7222 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
7223 (data__sz && !data)) {
7224 scx_ops_error("invalid data=%p and data__sz=%u",
7225 (void *)data, data__sz);
7226 return -EINVAL;
7227 }
7228
7229 ret = copy_from_kernel_nofault(data_buf, data, data__sz);
7230 if (ret < 0) {
7231 scx_ops_error("failed to read data fields (%d)", ret);
7232 return ret;
7233 }
7234
7235 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
7236 &bprintf_data);
7237 if (ret < 0) {
7238 scx_ops_error("format preparation failed (%d)", ret);
7239 return ret;
7240 }
7241
7242 ret = bstr_printf(line_buf, line_size, fmt,
7243 bprintf_data.bin_args);
7244 bpf_bprintf_cleanup(&bprintf_data);
7245 if (ret < 0) {
7246 scx_ops_error("(\"%s\", %p, %u) failed to format",
7247 fmt, data, data__sz);
7248 return ret;
7249 }
7250
7251 return ret;
7252}
7253
7254static s32 bstr_format(struct scx_bstr_buf *buf,
7255 char *fmt, unsigned long long *data, u32 data__sz)
7256{
7257 return __bstr_format(buf->data, buf->line, sizeof(buf->line),
7258 fmt, data, data__sz);
7259}
7260
7261__bpf_kfunc_start_defs();
7262
7263/**
7264 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
7265 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
7266 * @fmt: error message format string
7267 * @data: format string parameters packaged using ___bpf_fill() macro
7268 * @data__sz: @data len, must end in '__sz' for the verifier
7269 *
7270 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
7271 * disabling.
7272 */
7273__bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
7274 unsigned long long *data, u32 data__sz)
7275{
7276 unsigned long flags;
7277
7278 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7279 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7280 scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
7281 scx_exit_bstr_buf.line);
7282 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7283}
7284
7285/**
7286 * scx_bpf_error_bstr - Indicate fatal error
7287 * @fmt: error message format string
7288 * @data: format string parameters packaged using ___bpf_fill() macro
7289 * @data__sz: @data len, must end in '__sz' for the verifier
7290 *
7291 * Indicate that the BPF scheduler encountered a fatal error and initiate ops
7292 * disabling.
7293 */
7294__bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
7295 u32 data__sz)
7296{
7297 unsigned long flags;
7298
7299 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7300 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7301 scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
7302 scx_exit_bstr_buf.line);
7303 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7304}
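/*
 * Illustrative sketch (BPF-side code, not compiled as part of this file):
 * scx_bpf_exit_bstr() and scx_bpf_error_bstr() are normally reached through
 * the scx_bpf_exit() and scx_bpf_error() wrapper macros in the scx tooling
 * headers, which package the varargs into @data/@data__sz. qidx and
 * MAX_QUEUES are hypothetical:
 *
 *	if (qidx >= MAX_QUEUES) {
 *		scx_bpf_error("bad queue index %d", qidx);
 *		return;
 *	}
 */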
7305
7306/**
7307 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
7308 * @fmt: format string
7309 * @data: format string parameters packaged using ___bpf_fill() macro
7310 * @data__sz: @data len, must end in '__sz' for the verifier
7311 *
7312 * To be called through the scx_bpf_dump() helper from ops.dump(), dump_cpu() and
7313 * dump_task() to generate extra debug dump specific to the BPF scheduler.
7314 *
7315 * The extra dump may be multiple lines. A single line may be split over
7316 * multiple calls. The last line is automatically terminated.
7317 */
7318__bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
7319 u32 data__sz)
7320{
7321 struct scx_dump_data *dd = &scx_dump_data;
7322 struct scx_bstr_buf *buf = &dd->buf;
7323 s32 ret;
7324
7325 if (raw_smp_processor_id() != dd->cpu) {
7326 scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
7327 return;
7328 }
7329
7330 /* append the formatted string to the line buf */
7331 ret = __bstr_format(buf->data, buf->line + dd->cursor,
7332 sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
7333 if (ret < 0) {
7334 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
7335 dd->prefix, fmt, data, data__sz, ret);
7336 return;
7337 }
7338
7339 dd->cursor += ret;
7340 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
7341
7342 if (!dd->cursor)
7343 return;
7344
7345 /*
7346 * If the line buf overflowed or ends in a newline, flush it into the
7347 * dump. This is to allow the caller to generate a single line over
7348 * multiple calls. As ops_dump_flush() can also handle multiple lines in
7349 * the line buf, the only case which can lead to an unexpected
7350	 * truncation is when the caller keeps generating newlines mid-line,
7351	 * rather than at the end, across consecutive calls. Don't do that.
7352 */
7353 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
7354 ops_dump_flush();
7355}
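/*
 * Illustrative sketch (BPF-side code, not compiled as part of this file): from
 * ops.dump() and friends, the scx_bpf_dump() wrapper macro mentioned above is
 * the usual entry point. example_dump and nr_queued are hypothetical:
 *
 *	void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
 *	{
 *		scx_bpf_dump("example stats: queued=%u\n", nr_queued);
 *	}
 */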
7356
7357/**
7358 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7359 * @cpu: CPU of interest
7360 *
7361 * Return the maximum relative capacity of @cpu in relation to the most
7362 * performant CPU in the system. The return value is in the range [1,
7363 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7364 */
7365__bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7366{
7367 if (ops_cpu_valid(cpu, NULL))
7368 return arch_scale_cpu_capacity(cpu);
7369 else
7370 return SCX_CPUPERF_ONE;
7371}
7372
7373/**
7374 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7375 * @cpu: CPU of interest
7376 *
7377 * Return the current relative performance of @cpu in relation to its maximum.
7378 * The return value is in the range [1, %SCX_CPUPERF_ONE].
7379 *
7380 * The current performance level of a CPU in relation to the maximum performance
7381 * available in the system can be calculated as follows:
7382 *
7383 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7384 *
7385 * The result is in the range [1, %SCX_CPUPERF_ONE].
7386 */
7387__bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7388{
7389 if (ops_cpu_valid(cpu, NULL))
7390 return arch_scale_freq_capacity(cpu);
7391 else
7392 return SCX_CPUPERF_ONE;
7393}
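/*
 * Illustrative sketch (BPF-side code, not compiled as part of this file):
 * applying the formula above to estimate a CPU's current performance on the
 * system-wide scale. @cpu is hypothetical:
 *
 *	u64 cur = (u64)scx_bpf_cpuperf_cap(cpu) * scx_bpf_cpuperf_cur(cpu) /
 *		  SCX_CPUPERF_ONE;
 */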
7394
7395/**
7396 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7397 * @cpu: CPU of interest
7398 * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7400 *
7401 * Set the target performance level of @cpu to @perf. @perf is in linear
7402 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7403 * schedutil cpufreq governor chooses the target frequency.
7404 *
7405 * The actual performance level chosen, CPU grouping, and the overhead and
7406 * latency of the operations are dependent on the hardware and cpufreq driver in
7407 * use. Consult hardware and cpufreq documentation for more information. The
7408 * current performance level can be monitored using scx_bpf_cpuperf_cur().
7409 */
7410__bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7411{
7412 if (unlikely(perf > SCX_CPUPERF_ONE)) {
7413 scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
7414 return;
7415 }
7416
7417 if (ops_cpu_valid(cpu, NULL)) {
7418 struct rq *rq = cpu_rq(cpu);
7419
7420 rq->scx.cpuperf_target = perf;
7421
7422 rcu_read_lock_sched_notrace();
7423 cpufreq_update_util(cpu_rq(cpu), 0);
7424 rcu_read_unlock_sched_notrace();
7425 }
7426}
7427
7428/**
7429 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7430 *
7431 * All valid CPU IDs in the system are smaller than the returned value.
7432 */
7433__bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7434{
7435 return nr_cpu_ids;
7436}
7437
7438/**
7439 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7440 */
7441__bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7442{
7443 return cpu_possible_mask;
7444}
7445
7446/**
7447 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7448 */
7449__bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7450{
7451 return cpu_online_mask;
7452}
7453
7454/**
7455 * scx_bpf_put_cpumask - Release a possible/online cpumask
7456 * @cpumask: cpumask to release
7457 */
7458__bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7459{
7460 /*
7461 * Empty function body because we aren't actually acquiring or releasing
7462 * a reference to a global cpumask, which is read-only in the caller and
7463 * is never released. The acquire / release semantics here are just used
7464	 * to make the cpumask a trusted pointer in the caller.
7465 */
7466}
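/*
 * Illustrative sketch (BPF-side code, not compiled as part of this file): the
 * possible/online masks follow a get/put pattern so that the verifier treats
 * them as trusted pointers. @cpu is hypothetical and bpf_cpumask_test_cpu() is
 * the generic BPF cpumask kfunc:
 *
 *	const struct cpumask *online = scx_bpf_get_online_cpumask();
 *	bool is_online = bpf_cpumask_test_cpu(cpu, online);
 *
 *	scx_bpf_put_cpumask(online);
 */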
7467
7468/**
7469 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
7470 * per-CPU cpumask.
7471 *
7472 * Returns an empty mask if idle tracking is not enabled, or running on a UP kernel.
7473 */
7474__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
7475{
7476 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7477 scx_ops_error("built-in idle tracking is disabled");
7478 return cpu_none_mask;
7479 }
7480
7481#ifdef CONFIG_SMP
7482 return idle_masks.cpu;
7483#else
7484 return cpu_none_mask;
7485#endif
7486}
7487
7488/**
7489 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
7490 * per-physical-core cpumask. Can be used to determine if an entire physical
7491 * core is free.
7492 *
7493 * Returns an empty mask if idle tracking is not enabled, or running on a UP kernel.
7494 */
7495__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
7496{
7497 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7498 scx_ops_error("built-in idle tracking is disabled");
7499 return cpu_none_mask;
7500 }
7501
7502#ifdef CONFIG_SMP
7503 if (sched_smt_active())
7504 return idle_masks.smt;
7505 else
7506 return idle_masks.cpu;
7507#else
7508 return cpu_none_mask;
7509#endif
7510}
7511
7512/**
7513 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
7514 * either the per-CPU or SMT idle-tracking cpumask.
7515 */
7516__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
7517{
7518 /*
7519 * Empty function body because we aren't actually acquiring or releasing
7520 * a reference to a global idle cpumask, which is read-only in the
7521 * caller and is never released. The acquire / release semantics here
7522 * are just used to make the cpumask a trusted pointer in the caller.
7523 */
7524}
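/*
 * Illustrative sketch (BPF-side code, not compiled as part of this file):
 * using the SMT mask to check whether the whole physical core containing @cpu
 * is idle. @cpu is hypothetical:
 *
 *	const struct cpumask *smt = scx_bpf_get_idle_smtmask();
 *	bool core_idle = bpf_cpumask_test_cpu(cpu, smt);
 *
 *	scx_bpf_put_idle_cpumask(smt);
 */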
7525
7526/**
7527 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
7528 * @cpu: cpu to test and clear idle for
7529 *
7530 * Returns %true if @cpu was idle and its idle state was successfully cleared.
7531 * %false otherwise.
7532 *
7533 * Unavailable if ops.update_idle() is implemented and
7534 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7535 */
7536__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
7537{
7538 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7539 scx_ops_error("built-in idle tracking is disabled");
7540 return false;
7541 }
7542
7543 if (ops_cpu_valid(cpu, NULL))
7544 return test_and_clear_cpu_idle(cpu);
7545 else
7546 return false;
7547}
7548
7549/**
7550 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
7551 * @cpus_allowed: Allowed cpumask
7552 * @flags: %SCX_PICK_IDLE_CPU_* flags
7553 *
7554 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
7555 * number on success. -%EBUSY if no matching cpu was found.
7556 *
7557 * Idle CPU tracking may race against CPU scheduling state transitions. For
7558 * example, this function may return -%EBUSY as CPUs are transitioning into the
7559 * idle state. If the caller then assumes that there will be dispatch events on
7560 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
7561 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
7562 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
7563 * event in the near future.
7564 *
7565 * Unavailable if ops.update_idle() is implemented and
7566 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7567 */
7568__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
7569 u64 flags)
7570{
7571 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7572 scx_ops_error("built-in idle tracking is disabled");
7573 return -EBUSY;
7574 }
7575
7576 return scx_pick_idle_cpu(cpus_allowed, flags);
7577}
7578
7579/**
7580 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
7581 * @cpus_allowed: Allowed cpumask
7582 * @flags: %SCX_PICK_IDLE_CPU_* flags
7583 *
7584 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
7585 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
7586 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
7587 * empty.
7588 *
7589 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
7590 * set, this function can't tell which CPUs are idle and will always pick any
7591 * CPU.
7592 */
7593__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
7594 u64 flags)
7595{
7596 s32 cpu;
7597
7598 if (static_branch_likely(&scx_builtin_idle_enabled)) {
7599 cpu = scx_pick_idle_cpu(cpus_allowed, flags);
7600 if (cpu >= 0)
7601 return cpu;
7602 }
7603
7604 cpu = cpumask_any_distribute(cpus_allowed);
7605 if (cpu < nr_cpu_ids)
7606 return cpu;
7607 else
7608 return -EBUSY;
7609}
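/*
 * Illustrative sketch (BPF-side code, not compiled as part of this file): as
 * suggested in the scx_bpf_pick_idle_cpu() comment, falling back to
 * scx_bpf_pick_any_cpu() plus a kick avoids stalls when idle tracking races
 * with CPUs going idle. example_target_cpu is hypothetical:
 *
 *	static s32 example_target_cpu(const struct cpumask *allowed)
 *	{
 *		s32 cpu = scx_bpf_pick_idle_cpu(allowed, 0);
 *
 *		if (cpu < 0) {
 *			cpu = scx_bpf_pick_any_cpu(allowed, 0);
 *			if (cpu >= 0)
 *				scx_bpf_kick_cpu(cpu, 0);
 *		}
 *		return cpu;
 *	}
 */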
7610
7611/**
7612 * scx_bpf_task_running - Is task currently running?
7613 * @p: task of interest
7614 */
7615__bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7616{
7617 return task_rq(p)->curr == p;
7618}
7619
7620/**
7621 * scx_bpf_task_cpu - CPU a task is currently associated with
7622 * @p: task of interest
7623 */
7624__bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7625{
7626 return task_cpu(p);
7627}
7628
7629/**
7630 * scx_bpf_cpu_rq - Fetch the rq of a CPU
7631 * @cpu: CPU of the rq
7632 */
7633__bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7634{
7635 if (!ops_cpu_valid(cpu, NULL))
7636 return NULL;
7637
7638 return cpu_rq(cpu);
7639}
7640
7641/**
7642 * scx_bpf_task_cgroup - Return the sched cgroup of a task
7643 * @p: task of interest
7644 *
7645 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7646 * from the scheduler's POV. SCX operations should use this function to
7647 * determine @p's current cgroup as, unlike following @p->cgroups,
7648 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7649 * rq-locked operations. Can be called on the parameter tasks of rq-locked
7650 * operations. The restriction guarantees that @p's rq is locked by the caller.
7651 */
7652#ifdef CONFIG_CGROUP_SCHED
7653__bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7654{
7655 struct task_group *tg = p->sched_task_group;
7656 struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7657
7658 if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7659 goto out;
7660
7661 cgrp = tg_cgrp(tg);
7662
7663out:
7664 cgroup_get(cgrp);
7665 return cgrp;
7666}
7667#endif
7668
7669__bpf_kfunc_end_defs();
7670
7671BTF_KFUNCS_START(scx_kfunc_ids_any)
7672BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7673BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7674BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7675BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7676BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7677BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7678BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7679BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7680BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7681BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7682BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7683BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7684BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7685BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7686BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7687BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7688BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
7689BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
7690BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
7691BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
7692BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
7693BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
7694BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7695BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7696BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7697#ifdef CONFIG_CGROUP_SCHED
7698BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7699#endif
7700BTF_KFUNCS_END(scx_kfunc_ids_any)
7701
7702static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7703 .owner = THIS_MODULE,
7704 .set = &scx_kfunc_ids_any,
7705};
7706
7707static int __init scx_init(void)
7708{
7709 int ret;
7710
7711 /*
7712 * kfunc registration can't be done from init_sched_ext_class() as
7713 * register_btf_kfunc_id_set() needs most of the system to be up.
7714 *
7715 * Some kfuncs are context-sensitive and can only be called from
7716 * specific SCX ops. They are grouped into BTF sets accordingly.
7717 * Unfortunately, BPF currently doesn't have a way of enforcing such
7718 * restrictions. Eventually, the verifier should be able to enforce
7719 * them. For now, register them the same and make each kfunc explicitly
7720 * check using scx_kf_allowed().
7721 */
7722 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7723 &scx_kfunc_set_select_cpu)) ||
7724 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7725 &scx_kfunc_set_enqueue_dispatch)) ||
7726 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7727 &scx_kfunc_set_dispatch)) ||
7728 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7729 &scx_kfunc_set_cpu_release)) ||
7730 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7731 &scx_kfunc_set_unlocked)) ||
7732 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7733 &scx_kfunc_set_unlocked)) ||
7734 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7735 &scx_kfunc_set_any)) ||
7736 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7737 &scx_kfunc_set_any)) ||
7738 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7739 &scx_kfunc_set_any))) {
7740 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7741 return ret;
7742 }
7743
7744 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7745 if (ret) {
7746 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7747 return ret;
7748 }
7749
7750 ret = register_pm_notifier(&scx_pm_notifier);
7751 if (ret) {
7752 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7753 return ret;
7754 }
7755
7756 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7757 if (!scx_kset) {
7758 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7759 return -ENOMEM;
7760 }
7761
7762 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7763 if (ret < 0) {
7764 pr_err("sched_ext: Failed to add global attributes\n");
7765 return ret;
7766 }
7767
7768 return 0;
7769}
7770__initcall(scx_init);