  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * A demo sched_ext flattened cgroup hierarchy scheduler. It implements
  4 * hierarchical weight-based cgroup CPU control by flattening the cgroup
  5 * hierarchy into a single layer by compounding the active weight share at each
  6 * level. Consider the following hierarchy with weights in parentheses:
  7 *
  8 * R + A (100) + B (100)
  9 *   |         \ C (100)
 10 *   \ D (200)
 11 *
 12 * Ignoring the root and threaded cgroups, only B, C and D can contain tasks.
 13 * Let's say all three have runnable tasks. The total share that each of these
 14 * three cgroups is entitled to can be calculated by compounding its share at
 15 * each level.
 16 *
 17 * For example, B is competing against C and in that competition its share is
 18 * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's
 19 * share in that competition is 100/(200+100) == 1/3. B's eventual share in the
 20 * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6. C's
 21 * eventual share is the same at 1/6. D is only competing at the top level and
 22 * its share is 200/(100+200) == 2/3.
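 *
 * Recapping the same numbers in one place:
 *
 *   cgroup    local share       flattened (eventual) share
 *   B         100/200 == 1/2    1/2 * 1/3 == 1/6
 *   C         100/200 == 1/2    1/2 * 1/3 == 1/6
 *   D         200/300 == 2/3    2/3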
 23 *
 24 * So, instead of hierarchically scheduling level-by-level, we can consider it
 25 * as B, C and D competing with each other with respective shares of 1/6, 1/6 and 2/3
 26 * and keep updating the eventual shares as the cgroups' runnable states change.
 27 *
 28 * This flattening of hierarchy can bring a substantial performance gain when
 29 * the cgroup hierarchy is nested multiple levels deep. In a simple benchmark using
 30 * wrk[8] on Apache serving a CGI script calculating the sha1sum of a small file, it
 31 * outperforms CFS by ~3% with the CPU controller disabled and by ~10% with two
 32 * Apache instances competing with a 2:1 weight ratio nested four levels deep.
 33 *
 34 * However, the gain comes at the cost of not being able to properly handle
 35 * a thundering herd of cgroups. For example, if many cgroups nested
 36 * behind a low-priority parent cgroup wake up around the same time, they may be
 37 * able to consume more CPU cycles than they are entitled to. In many use cases,
 38 * this isn't a real concern, especially given the performance gain. Also, there
 39 * are ways to mitigate the problem further by e.g. introducing an extra
 40 * scheduling layer on cgroup delegation boundaries.
 41 *
 42 * The scheduler first picks the cgroup to run and then schedules the tasks
 43 * within it using nested weighted vtime scheduling by default. The
 44 * cgroup-internal scheduling can be switched to FIFO with the -f option.
 45 */
 46#include <scx/common.bpf.h>
 47#include "scx_flatcg.h"
 48
 49/*
 50 * Fallback DSQ ID and the maximum number of retries to find a valid cgroup.
 51 */
 52enum {
 53	FALLBACK_DSQ		= 0,
 54	CGROUP_MAX_RETRIES	= 1024,
 55};
 56
 57char _license[] SEC("license") = "GPL";
 58
 59const volatile u32 nr_cpus = 32;	/* !0 for veristat, set during init */
 60const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL;
 61const volatile bool fifo_sched;
 62
 63u64 cvtime_now;
 64UEI_DEFINE(uei);
 65
 66struct {
 67	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 68	__type(key, u32);
 69	__type(value, u64);
 70	__uint(max_entries, FCG_NR_STATS);
 71} stats SEC(".maps");
 72
 73static void stat_inc(enum fcg_stat_idx idx)
 74{
 75	u32 idx_v = idx;
 76
 77	u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v);
 78	if (cnt_p)
 79		(*cnt_p)++;
 80}
 81
 82struct fcg_cpu_ctx {
 83	u64			cur_cgid;
 84	u64			cur_at;
 85};
 86
 87struct {
 88	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 89	__type(key, u32);
 90	__type(value, struct fcg_cpu_ctx);
 91	__uint(max_entries, 1);
 92} cpu_ctx SEC(".maps");
 93
 94struct {
 95	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
 96	__uint(map_flags, BPF_F_NO_PREALLOC);
 97	__type(key, int);
 98	__type(value, struct fcg_cgrp_ctx);
 99} cgrp_ctx SEC(".maps");
100
101struct cgv_node {
102	struct bpf_rb_node	rb_node;
103	__u64			cvtime;
104	__u64			cgid;
105};
106
107private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
108private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node);
109
110struct cgv_node_stash {
111	struct cgv_node __kptr *node;
112};
113
114struct {
115	__uint(type, BPF_MAP_TYPE_HASH);
116	__uint(max_entries, 16384);
117	__type(key, __u64);
118	__type(value, struct cgv_node_stash);
119} cgv_node_stash SEC(".maps");
120
121struct fcg_task_ctx {
122	u64		bypassed_at;
123};
124
125struct {
126	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
127	__uint(map_flags, BPF_F_NO_PREALLOC);
128	__type(key, int);
129	__type(value, struct fcg_task_ctx);
130} task_ctx SEC(".maps");
131
132/* gets inc'd on weight tree changes to expire the cached hweights */
133u64 hweight_gen = 1;
134
135static u64 div_round_up(u64 dividend, u64 divisor)
136{
137	return (dividend + divisor - 1) / divisor;
138}
139
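/*
 * Wraparound-safe vtime comparison: returns true if @a is before @b. The
 * signed subtraction stays correct even after the u64 counters wrap, the
 * same idiom as the kernel's time_before().
 */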
140static bool vtime_before(u64 a, u64 b)
141{
142	return (s64)(a - b) < 0;
143}
144
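/* bpf_rbtree less() callback: order cgv_nodes by ascending cvtime */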
145static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
146{
147	struct cgv_node *cgc_a, *cgc_b;
148
149	cgc_a = container_of(a, struct cgv_node, rb_node);
150	cgc_b = container_of(b, struct cgv_node, rb_node);
151
152	return cgc_a->cvtime < cgc_b->cvtime;
153}
154
155static struct fcg_cpu_ctx *find_cpu_ctx(void)
156{
157	struct fcg_cpu_ctx *cpuc;
158	u32 idx = 0;
159
160	cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx);
161	if (!cpuc) {
162		scx_bpf_error("cpu_ctx lookup failed");
163		return NULL;
164	}
165	return cpuc;
166}
167
168static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp)
169{
170	struct fcg_cgrp_ctx *cgc;
171
172	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
173	if (!cgc) {
174		scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id);
175		return NULL;
176	}
177	return cgc;
178}
179
180static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level)
181{
182	struct fcg_cgrp_ctx *cgc;
183
184	cgrp = bpf_cgroup_ancestor(cgrp, level);
185	if (!cgrp) {
186		scx_bpf_error("ancestor cgroup lookup failed");
187		return NULL;
188	}
189
190	cgc = find_cgrp_ctx(cgrp);
191	if (!cgc)
192		scx_bpf_error("ancestor cgrp_ctx lookup failed");
193	bpf_cgroup_release(cgrp);
194	return cgc;
195}
196
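/*
 * Refresh @cgc's cached hierarchical weight (hweight) by walking down from
 * the root and compounding weight / child_weight_sum at each level. The
 * cached value is reused until hweight_gen indicates that the weight tree
 * has changed.
 */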
197static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
198{
199	int level;
200
201	if (!cgc->nr_active) {
202		stat_inc(FCG_STAT_HWT_SKIP);
203		return;
204	}
205
206	if (cgc->hweight_gen == hweight_gen) {
207		stat_inc(FCG_STAT_HWT_CACHE);
208		return;
209	}
210
211	stat_inc(FCG_STAT_HWT_UPDATES);
212	bpf_for(level, 0, cgrp->level + 1) {
213		struct fcg_cgrp_ctx *cgc;
214		bool is_active;
215
216		cgc = find_ancestor_cgrp_ctx(cgrp, level);
217		if (!cgc)
218			break;
219
220		if (!level) {
221			cgc->hweight = FCG_HWEIGHT_ONE;
222			cgc->hweight_gen = hweight_gen;
223		} else {
224			struct fcg_cgrp_ctx *pcgc;
225
226			pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
227			if (!pcgc)
228				break;
229
230			/*
231			 * We can be opportunistic here and not grab the
232			 * cgv_tree_lock and deal with the occasional races.
233			 * However, hweight updates are already cached and
234			 * relatively low-frequency. Let's just do the
235			 * straightforward thing.
236			 */
237			bpf_spin_lock(&cgv_tree_lock);
238			is_active = cgc->nr_active;
239			if (is_active) {
240				cgc->hweight_gen = pcgc->hweight_gen;
241				cgc->hweight =
242					div_round_up(pcgc->hweight * cgc->weight,
243						     pcgc->child_weight_sum);
244			}
245			bpf_spin_unlock(&cgv_tree_lock);
246
247			if (!is_active) {
248				stat_inc(FCG_STAT_HWT_RACE);
249				break;
250			}
251		}
252	}
253}
254
255static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
256{
257	u64 delta, cvtime, max_budget;
258
259	/*
260	 * A node which is on the rbtree can't be pointed to from elsewhere yet
261	 * and thus can't be updated and repositioned. Instead, we collect the
262	 * vtime deltas separately and apply them asynchronously here.
263	 */
264	delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
265	cvtime = cgv_node->cvtime + delta;
266
267	/*
268	 * Allow a cgroup to carry the maximum budget proportional to its
269	 * hweight such that a full-hweight cgroup can immediately take up half
270	 * of the CPUs at the most while staying at the front of the rbtree.
271	 */
272	max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
273		(2 * FCG_HWEIGHT_ONE);
274	if (vtime_before(cvtime, cvtime_now - max_budget))
275		cvtime = cvtime_now - max_budget;
276
277	cgv_node->cvtime = cvtime;
278}
279
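/*
 * @cgrp has a runnable task. Move its cgv_node from the stash onto the
 * cgv_tree so the dispatch path can pick the cgroup; this is a no-op if the
 * cgroup is already queued or its node is already on the tree.
 */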
280static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
281{
282	struct cgv_node_stash *stash;
283	struct cgv_node *cgv_node;
284	u64 cgid = cgrp->kn->id;
285
286	/* paired with cmpxchg in try_pick_next_cgroup() */
287	if (__sync_val_compare_and_swap(&cgc->queued, 0, 1)) {
288		stat_inc(FCG_STAT_ENQ_SKIP);
289		return;
290	}
291
292	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
293	if (!stash) {
294		scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid);
295		return;
296	}
297
298	/* NULL if the node is already on the rbtree */
299	cgv_node = bpf_kptr_xchg(&stash->node, NULL);
300	if (!cgv_node) {
301		stat_inc(FCG_STAT_ENQ_RACE);
302		return;
303	}
304
305	bpf_spin_lock(&cgv_tree_lock);
306	cgrp_cap_budget(cgv_node, cgc);
307	bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
308	bpf_spin_unlock(&cgv_tree_lock);
309}
310
311static void set_bypassed_at(struct task_struct *p, struct fcg_task_ctx *taskc)
312{
313	/*
314	 * Tell fcg_stopping() that this bypassed the regular scheduling path
315	 * and should be force charged to the cgroup. 0 is used to indicate that
316	 * the task isn't bypassing, so if the current runtime is 0, go back by
317	 * one nanosecond.
318	 */
319	taskc->bypassed_at = p->se.sum_exec_runtime ?: (u64)-1;
320}
321
322s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
323{
324	struct fcg_task_ctx *taskc;
325	bool is_idle = false;
326	s32 cpu;
327
328	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
329
330	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
331	if (!taskc) {
332		scx_bpf_error("task_ctx lookup failed");
333		return cpu;
334	}
335
336	/*
337	 * If select_cpu_dfl() is recommending local enqueue, the target CPU is
338	 * idle. Follow it and charge the cgroup later in fcg_stopping() after
339	 * the fact.
340	 */
341	if (is_idle) {
342		set_bypassed_at(p, taskc);
343		stat_inc(FCG_STAT_LOCAL);
344		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
345	}
346
347	return cpu;
348}
349
350void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
351{
352	struct fcg_task_ctx *taskc;
353	struct cgroup *cgrp;
354	struct fcg_cgrp_ctx *cgc;
355
356	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
357	if (!taskc) {
358		scx_bpf_error("task_ctx lookup failed");
359		return;
360	}
361
362	/*
363	 * Use direct dispatching and force charging to deal with tasks with
364	 * custom affinities so that we don't have to worry about per-cgroup
365	 * dq's containing tasks that can't be executed from some CPUs.
366	 */
367	if (p->nr_cpus_allowed != nr_cpus) {
368		set_bypassed_at(p, taskc);
369
370		/*
371		 * The global dq is deprioritized as we don't want to let tasks
372		 * boost themselves by constraining their cpumask. The
373		 * deprioritization is rather severe, so let's not apply that to
374		 * per-cpu kernel threads. This is ham-fisted. We probably want to
375		 * implement per-cgroup fallback dq's instead so that we have
376		 * more control over when tasks with custom cpumask get issued.
377		 */
378		if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) {
379			stat_inc(FCG_STAT_LOCAL);
380			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
381					   enq_flags);
382		} else {
383			stat_inc(FCG_STAT_GLOBAL);
384			scx_bpf_dsq_insert(p, FALLBACK_DSQ, SCX_SLICE_DFL,
385					   enq_flags);
386		}
387		return;
388	}
389
390	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
391	cgc = find_cgrp_ctx(cgrp);
392	if (!cgc)
393		goto out_release;
394
395	if (fifo_sched) {
396		scx_bpf_dsq_insert(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags);
397	} else {
398		u64 tvtime = p->scx.dsq_vtime;
399
400		/*
401		 * Limit the amount of budget that an idling task can accumulate
402		 * to one slice.
403		 */
404		if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
405			tvtime = cgc->tvtime_now - SCX_SLICE_DFL;
406
407		scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
408					 tvtime, enq_flags);
409	}
410
411	cgrp_enqueued(cgrp, cgc);
412out_release:
413	bpf_cgroup_release(cgrp);
414}
415
416/*
417 * Walk the cgroup tree to update the active weight sums as tasks wake up and
418 * sleep. The weight sums are used as the base when calculating the proportion a
419 * given cgroup or task is entitled to at each level.
420 */
421static void update_active_weight_sums(struct cgroup *cgrp, bool runnable)
422{
423	struct fcg_cgrp_ctx *cgc;
424	bool updated = false;
425	int idx;
426
427	cgc = find_cgrp_ctx(cgrp);
428	if (!cgc)
429		return;
430
431	/*
432	 * In most cases, a hot cgroup would have multiple threads going to
433	 * sleep and waking up while the whole cgroup stays active. In leaf
434	 * cgroups, ->nr_runnable, which is updated with __sync operations, gates
435	 * ->nr_active updates, so that we don't have to grab the cgv_tree_lock
436	 * repeatedly for a busy cgroup which is staying active.
437	 */
438	if (runnable) {
439		if (__sync_fetch_and_add(&cgc->nr_runnable, 1))
440			return;
441		stat_inc(FCG_STAT_ACT);
442	} else {
443		if (__sync_sub_and_fetch(&cgc->nr_runnable, 1))
444			return;
445		stat_inc(FCG_STAT_DEACT);
446	}
447
448	/*
449	 * If @cgrp is becoming runnable, its hweight should be refreshed after
450	 * it's added to the weight tree so that enqueue has the up-to-date
451	 * value. If @cgrp is becoming quiescent, the hweight should be
452	 * refreshed before it's removed from the weight tree so that the usage
453	 * charging which happens afterwards has access to the latest value.
454	 */
455	if (!runnable)
456		cgrp_refresh_hweight(cgrp, cgc);
457
458	/* propagate upwards */
459	bpf_for(idx, 0, cgrp->level) {
460		int level = cgrp->level - idx;
461		struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
462		bool propagate = false;
463
464		cgc = find_ancestor_cgrp_ctx(cgrp, level);
465		if (!cgc)
466			break;
467		if (level) {
468			pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
469			if (!pcgc)
470				break;
471		}
472
473		/*
474		 * We need the propagation protected by a lock to synchronize
475		 * against weight changes. There's no reason to drop the lock at
476		 * each level but bpf_spin_lock() doesn't want any function
477		 * calls while locked.
478		 */
479		bpf_spin_lock(&cgv_tree_lock);
480
481		if (runnable) {
482			if (!cgc->nr_active++) {
483				updated = true;
484				if (pcgc) {
485					propagate = true;
486					pcgc->child_weight_sum += cgc->weight;
487				}
488			}
489		} else {
490			if (!--cgc->nr_active) {
491				updated = true;
492				if (pcgc) {
493					propagate = true;
494					pcgc->child_weight_sum -= cgc->weight;
495				}
496			}
497		}
498
499		bpf_spin_unlock(&cgv_tree_lock);
500
501		if (!propagate)
502			break;
503	}
504
505	if (updated)
506		__sync_fetch_and_add(&hweight_gen, 1);
507
508	if (runnable)
509		cgrp_refresh_hweight(cgrp, cgc);
510}
511
512void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
513{
514	struct cgroup *cgrp;
515
516	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
517	update_active_weight_sums(cgrp, true);
518	bpf_cgroup_release(cgrp);
519}
520
521void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
522{
523	struct cgroup *cgrp;
524	struct fcg_cgrp_ctx *cgc;
525
526	if (fifo_sched)
527		return;
528
529	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
530	cgc = find_cgrp_ctx(cgrp);
531	if (cgc) {
532		/*
533		 * @cgc->tvtime_now always progresses forward as tasks start
534		 * executing. The test and update can be performed concurrently
535		 * from multiple CPUs and is thus racy. Any error should be
536		 * contained and temporary. Let's just live with it.
537		 */
538		if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
539			cgc->tvtime_now = p->scx.dsq_vtime;
540	}
541	bpf_cgroup_release(cgrp);
542}
543
544void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
545{
546	struct fcg_task_ctx *taskc;
547	struct cgroup *cgrp;
548	struct fcg_cgrp_ctx *cgc;
549
550	/*
551	 * Scale the execution time by the inverse of the weight and charge.
552	 *
553	 * Note that the default yield implementation yields by setting
554	 * @p->scx.slice to zero and the following would treat the yielding task
555	 * as if it has consumed all its slice. If this penalizes yielding tasks
556	 * too much, determine the execution time by taking explicit timestamps
557	 * instead of depending on @p->scx.slice.
558	 */
559	if (!fifo_sched)
560		p->scx.dsq_vtime +=
561			(SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
562
563	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
564	if (!taskc) {
565		scx_bpf_error("task_ctx lookup failed");
566		return;
567	}
568
569	if (!taskc->bypassed_at)
570		return;
571
572	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
573	cgc = find_cgrp_ctx(cgrp);
574	if (cgc) {
575		__sync_fetch_and_add(&cgc->cvtime_delta,
576				     p->se.sum_exec_runtime - taskc->bypassed_at);
577		taskc->bypassed_at = 0;
578	}
579	bpf_cgroup_release(cgrp);
580}
581
582void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
583{
584	struct cgroup *cgrp;
585
586	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
587	update_active_weight_sums(cgrp, false);
588	bpf_cgroup_release(cgrp);
589}
590
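/*
 * cgroup.weight changed. Record the new weight and, if the cgroup is
 * currently active, fold the difference into the parent's child_weight_sum.
 */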
591void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
592{
593	struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
594
595	cgc = find_cgrp_ctx(cgrp);
596	if (!cgc)
597		return;
598
599	if (cgrp->level) {
600		pcgc = find_ancestor_cgrp_ctx(cgrp, cgrp->level - 1);
601		if (!pcgc)
602			return;
603	}
604
605	bpf_spin_lock(&cgv_tree_lock);
606	if (pcgc && cgc->nr_active)
607		pcgc->child_weight_sum += (s64)weight - cgc->weight;
608	cgc->weight = weight;
609	bpf_spin_unlock(&cgv_tree_lock);
610}
611
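/*
 * Pop the cgroup with the smallest cvtime off the cgv_tree and try to move
 * one of its tasks onto the local DSQ. Returns true when the dispatch loop
 * should stop (a cgroup was picked or the tree is empty), false when the
 * caller should retry with the next cgroup.
 */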
612static bool try_pick_next_cgroup(u64 *cgidp)
613{
614	struct bpf_rb_node *rb_node;
615	struct cgv_node_stash *stash;
616	struct cgv_node *cgv_node;
617	struct fcg_cgrp_ctx *cgc;
618	struct cgroup *cgrp;
619	u64 cgid;
620
621	/* pop the front cgroup and wind cvtime_now accordingly */
622	bpf_spin_lock(&cgv_tree_lock);
623
624	rb_node = bpf_rbtree_first(&cgv_tree);
625	if (!rb_node) {
626		bpf_spin_unlock(&cgv_tree_lock);
627		stat_inc(FCG_STAT_PNC_NO_CGRP);
628		*cgidp = 0;
629		return true;
630	}
631
632	rb_node = bpf_rbtree_remove(&cgv_tree, rb_node);
633	bpf_spin_unlock(&cgv_tree_lock);
634
635	if (!rb_node) {
636		/*
637		 * This should never happen. bpf_rbtree_first() was called
638		 * above while the tree lock was held, so the node should
639		 * always be present.
640		 */
641		scx_bpf_error("node could not be removed");
642		return true;
643	}
644
645	cgv_node = container_of(rb_node, struct cgv_node, rb_node);
646	cgid = cgv_node->cgid;
647
648	if (vtime_before(cvtime_now, cgv_node->cvtime))
649		cvtime_now = cgv_node->cvtime;
650
651	/*
652	 * If lookup fails, the cgroup's gone. Free and move on. See
653	 * fcg_cgroup_exit().
654	 */
655	cgrp = bpf_cgroup_from_id(cgid);
656	if (!cgrp) {
657		stat_inc(FCG_STAT_PNC_GONE);
658		goto out_free;
659	}
660
661	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
662	if (!cgc) {
663		bpf_cgroup_release(cgrp);
664		stat_inc(FCG_STAT_PNC_GONE);
665		goto out_free;
666	}
667
668	if (!scx_bpf_dsq_move_to_local(cgid)) {
669		bpf_cgroup_release(cgrp);
670		stat_inc(FCG_STAT_PNC_EMPTY);
671		goto out_stash;
672	}
673
674	/*
675	 * Successfully consumed from the cgroup. This will be our current
676	 * cgroup for the new slice. Refresh its hweight.
677	 */
678	cgrp_refresh_hweight(cgrp, cgc);
679
680	bpf_cgroup_release(cgrp);
681
682	/*
683	 * As the cgroup may have more tasks, add it back to the rbtree. Note
684	 * that here we charge the full slice upfront and then adjust it later
685	 * according to the actual consumption. This prevents a low-priority thundering
686	 * herd from saturating the machine.
687	 */
688	bpf_spin_lock(&cgv_tree_lock);
689	cgv_node->cvtime += cgrp_slice_ns * FCG_HWEIGHT_ONE / (cgc->hweight ?: 1);
690	cgrp_cap_budget(cgv_node, cgc);
691	bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
692	bpf_spin_unlock(&cgv_tree_lock);
693
694	*cgidp = cgid;
695	stat_inc(FCG_STAT_PNC_NEXT);
696	return true;
697
698out_stash:
699	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
700	if (!stash) {
701		stat_inc(FCG_STAT_PNC_GONE);
702		goto out_free;
703	}
704
705	/*
706	 * Paired with cmpxchg in cgrp_enqueued(). If the enqueue path sees the
707	 * following transition, it will enqueue the cgroup. If it got there
708	 * earlier, we'll see its task in the dq below and requeue the cgroup.
709	 */
710	__sync_val_compare_and_swap(&cgc->queued, 1, 0);
711
712	if (scx_bpf_dsq_nr_queued(cgid)) {
713		bpf_spin_lock(&cgv_tree_lock);
714		bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
715		bpf_spin_unlock(&cgv_tree_lock);
716		stat_inc(FCG_STAT_PNC_RACE);
717	} else {
718		cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
719		if (cgv_node) {
720			scx_bpf_error("unexpected !NULL cgv_node stash");
721			goto out_free;
722		}
723	}
724
725	return false;
726
727out_free:
728	bpf_obj_drop(cgv_node);
729	return false;
730}
731
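/*
 * Keep running the current cgroup while its slice lasts and it still has
 * queued tasks. Otherwise, refund the unused portion of the charged slice,
 * serve the fallback DSQ, and pick the next cgroup from the cgv_tree.
 */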
732void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
733{
734	struct fcg_cpu_ctx *cpuc;
735	struct fcg_cgrp_ctx *cgc;
736	struct cgroup *cgrp;
737	u64 now = bpf_ktime_get_ns();
738	bool picked_next = false;
739
740	cpuc = find_cpu_ctx();
741	if (!cpuc)
742		return;
743
744	if (!cpuc->cur_cgid)
745		goto pick_next_cgroup;
746
747	if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
748		if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
749			stat_inc(FCG_STAT_CNS_KEEP);
750			return;
751		}
752		stat_inc(FCG_STAT_CNS_EMPTY);
753	} else {
754		stat_inc(FCG_STAT_CNS_EXPIRE);
755	}
756
757	/*
758	 * The current cgroup is expiring. It was already charged a full slice.
759	 * Calculate the actual usage and accumulate the delta.
760	 */
761	cgrp = bpf_cgroup_from_id(cpuc->cur_cgid);
762	if (!cgrp) {
763		stat_inc(FCG_STAT_CNS_GONE);
764		goto pick_next_cgroup;
765	}
766
767	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
768	if (cgc) {
769		/*
770		 * We want to update the vtime delta and then look for the next
771		 * cgroup to execute but the latter needs to be done in a loop
772		 * and we can't keep the lock held. Oh well...
773		 */
774		bpf_spin_lock(&cgv_tree_lock);
775		__sync_fetch_and_add(&cgc->cvtime_delta,
776				     (cpuc->cur_at + cgrp_slice_ns - now) *
777				     FCG_HWEIGHT_ONE / (cgc->hweight ?: 1));
778		bpf_spin_unlock(&cgv_tree_lock);
779	} else {
780		stat_inc(FCG_STAT_CNS_GONE);
781	}
782
783	bpf_cgroup_release(cgrp);
784
785pick_next_cgroup:
786	cpuc->cur_at = now;
787
788	if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ)) {
789		cpuc->cur_cgid = 0;
790		return;
791	}
792
793	bpf_repeat(CGROUP_MAX_RETRIES) {
794		if (try_pick_next_cgroup(&cpuc->cur_cgid)) {
795			picked_next = true;
796			break;
797		}
798	}
799
800	/*
801	 * This only happens if try_pick_next_cgroup() races against enqueue
802	 * path for more than CGROUP_MAX_RETRIES times, which is extremely
803	 * unlikely and likely indicates an underlying bug. There shouldn't be
804	 * any stall risk as the race is against enqueue.
805	 */
806	if (!picked_next)
807		stat_inc(FCG_STAT_PNC_FAIL);
808}
809
810s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
811		   struct scx_init_task_args *args)
812{
813	struct fcg_task_ctx *taskc;
814	struct fcg_cgrp_ctx *cgc;
815
816	/*
817	 * @p is new. Let's ensure that its task_ctx is available. We can sleep
818	 * in this function and the following will automatically use GFP_KERNEL.
819	 */
820	taskc = bpf_task_storage_get(&task_ctx, p, 0,
821				     BPF_LOCAL_STORAGE_GET_F_CREATE);
822	if (!taskc)
823		return -ENOMEM;
824
825	taskc->bypassed_at = 0;
826
827	if (!(cgc = find_cgrp_ctx(args->cgroup)))
828		return -ENOENT;
829
830	p->scx.dsq_vtime = cgc->tvtime_now;
831
832	return 0;
833}
834
835int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp,
836			     struct scx_cgroup_init_args *args)
837{
838	struct fcg_cgrp_ctx *cgc;
839	struct cgv_node *cgv_node;
840	struct cgv_node_stash empty_stash = {}, *stash;
841	u64 cgid = cgrp->kn->id;
842	int ret;
843
844	/*
845	 * Technically incorrect as the cgroup ID is a full 64 bits while the dsq ID is
846	 * 63 bits. Should not be a problem in practice and easy to spot in the
847	 * unlikely case that it breaks.
848	 */
849	ret = scx_bpf_create_dsq(cgid, -1);
850	if (ret)
851		return ret;
852
853	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0,
854				   BPF_LOCAL_STORAGE_GET_F_CREATE);
855	if (!cgc) {
856		ret = -ENOMEM;
857		goto err_destroy_dsq;
858	}
859
860	cgc->weight = args->weight;
861	cgc->hweight = FCG_HWEIGHT_ONE;
862
863	ret = bpf_map_update_elem(&cgv_node_stash, &cgid, &empty_stash,
864				  BPF_NOEXIST);
865	if (ret) {
866		if (ret != -ENOMEM)
867			scx_bpf_error("unexpected stash creation error (%d)",
868				      ret);
869		goto err_destroy_dsq;
870	}
871
872	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
873	if (!stash) {
874		scx_bpf_error("unexpected cgv_node stash lookup failure");
875		ret = -ENOENT;
876		goto err_destroy_dsq;
877	}
878
879	cgv_node = bpf_obj_new(struct cgv_node);
880	if (!cgv_node) {
881		ret = -ENOMEM;
882		goto err_del_cgv_node;
883	}
884
885	cgv_node->cgid = cgid;
886	cgv_node->cvtime = cvtime_now;
887
888	cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
889	if (cgv_node) {
890		scx_bpf_error("unexpected !NULL cgv_node stash");
891		ret = -EBUSY;
892		goto err_drop;
893	}
894
895	return 0;
896
897err_drop:
898	bpf_obj_drop(cgv_node);
899err_del_cgv_node:
900	bpf_map_delete_elem(&cgv_node_stash, &cgid);
901err_destroy_dsq:
902	scx_bpf_destroy_dsq(cgid);
903	return ret;
904}
905
906void BPF_STRUCT_OPS(fcg_cgroup_exit, struct cgroup *cgrp)
907{
908	u64 cgid = cgrp->kn->id;
909
910	/*
911	 * For now, there's no way to find and remove the cgv_node if it's on the
912	 * cgv_tree. Let's drain them in the dispatch path as they get popped
913	 * off the front of the tree.
914	 */
915	bpf_map_delete_elem(&cgv_node_stash, &cgid);
916	scx_bpf_destroy_dsq(cgid);
917}
918
919void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
920		    struct cgroup *from, struct cgroup *to)
921{
922	struct fcg_cgrp_ctx *from_cgc, *to_cgc;
923	s64 vtime_delta;
924
925	/* find_cgrp_ctx() triggers scx_bpf_error() on lookup failures */
926	if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
927		return;
928
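	/*
	 * Preserve the task's relative vtime position by translating it from
	 * the source cgroup's timeline onto the destination cgroup's timeline.
	 */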
929	vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
930	p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
931}
932
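/*
 * Create the fallback DSQ used for tasks with restricted cpumasks; it is
 * consumed before picking a cgroup in fcg_dispatch().
 */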
933s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
934{
935	return scx_bpf_create_dsq(FALLBACK_DSQ, -1);
936}
937
938void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei)
939{
940	UEI_RECORD(uei, ei);
941}
942
943SCX_OPS_DEFINE(flatcg_ops,
944	       .select_cpu		= (void *)fcg_select_cpu,
945	       .enqueue			= (void *)fcg_enqueue,
946	       .dispatch		= (void *)fcg_dispatch,
947	       .runnable		= (void *)fcg_runnable,
948	       .running			= (void *)fcg_running,
949	       .stopping		= (void *)fcg_stopping,
950	       .quiescent		= (void *)fcg_quiescent,
951	       .init_task		= (void *)fcg_init_task,
952	       .cgroup_set_weight	= (void *)fcg_cgroup_set_weight,
953	       .cgroup_init		= (void *)fcg_cgroup_init,
954	       .cgroup_exit		= (void *)fcg_cgroup_exit,
955	       .cgroup_move		= (void *)fcg_cgroup_move,
956	       .init			= (void *)fcg_init,
957	       .exit			= (void *)fcg_exit,
958	       .flags			= SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
959	       .name			= "flatcg");