v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2#include "cgroup-internal.h"
  3
  4#include <linux/sched/cputime.h>
  5
  6#include <linux/bpf.h>
  7#include <linux/btf.h>
  8#include <linux/btf_ids.h>
  9
 10static DEFINE_SPINLOCK(cgroup_rstat_lock);
 11static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 12
 13static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
 14
 15static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 16{
 17	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
 18}
 19
 20/**
 21 * cgroup_rstat_updated - keep track of updated rstat_cpu
 22 * @cgrp: target cgroup
 23 * @cpu: cpu on which rstat_cpu was updated
 24 *
 25 * @cgrp's rstat_cpu on @cpu was updated.  Put it on the parent's matching
 26 * rstat_cpu->updated_children list.  See the comment on top of
 27 * cgroup_rstat_cpu definition for details.
 28 */
 29void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
 30{
 31	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 32	unsigned long flags;
 33
 34	/*
 35	 * Speculative already-on-list test. This may race leading to
 36	 * temporary inaccuracies, which is fine.
 37	 *
 38	 * Because @parent's updated_children is terminated with @parent
 39	 * instead of NULL, we can tell whether @cgrp is on the list by
 40	 * testing the next pointer for NULL.
 41	 */
 42	if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
 43		return;
 44
 45	raw_spin_lock_irqsave(cpu_lock, flags);
 46
 47	/* put @cgrp and all ancestors on the corresponding updated lists */
 48	while (true) {
 49		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
 50		struct cgroup *parent = cgroup_parent(cgrp);
 51		struct cgroup_rstat_cpu *prstatc;
 52
 53		/*
 54		 * Both additions and removals are bottom-up.  If a cgroup
 55		 * is already in the tree, all ancestors are.
 56		 */
 57		if (rstatc->updated_next)
 58			break;
 59
 60		/* Root has no parent to link it to, but mark it busy */
 61		if (!parent) {
 62			rstatc->updated_next = cgrp;
 63			break;
 64		}
 65
 66		prstatc = cgroup_rstat_cpu(parent, cpu);
 67		rstatc->updated_next = prstatc->updated_children;
 68		prstatc->updated_children = cgrp;
 69
 70		cgrp = parent;
 71	}
 72
 73	raw_spin_unlock_irqrestore(cpu_lock, flags);
 74}
 75
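As an aside for readers, the parent-terminated list trick is worth seeing in isolation. Below is a minimal userspace sketch (not kernel code; all names are hypothetical) of the same invariant: a node is on the per-cpu updated list iff its updated_next is non-NULL, because an empty updated_children points back at its owner and the last sibling points at the parent rather than at NULL.

/* Minimal userspace model of the parent-terminated updated list. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *parent;
	struct node *updated_children;	/* self-terminated when empty */
	struct node *updated_next;	/* NULL iff not on parent's list */
	const char *name;
};

static void mark_updated(struct node *n)
{
	while (!n->updated_next) {	/* bottom-up; stop at first on-list ancestor */
		if (!n->parent) {
			n->updated_next = n;	/* root: no parent, mark busy */
			break;
		}
		n->updated_next = n->parent->updated_children;
		n->parent->updated_children = n;
		n = n->parent;
	}
}

int main(void)
{
	struct node root = { .name = "root" };
	struct node a = { .parent = &root, .name = "a" };

	root.updated_children = &root;	/* empty lists point at the owner */
	a.updated_children = &a;

	mark_updated(&a);
	assert(a.updated_next);			/* the speculative on-list test */
	assert(root.updated_children == &a);
	printf("%s linked under %s\n", a.name, root.name);
	return 0;
}
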
 76/**
 77 * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
 78 * @pos: current position
 79 * @root: root of the tree to traverse
 80 * @cpu: target cpu
 81 *
 82 * Walks the updated rstat_cpu tree on @cpu from @root.  %NULL @pos starts
 83 * the traversal and %NULL return indicates the end.  During traversal,
 84 * each returned cgroup is unlinked from the tree.  Must be called with the
 85 * matching cgroup_rstat_cpu_lock held.
 86 *
 87 * The only ordering guarantee is that, for a parent and a child pair
 88 * covered by a given traversal, if a child is visited, its parent is
 89 * guaranteed to be visited afterwards.
 90 */
 91static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 92						   struct cgroup *root, int cpu)
 93{
 94	struct cgroup_rstat_cpu *rstatc;
 95	struct cgroup *parent;
 96
 97	if (pos == root)
 98		return NULL;
 99
100	/*
101	 * We're gonna walk down to the first leaf and visit/remove it.  We
102	 * can pick any unvisited node as the starting point.
103	 */
104	if (!pos) {
105		pos = root;
106		/* return NULL if this subtree is not on-list */
107		if (!cgroup_rstat_cpu(pos, cpu)->updated_next)
108			return NULL;
109	} else {
110		pos = cgroup_parent(pos);
111	}
112
113	/* walk down to the first leaf */
114	while (true) {
115		rstatc = cgroup_rstat_cpu(pos, cpu);
116		if (rstatc->updated_children == pos)
117			break;
118		pos = rstatc->updated_children;
119	}
120
121	/*
122	 * Unlink @pos from the tree.  As the updated_children list is
123	 * singly linked, we have to walk it to find the removal point.
124	 * However, due to the way we traverse, @pos will be the first
125	 * child in most cases. The only exception is @root.
126	 */
127	parent = cgroup_parent(pos);
128	if (parent) {
129		struct cgroup_rstat_cpu *prstatc;
130		struct cgroup **nextp;
131
132		prstatc = cgroup_rstat_cpu(parent, cpu);
133		nextp = &prstatc->updated_children;
134		while (*nextp != pos) {
135			struct cgroup_rstat_cpu *nrstatc;
136
137			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
138			WARN_ON_ONCE(*nextp == parent);
139			nextp = &nrstatc->updated_next;
140		}
141		*nextp = rstatc->updated_next;
142	}
143
144	rstatc->updated_next = NULL;
145	return pos;
146}
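The visit order is easiest to see on a concrete tree. A hedged worked trace follows, with hypothetical cgroups: A and B are updated children of @root (the per-cpu list at root reads A then B) and C is the only updated child of A.

/*
 * pop(NULL) -> start at root, walk down root -> A -> C, unlink C, return C
 * pop(C)    -> restart at A (C's parent); A is now a leaf, return A
 * pop(A)    -> restart at root, walk down to leaf B, return B
 * pop(B)    -> root itself is now the leaf, return root
 * pop(root) -> NULL, traversal complete
 *
 * Children are always returned before their parents, which is the
 * guarantee the flush path relies on.
 */
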
147
148/*
149 * A hook for bpf stat collectors to attach to and flush their stats.
150 * Together with providing bpf kfuncs for cgroup_rstat_updated() and
151 * cgroup_rstat_flush(), this enables a complete workflow where bpf progs that
152 * collect cgroup stats can integrate with rstat for efficient flushing.
153 *
154 * A static noinline declaration here could cause the compiler to optimize away
155 * the function. A global noinline declaration will keep the definition, but may
156 * optimize away the callsite. Therefore, __weak is needed to ensure that the
157 * call is still emitted, by telling the compiler that we don't know what the
158 * function might eventually be.
159 *
160 * __diag_* below are needed to dismiss the missing prototype warning.
161 */
162__diag_push();
163__diag_ignore_all("-Wmissing-prototypes",
164		  "kfuncs which will be used in BPF programs");
165
166__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
167				     struct cgroup *parent, int cpu)
168{
169}
170
171__diag_pop();
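As an illustration of the hook, a BPF tracing program can attach to bpf_rstat_flush() with fentry, in the spirit of the kernel's BPF selftests. The sketch below is assumption-laden: the map layout and the per-cgroup counter semantics are invented for the example, and "vmlinux.h" is the usual generated type header.

// SPDX-License-Identifier: GPL-2.0-only
/* Hedged BPF-side sketch of an rstat flusher. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, u64);	/* cgroup id */
	__type(value, u64);	/* hypothetical per-cgroup flush count */
} sample_stats SEC(".maps");

SEC("fentry/bpf_rstat_flush")
int BPF_PROG(rstat_flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
{
	u64 id = cgrp->kn->id;
	u64 one = 1, *v;

	v = bpf_map_lookup_elem(&sample_stats, &id);
	if (v)
		__sync_fetch_and_add(v, 1);
	else
		bpf_map_update_elem(&sample_stats, &id, &one, BPF_ANY);
	return 0;
}

char _license[] SEC("license") = "GPL";
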
172
173/* see cgroup_rstat_flush() */
174static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
175	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
176{
177	int cpu;
178
179	lockdep_assert_held(&cgroup_rstat_lock);
180
181	for_each_possible_cpu(cpu) {
182		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
183						       cpu);
184		struct cgroup *pos = NULL;
185		unsigned long flags;
186
187		/*
188		 * The _irqsave() is needed because cgroup_rstat_lock is
189		 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
190		 * this lock with the _irq() suffix only disables interrupts on
191		 * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
192		 * interrupts on both configurations. The _irqsave() ensures
193		 * that interrupts are always disabled and later restored.
194		 */
195		raw_spin_lock_irqsave(cpu_lock, flags);
196		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
197			struct cgroup_subsys_state *css;
198
199			cgroup_base_stat_flush(pos, cpu);
200			bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
201
202			rcu_read_lock();
203			list_for_each_entry_rcu(css, &pos->rstat_css_list,
204						rstat_css_node)
205				css->ss->css_rstat_flush(css, cpu);
206			rcu_read_unlock();
207		}
208		raw_spin_unlock_irqrestore(cpu_lock, flags);
209
210		/* if @may_sleep, play nice and yield if necessary */
211		if (may_sleep && (need_resched() ||
212				  spin_needbreak(&cgroup_rstat_lock))) {
213			spin_unlock_irq(&cgroup_rstat_lock);
214			if (!cond_resched())
215				cpu_relax();
216			spin_lock_irq(&cgroup_rstat_lock);
217		}
218	}
219}
220
221/**
222 * cgroup_rstat_flush - flush stats in @cgrp's subtree
223 * @cgrp: target cgroup
224 *
225 * Collect all per-cpu stats in @cgrp's subtree into the global counters
226 * and propagate them upwards.  After this function returns, all cgroups in
227 * the subtree have up-to-date ->stat.
228 *
229 * This also gets all cgroups in the subtree including @cgrp off the
230 * ->updated_children lists.
231 *
232 * This function may block.
233 */
234void cgroup_rstat_flush(struct cgroup *cgrp)
235{
236	might_sleep();
237
238	spin_lock_irq(&cgroup_rstat_lock);
239	cgroup_rstat_flush_locked(cgrp, true);
240	spin_unlock_irq(&cgroup_rstat_lock);
241}
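Together with cgroup_rstat_updated(), this is the whole contract a stat producer needs: mark on the per-cpu write path, flush before reading. A hedged kernel-style sketch; my_pcpu and my_total are hypothetical fields, not part of struct cgroup.

/* hypothetical write path: account into a per-cpu counter, mark updated */
static void my_charge(struct cgroup *cgrp, u64 bytes)
{
	int cpu = get_cpu();			/* pin the CPU across the pair */

	this_cpu_add(*cgrp->my_pcpu, bytes);	/* hypothetical u64 __percpu * field */
	cgroup_rstat_updated(cgrp, cpu);
	put_cpu();
}

/* hypothetical read path: bring the global counter up to date first */
static u64 my_read(struct cgroup *cgrp)
{
	cgroup_rstat_flush(cgrp);
	return cgrp->my_total;			/* hypothetical aggregated field */
}
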
242
243/**
244 * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
245 * @cgrp: target cgroup
246 *
247 * This function can be called from any context.
248 */
249void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
250{
251	unsigned long flags;
252
253	spin_lock_irqsave(&cgroup_rstat_lock, flags);
254	cgroup_rstat_flush_locked(cgrp, false);
255	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
256}
257
258/**
259 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
260 * @cgrp: target cgroup
261 *
262 * Flush stats in @cgrp's subtree and prevent further flushes.  Must be
263 * paired with cgroup_rstat_flush_release().
264 *
265 * This function may block.
266 */
267void cgroup_rstat_flush_hold(struct cgroup *cgrp)
268	__acquires(&cgroup_rstat_lock)
269{
270	might_sleep();
271	spin_lock_irq(&cgroup_rstat_lock);
272	cgroup_rstat_flush_locked(cgrp, true);
273}
274
275/**
276 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
277 */
278void cgroup_rstat_flush_release(void)
279	__releases(&cgroup_rstat_lock)
280{
281	spin_unlock_irq(&cgroup_rstat_lock);
282}
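A hedged sketch of the hold/release pairing, for reading several base-stat fields as one consistent snapshot (my_read_pair is hypothetical; cgroup_base_stat_cputime_show() later in this file does the same thing for real):

static void my_read_pair(struct cgroup *cgrp, u64 *ut, u64 *st)
{
	cgroup_rstat_flush_hold(cgrp);		/* flush, then keep cgroup_rstat_lock */
	*ut = cgrp->bstat.cputime.utime;
	*st = cgrp->bstat.cputime.stime;	/* consistent with *ut */
	cgroup_rstat_flush_release();
}
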
283
284int cgroup_rstat_init(struct cgroup *cgrp)
285{
286	int cpu;
287
288	/* the root cgrp has rstat_cpu preallocated */
289	if (!cgrp->rstat_cpu) {
290		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
291		if (!cgrp->rstat_cpu)
292			return -ENOMEM;
293	}
294
295	/* ->updated_children list is self terminated */
296	for_each_possible_cpu(cpu) {
297		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
298
299		rstatc->updated_children = cgrp;
300		u64_stats_init(&rstatc->bsync);
301	}
302
303	return 0;
304}
305
306void cgroup_rstat_exit(struct cgroup *cgrp)
307{
308	int cpu;
309
310	cgroup_rstat_flush(cgrp);
311
312	/* sanity check */
313	for_each_possible_cpu(cpu) {
314		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
315
316		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
317		    WARN_ON_ONCE(rstatc->updated_next))
318			return;
319	}
320
321	free_percpu(cgrp->rstat_cpu);
322	cgrp->rstat_cpu = NULL;
323}
324
325void __init cgroup_rstat_boot(void)
326{
327	int cpu;
328
329	for_each_possible_cpu(cpu)
330		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
331}
332
333/*
334 * Functions for cgroup basic resource statistics implemented on top of
335 * rstat.
336 */
337static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
338				 struct cgroup_base_stat *src_bstat)
339{
340	dst_bstat->cputime.utime += src_bstat->cputime.utime;
341	dst_bstat->cputime.stime += src_bstat->cputime.stime;
342	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
343#ifdef CONFIG_SCHED_CORE
344	dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
345#endif
346}
347
348static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
349				 struct cgroup_base_stat *src_bstat)
350{
351	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
352	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
353	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
354#ifdef CONFIG_SCHED_CORE
355	dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
356#endif
357}
358
359static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
360{
361	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
362	struct cgroup *parent = cgroup_parent(cgrp);
363	struct cgroup_base_stat delta;
364	unsigned seq;
365
366	/* Root-level stats are sourced from system-wide CPU stats */
367	if (!parent)
368		return;
369
370	/* fetch the current per-cpu values */
371	do {
372		seq = __u64_stats_fetch_begin(&rstatc->bsync);
373		delta = rstatc->bstat;
374	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
375
376	/* propagate percpu delta to global */
377	cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
378	cgroup_base_stat_add(&cgrp->bstat, &delta);
379	cgroup_base_stat_add(&rstatc->last_bstat, &delta);
380
381	/* propagate global delta to parent (unless that's root) */
382	if (cgroup_parent(parent)) {
383		delta = cgrp->bstat;
384		cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
385		cgroup_base_stat_add(&parent->bstat, &delta);
386		cgroup_base_stat_add(&cgrp->last_bstat, &delta);
387	}
388}
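The last_bstat bookkeeping reduces to a subtract-then-add dance. A minimal userspace model with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* hypothetical values for one cpu of one cgroup */
	unsigned long long bstat = 10, last_bstat = 7, global = 100;
	unsigned long long delta = bstat - last_bstat;	/* 3 newly accounted ticks */

	global += delta;	/* cgroup_base_stat_add(&cgrp->bstat, &delta) */
	last_bstat += delta;	/* the next flush sees only newer updates */
	printf("delta=%llu global=%llu last=%llu\n", delta, global, last_bstat);
	return 0;
}
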
389
390static struct cgroup_rstat_cpu *
391cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
392{
393	struct cgroup_rstat_cpu *rstatc;
394
395	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
396	*flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
397	return rstatc;
398}
399
400static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
401						 struct cgroup_rstat_cpu *rstatc,
402						 unsigned long flags)
403{
404	u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
405	cgroup_rstat_updated(cgrp, smp_processor_id());
406	put_cpu_ptr(rstatc);
407}
408
409void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
410{
411	struct cgroup_rstat_cpu *rstatc;
412	unsigned long flags;
413
414	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
415	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
416	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
417}
418
419void __cgroup_account_cputime_field(struct cgroup *cgrp,
420				    enum cpu_usage_stat index, u64 delta_exec)
421{
422	struct cgroup_rstat_cpu *rstatc;
423	unsigned long flags;
424
425	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
426
427	switch (index) {
428	case CPUTIME_USER:
429	case CPUTIME_NICE:
430		rstatc->bstat.cputime.utime += delta_exec;
431		break;
432	case CPUTIME_SYSTEM:
433	case CPUTIME_IRQ:
434	case CPUTIME_SOFTIRQ:
435		rstatc->bstat.cputime.stime += delta_exec;
436		break;
437#ifdef CONFIG_SCHED_CORE
438	case CPUTIME_FORCEIDLE:
439		rstatc->bstat.forceidle_sum += delta_exec;
440		break;
441#endif
442	default:
443		break;
444	}
445
446	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
447}
448
449/*
450 * compute the cputime for the root cgroup by getting the per cpu data
451 * at a global level, then categorizing the fields in a manner consistent
452 * with how it is done by __cgroup_account_cputime_field for each bit of
453 * cpu time attributed to a cgroup.
454 */
455static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
456{
457	struct task_cputime *cputime = &bstat->cputime;
458	int i;
459
460	cputime->stime = 0;
461	cputime->utime = 0;
462	cputime->sum_exec_runtime = 0;
463	for_each_possible_cpu(i) {
464		struct kernel_cpustat kcpustat;
465		u64 *cpustat = kcpustat.cpustat;
466		u64 user = 0;
467		u64 sys = 0;
468
469		kcpustat_cpu_fetch(&kcpustat, i);
470
471		user += cpustat[CPUTIME_USER];
472		user += cpustat[CPUTIME_NICE];
473		cputime->utime += user;
474
475		sys += cpustat[CPUTIME_SYSTEM];
476		sys += cpustat[CPUTIME_IRQ];
477		sys += cpustat[CPUTIME_SOFTIRQ];
478		cputime->stime += sys;
479
480		cputime->sum_exec_runtime += user;
481		cputime->sum_exec_runtime += sys;
482		cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
483
484#ifdef CONFIG_SCHED_CORE
485		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
486#endif
487	}
488}
489
490void cgroup_base_stat_cputime_show(struct seq_file *seq)
491{
492	struct cgroup *cgrp = seq_css(seq)->cgroup;
493	u64 usage, utime, stime;
494	struct cgroup_base_stat bstat;
495#ifdef CONFIG_SCHED_CORE
496	u64 forceidle_time;
497#endif
498
499	if (cgroup_parent(cgrp)) {
500		cgroup_rstat_flush_hold(cgrp);
501		usage = cgrp->bstat.cputime.sum_exec_runtime;
502		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
503			       &utime, &stime);
504#ifdef CONFIG_SCHED_CORE
505		forceidle_time = cgrp->bstat.forceidle_sum;
506#endif
507		cgroup_rstat_flush_release();
508	} else {
509		root_cgroup_cputime(&bstat);
510		usage = bstat.cputime.sum_exec_runtime;
511		utime = bstat.cputime.utime;
512		stime = bstat.cputime.stime;
513#ifdef CONFIG_SCHED_CORE
514		forceidle_time = bstat.forceidle_sum;
515#endif
516	}
517
518	do_div(usage, NSEC_PER_USEC);
519	do_div(utime, NSEC_PER_USEC);
520	do_div(stime, NSEC_PER_USEC);
521#ifdef CONFIG_SCHED_CORE
522	do_div(forceidle_time, NSEC_PER_USEC);
523#endif
524
525	seq_printf(seq, "usage_usec %llu\n"
526		   "user_usec %llu\n"
527		   "system_usec %llu\n",
528		   usage, utime, stime);
529
530#ifdef CONFIG_SCHED_CORE
531	seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
532#endif
533}
534
535/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
536BTF_SET8_START(bpf_rstat_kfunc_ids)
537BTF_ID_FLAGS(func, cgroup_rstat_updated)
538BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
539BTF_SET8_END(bpf_rstat_kfunc_ids)
540
541static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
542	.owner          = THIS_MODULE,
543	.set            = &bpf_rstat_kfunc_ids,
544};
545
546static int __init bpf_rstat_kfunc_init(void)
547{
548	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
549					 &bpf_rstat_kfunc_set);
550}
551late_initcall(bpf_rstat_kfunc_init);
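Once registered, tracing programs can also call the kfuncs directly. A hedged BPF-side fragment: the __ksym externs mirror the kernel signatures above, while the sleepable cgroup-iterator attach point follows the pattern of the cgroup_hierarchical_stats selftest and is otherwise illustrative.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym;
extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym;

SEC("iter.s/cgroup")	/* sleepable, since cgroup_rstat_flush() may block */
int dump_cgroup_stats(struct bpf_iter__cgroup *ctx)
{
	struct cgroup *cgrp = ctx->cgroup;

	if (!cgrp)
		return 1;

	cgroup_rstat_flush(cgrp);	/* make this subtree's stats current */
	return 0;
}

char _license[] SEC("license") = "GPL";
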
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2#include "cgroup-internal.h"
  3
  4#include <linux/sched/cputime.h>
  5
  6#include <linux/bpf.h>
  7#include <linux/btf.h>
  8#include <linux/btf_ids.h>
  9
 10#include <trace/events/cgroup.h>
 11
 12static DEFINE_SPINLOCK(cgroup_rstat_lock);
 13static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 14
 15static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
 16
 17static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 18{
 19	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
 20}
 21
 22/*
 23 * Helper functions for rstat per CPU lock (cgroup_rstat_cpu_lock).
 24 *
 25 * This makes it easier to diagnose locking issues and contention in
 26 * production environments. The parameter @fast_path determines the
 27 * tracepoints being added, allowing us to diagnose "flush" related
 28 * operations without handling high-frequency fast-path "update" events.
 29 */
 30static __always_inline
 31unsigned long _cgroup_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
 32				     struct cgroup *cgrp, const bool fast_path)
 33{
 34	unsigned long flags;
 35	bool contended;
 36
 37	/*
 38	 * The _irqsave() is needed because cgroup_rstat_lock is
 39	 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
 40	 * this lock with the _irq() suffix only disables interrupts on
 41	 * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
 42	 * interrupts on both configurations. The _irqsave() ensures
 43	 * that interrupts are always disabled and later restored.
 44	 */
 45	contended = !raw_spin_trylock_irqsave(cpu_lock, flags);
 46	if (contended) {
 47		if (fast_path)
 48			trace_cgroup_rstat_cpu_lock_contended_fastpath(cgrp, cpu, contended);
 49		else
 50			trace_cgroup_rstat_cpu_lock_contended(cgrp, cpu, contended);
 51
 52		raw_spin_lock_irqsave(cpu_lock, flags);
 53	}
 54
 55	if (fast_path)
 56		trace_cgroup_rstat_cpu_locked_fastpath(cgrp, cpu, contended);
 57	else
 58		trace_cgroup_rstat_cpu_locked(cgrp, cpu, contended);
 59
 60	return flags;
 61}
 62
 63static __always_inline
 64void _cgroup_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
 65			      struct cgroup *cgrp, unsigned long flags,
 66			      const bool fast_path)
 67{
 68	if (fast_path)
 69		trace_cgroup_rstat_cpu_unlock_fastpath(cgrp, cpu, false);
 70	else
 71		trace_cgroup_rstat_cpu_unlock(cgrp, cpu, false);
 72
 73	raw_spin_unlock_irqrestore(cpu_lock, flags);
 74}
 75
 76/**
 77 * cgroup_rstat_updated - keep track of updated rstat_cpu
 78 * @cgrp: target cgroup
 79 * @cpu: cpu on which rstat_cpu was updated
 80 *
 81 * @cgrp's rstat_cpu on @cpu was updated.  Put it on the parent's matching
 82 * rstat_cpu->updated_children list.  See the comment on top of
 83 * cgroup_rstat_cpu definition for details.
 84 */
 85__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
 86{
 87	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 88	unsigned long flags;
 89
 90	/*
 91	 * Speculative already-on-list test. This may race leading to
 92	 * temporary inaccuracies, which is fine.
 93	 *
 94	 * Because @parent's updated_children is terminated with @parent
 95	 * instead of NULL, we can tell whether @cgrp is on the list by
 96	 * testing the next pointer for NULL.
 97	 */
 98	if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
 99		return;
100
101	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, true);
102
103	/* put @cgrp and all ancestors on the corresponding updated lists */
104	while (true) {
105		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
106		struct cgroup *parent = cgroup_parent(cgrp);
107		struct cgroup_rstat_cpu *prstatc;
108
109		/*
110		 * Both additions and removals are bottom-up.  If a cgroup
111		 * is already in the tree, all ancestors are.
112		 */
113		if (rstatc->updated_next)
114			break;
115
116		/* Root has no parent to link it to, but mark it busy */
117		if (!parent) {
118			rstatc->updated_next = cgrp;
119			break;
120		}
121
122		prstatc = cgroup_rstat_cpu(parent, cpu);
123		rstatc->updated_next = prstatc->updated_children;
124		prstatc->updated_children = cgrp;
125
126		cgrp = parent;
127	}
128
129	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, true);
130}
131
132/**
133 * cgroup_rstat_push_children - push children cgroups into the given list
134 * @head: current head of the list (= subtree root)
135 * @child: first child of the root
136 * @cpu: target cpu
137 * Return: A new singly linked list of cgroups to be flushed
138 *
139 * Iteratively traverse down the cgroup_rstat_cpu updated tree level by
140 * level, pushing each level's parents before their children onto a
141 * singly linked list built from the tail backward, like "pushing"
142 * cgroups onto a stack. The root is pushed by the caller.
143 */
144static struct cgroup *cgroup_rstat_push_children(struct cgroup *head,
145						 struct cgroup *child, int cpu)
146{
147	struct cgroup *chead = child;	/* Head of child cgroup level */
148	struct cgroup *ghead = NULL;	/* Head of grandchild cgroup level */
149	struct cgroup *parent, *grandchild;
150	struct cgroup_rstat_cpu *crstatc;
151
152	child->rstat_flush_next = NULL;
153
154next_level:
155	while (chead) {
156		child = chead;
157		chead = child->rstat_flush_next;
158		parent = cgroup_parent(child);
159
160		/* updated_next is parent cgroup terminated */
161		while (child != parent) {
162			child->rstat_flush_next = head;
163			head = child;
164			crstatc = cgroup_rstat_cpu(child, cpu);
165			grandchild = crstatc->updated_children;
166			if (grandchild != child) {
167				/* Push the grandchild to the next level */
168				crstatc->updated_children = child;
169				grandchild->rstat_flush_next = ghead;
170				ghead = grandchild;
171			}
172			child = crstatc->updated_next;
173			crstatc->updated_next = NULL;
174		}
175	}
176
177	if (ghead) {
178		chead = ghead;
179		ghead = NULL;
180		goto next_level;
181	}
182	return head;
183}
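A hedged worked trace on a small hypothetical tree (A and B are updated children of the root, C is an updated child of A) shows how the levels unwind:

/*
 * caller pushes the root      head: root
 * level 1: push A, then B     head: B -> A -> root    ghead: C
 * level 2: push C             head: C -> B -> A -> root
 *
 * Every cgroup therefore appears before its parent, matching the
 * ordering guarantee documented on cgroup_rstat_updated_list().
 */
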
184
185/**
186 * cgroup_rstat_updated_list - return a list of updated cgroups to be flushed
187 * @root: root of the cgroup subtree to traverse
188 * @cpu: target cpu
189 * Return: A singly linked list of cgroups to be flushed
190 *
191 * Walks the updated rstat_cpu tree on @cpu from @root.  During traversal,
192 * each returned cgroup is unlinked from the updated tree.
193 *
194 * The only ordering guarantee is that, for a parent and a child pair
195 * covered by a given traversal, the child is before its parent in
196 * the list.
197 *
198 * Note that updated_children is self-terminated and points to a list of
199 * child cgroups if not empty, whereas updated_next is like a sibling link
200 * within the children list and is terminated by the parent cgroup. An
201 * exception here is the cgroup root, whose updated_next can be self-terminated.
202 */
203static struct cgroup *cgroup_rstat_updated_list(struct cgroup *root, int cpu)
204{
205	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
206	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(root, cpu);
207	struct cgroup *head = NULL, *parent, *child;
208	unsigned long flags;
209
210	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, root, false);
211
212	/* Return NULL if this subtree is not on-list */
213	if (!rstatc->updated_next)
214		goto unlock_ret;
215
216	/*
217	 * Unlink @root from its parent. As the updated_children list is
218	 * singly linked, we have to walk it to find the removal point.
219	 */
220	parent = cgroup_parent(root);
221	if (parent) {
222		struct cgroup_rstat_cpu *prstatc;
223		struct cgroup **nextp;
224
225		prstatc = cgroup_rstat_cpu(parent, cpu);
226		nextp = &prstatc->updated_children;
227		while (*nextp != root) {
228			struct cgroup_rstat_cpu *nrstatc;
229
230			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
231			WARN_ON_ONCE(*nextp == parent);
232			nextp = &nrstatc->updated_next;
233		}
234		*nextp = rstatc->updated_next;
235	}
236
237	rstatc->updated_next = NULL;
238
239	/* Push @root to the list first before pushing the children */
240	head = root;
241	root->rstat_flush_next = NULL;
242	child = rstatc->updated_children;
243	rstatc->updated_children = root;
244	if (child != root)
245		head = cgroup_rstat_push_children(head, child, cpu);
246unlock_ret:
247	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, root, flags, false);
248	return head;
249}
250
251/*
252 * A hook for bpf stat collectors to attach to and flush their stats.
253 * Together with providing bpf kfuncs for cgroup_rstat_updated() and
254 * cgroup_rstat_flush(), this enables a complete workflow where bpf progs that
255 * collect cgroup stats can integrate with rstat for efficient flushing.
256 *
257 * A static noinline declaration here could cause the compiler to optimize away
258 * the function. A global noinline declaration will keep the definition, but may
259 * optimize away the callsite. Therefore, __weak is needed to ensure that the
260 * call is still emitted, by telling the compiler that we don't know what the
261 * function might eventually be.
262 */
263
264__bpf_hook_start();
265
266__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
267				     struct cgroup *parent, int cpu)
268{
269}
270
271__bpf_hook_end();
272
273/*
274 * Helper functions for locking cgroup_rstat_lock.
275 *
276 * This makes it easier to diagnose locking issues and contention in
277 * production environments.  The parameter @cpu_in_loop indicates that the
278 * lock was released and re-taken while collecting data from the CPUs; the
279 * value -1 is used when obtaining the main lock, else it is the CPU
280 * number processed last.
281 */
282static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop)
283	__acquires(&cgroup_rstat_lock)
284{
285	bool contended;
286
287	contended = !spin_trylock_irq(&cgroup_rstat_lock);
288	if (contended) {
289		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
290		spin_lock_irq(&cgroup_rstat_lock);
291	}
292	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
293}
294
295static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
296	__releases(&cgroup_rstat_lock)
297{
298	trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false);
299	spin_unlock_irq(&cgroup_rstat_lock);
300}
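These tracepoints can also be consumed from BPF. A hedged sketch; the tracepoint name and argument list are read off the trace_* calls above, and the usual vmlinux.h/libbpf includes are assumed.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("tp_btf/cgroup_rstat_lock_contended")
int BPF_PROG(on_contended, struct cgroup *cgrp, int cpu, bool contended)
{
	/* cpu is -1 for the main lock, else the CPU processed last */
	bpf_printk("rstat contended: cgroup %llu cpu %d", cgrp->kn->id, cpu);
	return 0;
}

char _license[] SEC("license") = "GPL";
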
301
302/* see cgroup_rstat_flush() */
303static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
304	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
305{
306	int cpu;
307
308	lockdep_assert_held(&cgroup_rstat_lock);
309
310	for_each_possible_cpu(cpu) {
311		struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
312
313		for (; pos; pos = pos->rstat_flush_next) {
314			struct cgroup_subsys_state *css;
315
316			cgroup_base_stat_flush(pos, cpu);
317			bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
318
319			rcu_read_lock();
320			list_for_each_entry_rcu(css, &pos->rstat_css_list,
321						rstat_css_node)
322				css->ss->css_rstat_flush(css, cpu);
323			rcu_read_unlock();
324		}
325
326		/* play nice and yield if necessary */
327		if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
328			__cgroup_rstat_unlock(cgrp, cpu);
329			if (!cond_resched())
330				cpu_relax();
331			__cgroup_rstat_lock(cgrp, cpu);
332		}
333	}
334}
335
336/**
337 * cgroup_rstat_flush - flush stats in @cgrp's subtree
338 * @cgrp: target cgroup
339 *
340 * Collect all per-cpu stats in @cgrp's subtree into the global counters
341 * and propagate them upwards.  After this function returns, all cgroups in
342 * the subtree have up-to-date ->stat.
343 *
344 * This also gets all cgroups in the subtree including @cgrp off the
345 * ->updated_children lists.
346 *
347 * This function may block.
348 */
349__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
350{
351	might_sleep();
352
353	__cgroup_rstat_lock(cgrp, -1);
354	cgroup_rstat_flush_locked(cgrp);
355	__cgroup_rstat_unlock(cgrp, -1);
356}
357
358/**
359 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
360 * @cgrp: target cgroup
361 *
362 * Flush stats in @cgrp's subtree and prevent further flushes.  Must be
363 * paired with cgroup_rstat_flush_release().
364 *
365 * This function may block.
366 */
367void cgroup_rstat_flush_hold(struct cgroup *cgrp)
368	__acquires(&cgroup_rstat_lock)
369{
370	might_sleep();
371	__cgroup_rstat_lock(cgrp, -1);
372	cgroup_rstat_flush_locked(cgrp);
373}
374
375/**
376 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
377 * @cgrp: cgroup used by tracepoint
378 */
379void cgroup_rstat_flush_release(struct cgroup *cgrp)
380	__releases(&cgroup_rstat_lock)
381{
382	__cgroup_rstat_unlock(cgrp, -1);
383}
384
385int cgroup_rstat_init(struct cgroup *cgrp)
386{
387	int cpu;
388
389	/* the root cgrp has rstat_cpu preallocated */
390	if (!cgrp->rstat_cpu) {
391		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
392		if (!cgrp->rstat_cpu)
393			return -ENOMEM;
394	}
395
396	/* ->updated_children list is self terminated */
397	for_each_possible_cpu(cpu) {
398		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
399
400		rstatc->updated_children = cgrp;
401		u64_stats_init(&rstatc->bsync);
402	}
403
404	return 0;
405}
406
407void cgroup_rstat_exit(struct cgroup *cgrp)
408{
409	int cpu;
410
411	cgroup_rstat_flush(cgrp);
412
413	/* sanity check */
414	for_each_possible_cpu(cpu) {
415		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
416
417		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
418		    WARN_ON_ONCE(rstatc->updated_next))
419			return;
420	}
421
422	free_percpu(cgrp->rstat_cpu);
423	cgrp->rstat_cpu = NULL;
424}
425
426void __init cgroup_rstat_boot(void)
427{
428	int cpu;
429
430	for_each_possible_cpu(cpu)
431		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
432}
433
434/*
435 * Functions for cgroup basic resource statistics implemented on top of
436 * rstat.
437 */
438static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
439				 struct cgroup_base_stat *src_bstat)
440{
441	dst_bstat->cputime.utime += src_bstat->cputime.utime;
442	dst_bstat->cputime.stime += src_bstat->cputime.stime;
443	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
444#ifdef CONFIG_SCHED_CORE
445	dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
446#endif
447	dst_bstat->ntime += src_bstat->ntime;
448}
449
450static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
451				 struct cgroup_base_stat *src_bstat)
452{
453	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
454	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
455	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
456#ifdef CONFIG_SCHED_CORE
457	dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
458#endif
459	dst_bstat->ntime -= src_bstat->ntime;
460}
461
462static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
463{
464	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
465	struct cgroup *parent = cgroup_parent(cgrp);
466	struct cgroup_rstat_cpu *prstatc;
467	struct cgroup_base_stat delta;
468	unsigned seq;
469
470	/* Root-level stats are sourced from system-wide CPU stats */
471	if (!parent)
472		return;
473
474	/* fetch the current per-cpu values */
475	do {
476		seq = __u64_stats_fetch_begin(&rstatc->bsync);
477		delta = rstatc->bstat;
478	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
479
480	/* propagate per-cpu delta to cgroup and per-cpu global statistics */
481	cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
482	cgroup_base_stat_add(&cgrp->bstat, &delta);
483	cgroup_base_stat_add(&rstatc->last_bstat, &delta);
484	cgroup_base_stat_add(&rstatc->subtree_bstat, &delta);
485
486	/* propagate cgroup and per-cpu global delta to parent (unless that's root) */
487	if (cgroup_parent(parent)) {
488		delta = cgrp->bstat;
489		cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
490		cgroup_base_stat_add(&parent->bstat, &delta);
491		cgroup_base_stat_add(&cgrp->last_bstat, &delta);
492
493		delta = rstatc->subtree_bstat;
494		prstatc = cgroup_rstat_cpu(parent, cpu);
495		cgroup_base_stat_sub(&delta, &rstatc->last_subtree_bstat);
496		cgroup_base_stat_add(&prstatc->subtree_bstat, &delta);
497		cgroup_base_stat_add(&rstatc->last_subtree_bstat, &delta);
498	}
499}
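The two-level bookkeeping is the same subtract-then-add pattern applied twice, once for the cgroup hierarchy and once for the per-cpu subtree aggregate. Hedged worked numbers for a single cpu:

/*
 * per-cpu:  bstat = 10, last_bstat = 7               -> delta = 3
 *           cgrp->bstat += 3; last_bstat += 3; subtree_bstat += 3
 * global:   cgrp->bstat = 25, cgrp->last_bstat = 20  -> delta = 5
 *           parent->bstat += 5; cgrp->last_bstat += 5
 * per-cpu:  subtree_bstat = 12, last_subtree_bstat = 9 -> delta = 3
 *           prstatc->subtree_bstat += 3; last_subtree_bstat += 3
 * (all numbers hypothetical)
 */
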
500
501static struct cgroup_rstat_cpu *
502cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
503{
504	struct cgroup_rstat_cpu *rstatc;
505
506	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
507	*flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
508	return rstatc;
509}
510
511static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
512						 struct cgroup_rstat_cpu *rstatc,
513						 unsigned long flags)
514{
515	u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
516	cgroup_rstat_updated(cgrp, smp_processor_id());
517	put_cpu_ptr(rstatc);
518}
519
520void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
521{
522	struct cgroup_rstat_cpu *rstatc;
523	unsigned long flags;
524
525	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
526	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
527	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
528}
529
530void __cgroup_account_cputime_field(struct cgroup *cgrp,
531				    enum cpu_usage_stat index, u64 delta_exec)
532{
533	struct cgroup_rstat_cpu *rstatc;
534	unsigned long flags;
535
536	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
537
538	switch (index) {
539	case CPUTIME_NICE:
540		rstatc->bstat.ntime += delta_exec;
541		fallthrough;
542	case CPUTIME_USER:
543		rstatc->bstat.cputime.utime += delta_exec;
544		break;
545	case CPUTIME_SYSTEM:
546	case CPUTIME_IRQ:
547	case CPUTIME_SOFTIRQ:
548		rstatc->bstat.cputime.stime += delta_exec;
549		break;
550#ifdef CONFIG_SCHED_CORE
551	case CPUTIME_FORCEIDLE:
552		rstatc->bstat.forceidle_sum += delta_exec;
553		break;
554#endif
555	default:
556		break;
557	}
558
559	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
560}
561
562/*
563 * compute the cputime for the root cgroup by getting the per cpu data
564 * at a global level, then categorizing the fields in a manner consistent
565 * with how it is done by __cgroup_account_cputime_field for each bit of
566 * cpu time attributed to a cgroup.
567 */
568static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
569{
570	struct task_cputime *cputime = &bstat->cputime;
571	int i;
572
573	memset(bstat, 0, sizeof(*bstat));
574	for_each_possible_cpu(i) {
575		struct kernel_cpustat kcpustat;
576		u64 *cpustat = kcpustat.cpustat;
577		u64 user = 0;
578		u64 sys = 0;
579
580		kcpustat_cpu_fetch(&kcpustat, i);
581
582		user += cpustat[CPUTIME_USER];
583		user += cpustat[CPUTIME_NICE];
584		cputime->utime += user;
585
586		sys += cpustat[CPUTIME_SYSTEM];
587		sys += cpustat[CPUTIME_IRQ];
588		sys += cpustat[CPUTIME_SOFTIRQ];
589		cputime->stime += sys;
590
591		cputime->sum_exec_runtime += user;
592		cputime->sum_exec_runtime += sys;
593
594#ifdef CONFIG_SCHED_CORE
595		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
596#endif
597		bstat->ntime += cpustat[CPUTIME_NICE];
598	}
599}
600
601
602static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat *bstat)
603{
604#ifdef CONFIG_SCHED_CORE
605	u64 forceidle_time = bstat->forceidle_sum;
606
607	do_div(forceidle_time, NSEC_PER_USEC);
608	seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
609#endif
610}
611
612void cgroup_base_stat_cputime_show(struct seq_file *seq)
613{
614	struct cgroup *cgrp = seq_css(seq)->cgroup;
615	u64 usage, utime, stime, ntime;
616
617	if (cgroup_parent(cgrp)) {
618		cgroup_rstat_flush_hold(cgrp);
619		usage = cgrp->bstat.cputime.sum_exec_runtime;
620		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
621			       &utime, &stime);
622		ntime = cgrp->bstat.ntime;
623		cgroup_rstat_flush_release(cgrp);
624	} else {
625		/* cgrp->bstat of root is not actually used, reuse it */
626		root_cgroup_cputime(&cgrp->bstat);
627		usage = cgrp->bstat.cputime.sum_exec_runtime;
628		utime = cgrp->bstat.cputime.utime;
629		stime = cgrp->bstat.cputime.stime;
630		ntime = cgrp->bstat.ntime;
631	}
632
633	do_div(usage, NSEC_PER_USEC);
634	do_div(utime, NSEC_PER_USEC);
635	do_div(stime, NSEC_PER_USEC);
636	do_div(ntime, NSEC_PER_USEC);
637
638	seq_printf(seq, "usage_usec %llu\n"
639			"user_usec %llu\n"
640			"system_usec %llu\n"
641			"nice_usec %llu\n",
642			usage, utime, stime, ntime);
643
644	cgroup_force_idle_show(seq, &cgrp->bstat);
645}
646
647/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
648BTF_KFUNCS_START(bpf_rstat_kfunc_ids)
649BTF_ID_FLAGS(func, cgroup_rstat_updated)
650BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
651BTF_KFUNCS_END(bpf_rstat_kfunc_ids)
652
653static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
654	.owner          = THIS_MODULE,
655	.set            = &bpf_rstat_kfunc_ids,
656};
657
658static int __init bpf_rstat_kfunc_init(void)
659{
660	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
661					 &bpf_rstat_kfunc_set);
662}
663late_initcall(bpf_rstat_kfunc_init);