v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2#include "cgroup-internal.h"
  3
  4#include <linux/sched/cputime.h>
  5
  6#include <linux/bpf.h>
  7#include <linux/btf.h>
  8#include <linux/btf_ids.h>
  9
 10static DEFINE_SPINLOCK(cgroup_rstat_lock);
 11static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 12
 13static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
 14
 15static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 16{
 17	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
 18}
 19
 20/**
 21 * cgroup_rstat_updated - keep track of updated rstat_cpu
 22 * @cgrp: target cgroup
 23 * @cpu: cpu on which rstat_cpu was updated
 24 *
 25 * @cgrp's rstat_cpu on @cpu was updated.  Put it on the parent's matching
 26 * rstat_cpu->updated_children list.  See the comment on top of
 27 * cgroup_rstat_cpu definition for details.
 28 */
 29void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
 30{
 31	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 32	unsigned long flags;
 33
 34	/*
 35	 * Speculative already-on-list test. This may race leading to
 36	 * temporary inaccuracies, which is fine.
 37	 *
 38	 * Because @parent's updated_children is terminated with @parent
 39	 * instead of NULL, we can tell whether @cgrp is on the list by
 40	 * testing the next pointer for NULL.
 41	 */
 42	if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
 43		return;
 44
 45	raw_spin_lock_irqsave(cpu_lock, flags);
 46
 47	/* put @cgrp and all ancestors on the corresponding updated lists */
 48	while (true) {
 49		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
 50		struct cgroup *parent = cgroup_parent(cgrp);
 51		struct cgroup_rstat_cpu *prstatc;
 52
 53		/*
 54		 * Both additions and removals are bottom-up.  If a cgroup
 55		 * is already in the tree, all ancestors are.
 56		 */
 57		if (rstatc->updated_next)
 58			break;
 59
 60		/* Root has no parent to link it to, but mark it busy */
 61		if (!parent) {
 62			rstatc->updated_next = cgrp;
 63			break;
 64		}
 65
 66		prstatc = cgroup_rstat_cpu(parent, cpu);
 67		rstatc->updated_next = prstatc->updated_children;
 68		prstatc->updated_children = cgrp;
 69
 70		cgrp = parent;
 71	}
 72
 73	raw_spin_unlock_irqrestore(cpu_lock, flags);
 74}
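
The self-terminated linkage above is easier to see in isolation. Here is a
minimal userspace C sketch of the same idea (assumptions: one cpu, no locking;
struct node, on_updated_list() and mark_updated() are illustrative names, not
kernel API):

	#include <assert.h>
	#include <stddef.h>

	struct node {
		struct node *parent;
		struct node *updated_children;	/* self-terminated: points to self when empty */
		struct node *updated_next;	/* NULL iff the node is off-list */
	};

	static int on_updated_list(struct node *n)
	{
		return n->updated_next != NULL;	/* the speculative test above */
	}

	static void mark_updated(struct node *n)
	{
		/* bottom-up insertion, mirroring cgroup_rstat_updated() */
		while (!on_updated_list(n)) {
			if (!n->parent) {
				n->updated_next = n;	/* root: mark busy */
				break;
			}
			n->updated_next = n->parent->updated_children;
			n->parent->updated_children = n;
			n = n->parent;
		}
	}

	int main(void)
	{
		struct node root  = { .updated_children = &root };
		struct node child = { .parent = &root, .updated_children = &child };

		assert(!on_updated_list(&child));
		mark_updated(&child);
		assert(on_updated_list(&child) && on_updated_list(&root));
		return 0;
	}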
 75
 76/**
 77 * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
 78 * @pos: current position
 79 * @root: root of the tree to traverse
 80 * @cpu: target cpu
 81 *
 82 * Walks the updated rstat_cpu tree on @cpu from @root.  %NULL @pos starts
 83 * the traversal and %NULL return indicates the end.  During traversal,
 84 * each returned cgroup is unlinked from the tree.  Must be called with the
 85 * matching cgroup_rstat_cpu_lock held.
 86 *
 87 * The only ordering guarantee is that, for a parent and a child pair
 88 * covered by a given traversal, if a child is visited, its parent is
 89 * guaranteed to be visited afterwards.
 90 */
 91static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 92						   struct cgroup *root, int cpu)
 93{
 94	struct cgroup_rstat_cpu *rstatc;
 95	struct cgroup *parent;
 96
 97	if (pos == root)
 98		return NULL;
 99
100	/*
101	 * We're going to walk down to the first leaf and visit/remove it.  We
102	 * can pick any unvisited node as the starting point.
103	 */
104	if (!pos) {
105		pos = root;
106		/* return NULL if this subtree is not on-list */
107		if (!cgroup_rstat_cpu(pos, cpu)->updated_next)
108			return NULL;
109	} else {
110		pos = cgroup_parent(pos);
111	}
112
113	/* walk down to the first leaf */
114	while (true) {
115		rstatc = cgroup_rstat_cpu(pos, cpu);
116		if (rstatc->updated_children == pos)
117			break;
118		pos = rstatc->updated_children;
119	}
120
121	/*
122	 * Unlink @pos from the tree.  As the updated_children list is
123	 * singly linked, we have to walk it to find the removal point.
124	 * However, due to the way we traverse, @pos will be the first
125	 * child in most cases. The only exception is @root.
126	 */
127	parent = cgroup_parent(pos);
128	if (parent) {
129		struct cgroup_rstat_cpu *prstatc;
130		struct cgroup **nextp;
131
132		prstatc = cgroup_rstat_cpu(parent, cpu);
133		nextp = &prstatc->updated_children;
134		while (*nextp != pos) {
135			struct cgroup_rstat_cpu *nrstatc;
136
137			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
138			WARN_ON_ONCE(*nextp == parent);
139			nextp = &nrstatc->updated_next;
140		}
141		*nextp = rstatc->updated_next;
142	}
143
144	rstatc->updated_next = NULL;
145	return pos;
146}
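
A flusher drives this helper in a loop until it returns NULL, exactly as
cgroup_rstat_flush_locked() does below; flush_one() is a hypothetical
per-cgroup callback:

	struct cgroup *pos = NULL;

	while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
		flush_one(pos, cpu);	/* a child is always popped before its parent */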
147
148/*
149 * A hook for bpf stat collectors to attach to and flush their stats.
150 * Together with providing bpf kfuncs for cgroup_rstat_updated() and
151 * cgroup_rstat_flush(), this enables a complete workflow where bpf progs that
152 * collect cgroup stats can integrate with rstat for efficient flushing.
153 *
154 * A static noinline declaration here could cause the compiler to optimize away
155 * the function. A global noinline declaration will keep the definition, but may
156 * optimize away the callsite. Therefore, __weak is needed to ensure that the
157 * call is still emitted, by telling the compiler that we don't know what the
158 * function might eventually be.
159 *
160 * __diag_* below are needed to dismiss the missing prototype warning.
161 */
162__diag_push();
163__diag_ignore_all("-Wmissing-prototypes",
164		  "kfuncs which will be used in BPF programs");
165
166__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
167				     struct cgroup *parent, int cpu)
168{
169}
170
171__diag_pop();
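
For context, a bpf stat collector attaches to this hook with an fentry
program. A sketch modeled on the kernel's cgroup_hierarchical_stats selftest
(program name and body are illustrative):

	// SPDX-License-Identifier: GPL-2.0-only
	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("fentry/bpf_rstat_flush")
	int BPF_PROG(my_rstat_flush, struct cgroup *cgrp, struct cgroup *parent, int cpu)
	{
		/* fold this cgroup's per-cpu counters into the parent's here */
		return 0;
	}

	char _license[] SEC("license") = "GPL";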
172
173/* see cgroup_rstat_flush() */
174static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
175	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
176{
177	int cpu;
178
179	lockdep_assert_held(&cgroup_rstat_lock);
180
181	for_each_possible_cpu(cpu) {
182		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
183						       cpu);
184		struct cgroup *pos = NULL;
185		unsigned long flags;
186
187		/*
188		 * The _irqsave() is needed because cgroup_rstat_lock is
189		 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
190		 * this lock with the _irq() suffix only disables interrupts on
191		 * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
192		 * interrupts on both configurations. The _irqsave() ensures
193		 * that interrupts are always disabled and later restored.
194		 */
195		raw_spin_lock_irqsave(cpu_lock, flags);
196		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
197			struct cgroup_subsys_state *css;
198
199			cgroup_base_stat_flush(pos, cpu);
200			bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
201
202			rcu_read_lock();
203			list_for_each_entry_rcu(css, &pos->rstat_css_list,
204						rstat_css_node)
205				css->ss->css_rstat_flush(css, cpu);
206			rcu_read_unlock();
207		}
208		raw_spin_unlock_irqrestore(cpu_lock, flags);
209
210		/* if @may_sleep, play nice and yield if necessary */
211		if (may_sleep && (need_resched() ||
212				  spin_needbreak(&cgroup_rstat_lock))) {
213			spin_unlock_irq(&cgroup_rstat_lock);
214			if (!cond_resched())
215				cpu_relax();
216			spin_lock_irq(&cgroup_rstat_lock);
217		}
218	}
219}
220
221/**
222 * cgroup_rstat_flush - flush stats in @cgrp's subtree
223 * @cgrp: target cgroup
224 *
225 * Collect all per-cpu stats in @cgrp's subtree into the global counters
226 * and propagate them upwards.  After this function returns, all cgroups in
227 * the subtree have up-to-date ->stat.
228 *
229 * This also gets all cgroups in the subtree including @cgrp off the
230 * ->updated_children lists.
231 *
232 * This function may block.
233 */
234void cgroup_rstat_flush(struct cgroup *cgrp)
235{
236	might_sleep();
237
238	spin_lock_irq(&cgroup_rstat_lock);
239	cgroup_rstat_flush_locked(cgrp, true);
240	spin_unlock_irq(&cgroup_rstat_lock);
241}
242
243/**
244 * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
245 * @cgrp: target cgroup
246 *
247 * This function can be called from any context.
248 */
249void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
250{
251	unsigned long flags;
252
253	spin_lock_irqsave(&cgroup_rstat_lock, flags);
254	cgroup_rstat_flush_locked(cgrp, false);
255	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
256}
257
258/**
259 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
260 * @cgrp: target cgroup
261 *
262 * Flush stats in @cgrp's subtree and prevent further flushes.  Must be
263 * paired with cgroup_rstat_flush_release().
264 *
265 * This function may block.
266 */
267void cgroup_rstat_flush_hold(struct cgroup *cgrp)
268	__acquires(&cgroup_rstat_lock)
269{
270	might_sleep();
271	spin_lock_irq(&cgroup_rstat_lock);
272	cgroup_rstat_flush_locked(cgrp, true);
273}
274
275/**
276 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
277 */
278void cgroup_rstat_flush_release(void)
279	__releases(&cgroup_rstat_lock)
280{
281	spin_unlock_irq(&cgroup_rstat_lock);
282}
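
The pair brackets a consistent read of the flushed counters, the same pattern
cgroup_base_stat_cputime_show() uses below; read_counters() is a stand-in:

	cgroup_rstat_flush_hold(cgrp);
	/* cgrp->bstat is now up to date and stable against concurrent flushers */
	read_counters(cgrp);
	cgroup_rstat_flush_release();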
283
284int cgroup_rstat_init(struct cgroup *cgrp)
285{
286	int cpu;
287
288	/* the root cgrp has rstat_cpu preallocated */
289	if (!cgrp->rstat_cpu) {
290		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
291		if (!cgrp->rstat_cpu)
292			return -ENOMEM;
293	}
294
295	/* ->updated_children list is self terminated */
296	for_each_possible_cpu(cpu) {
297		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
298
299		rstatc->updated_children = cgrp;
300		u64_stats_init(&rstatc->bsync);
301	}
302
303	return 0;
304}
305
306void cgroup_rstat_exit(struct cgroup *cgrp)
307{
308	int cpu;
309
310	cgroup_rstat_flush(cgrp);
311
312	/* sanity check */
313	for_each_possible_cpu(cpu) {
314		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
315
316		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
317		    WARN_ON_ONCE(rstatc->updated_next))
318			return;
319	}
320
321	free_percpu(cgrp->rstat_cpu);
322	cgrp->rstat_cpu = NULL;
323}
324
325void __init cgroup_rstat_boot(void)
326{
327	int cpu;
328
329	for_each_possible_cpu(cpu)
330		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
331}
332
333/*
334 * Functions for cgroup basic resource statistics implemented on top of
335 * rstat.
336 */
337static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
338				 struct cgroup_base_stat *src_bstat)
339{
340	dst_bstat->cputime.utime += src_bstat->cputime.utime;
341	dst_bstat->cputime.stime += src_bstat->cputime.stime;
342	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
343#ifdef CONFIG_SCHED_CORE
344	dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
345#endif
346}
347
348static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
349				 struct cgroup_base_stat *src_bstat)
350{
351	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
352	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
353	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
354#ifdef CONFIG_SCHED_CORE
355	dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
356#endif
357}
358
359static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
360{
361	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
362	struct cgroup *parent = cgroup_parent(cgrp);
363	struct cgroup_base_stat delta;
364	unsigned seq;
365
366	/* Root-level stats are sourced from system-wide CPU stats */
367	if (!parent)
368		return;
369
370	/* fetch the current per-cpu values */
371	do {
372		seq = __u64_stats_fetch_begin(&rstatc->bsync);
373		delta = rstatc->bstat;
374	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
375
376	/* propagate percpu delta to global */
377	cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
378	cgroup_base_stat_add(&cgrp->bstat, &delta);
379	cgroup_base_stat_add(&rstatc->last_bstat, &delta);
380
381	/* propagate global delta to parent (unless that's root) */
382	if (cgroup_parent(parent)) {
383		delta = cgrp->bstat;
384		cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
385		cgroup_base_stat_add(&parent->bstat, &delta);
386		cgroup_base_stat_add(&cgrp->last_bstat, &delta);
387	}
388}
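
A worked example with assumed numbers makes the two-counter bookkeeping
concrete:

	/*
	 * Assume, for one cpu and one field:
	 *   rstatc->bstat.cputime.utime      = 100   (running per-cpu counter)
	 *   rstatc->last_bstat.cputime.utime =  60   (snapshot at last flush)
	 *
	 * delta = 100 - 60 = 40
	 *   cgrp->bstat        += 40   (global counter catches up)
	 *   rstatc->last_bstat += 40   (snapshot becomes 100 again)
	 *
	 * The same subtract-then-add step repeats one level up, with
	 * cgrp->bstat and cgrp->last_bstat charging the parent.
	 */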
389
390static struct cgroup_rstat_cpu *
391cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
392{
393	struct cgroup_rstat_cpu *rstatc;
394
395	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
396	*flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
397	return rstatc;
398}
399
400static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
401						 struct cgroup_rstat_cpu *rstatc,
402						 unsigned long flags)
403{
404	u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
405	cgroup_rstat_updated(cgrp, smp_processor_id());
406	put_cpu_ptr(rstatc);
407}
408
409void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
410{
411	struct cgroup_rstat_cpu *rstatc;
412	unsigned long flags;
413
414	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
415	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
416	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
417}
418
419void __cgroup_account_cputime_field(struct cgroup *cgrp,
420				    enum cpu_usage_stat index, u64 delta_exec)
421{
422	struct cgroup_rstat_cpu *rstatc;
423	unsigned long flags;
424
425	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
426
427	switch (index) {
428	case CPUTIME_USER:
429	case CPUTIME_NICE:
430		rstatc->bstat.cputime.utime += delta_exec;
431		break;
432	case CPUTIME_SYSTEM:
433	case CPUTIME_IRQ:
434	case CPUTIME_SOFTIRQ:
435		rstatc->bstat.cputime.stime += delta_exec;
436		break;
437#ifdef CONFIG_SCHED_CORE
438	case CPUTIME_FORCEIDLE:
439		rstatc->bstat.forceidle_sum += delta_exec;
440		break;
441#endif
442	default:
443		break;
444	}
445
446	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
447}
448
449/*
450 * compute the cputime for the root cgroup by getting the per cpu data
451 * at a global level, then categorizing the fields in a manner consistent
452 * with how it is done by __cgroup_account_cputime_field for each bit of
453 * cpu time attributed to a cgroup.
454 */
455static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
456{
457	struct task_cputime *cputime = &bstat->cputime;
458	int i;
459
460	cputime->stime = 0;
461	cputime->utime = 0;
462	cputime->sum_exec_runtime = 0;
463	for_each_possible_cpu(i) {
464		struct kernel_cpustat kcpustat;
465		u64 *cpustat = kcpustat.cpustat;
466		u64 user = 0;
467		u64 sys = 0;
468
469		kcpustat_cpu_fetch(&kcpustat, i);
470
471		user += cpustat[CPUTIME_USER];
472		user += cpustat[CPUTIME_NICE];
473		cputime->utime += user;
474
475		sys += cpustat[CPUTIME_SYSTEM];
476		sys += cpustat[CPUTIME_IRQ];
477		sys += cpustat[CPUTIME_SOFTIRQ];
478		cputime->stime += sys;
479
480		cputime->sum_exec_runtime += user;
481		cputime->sum_exec_runtime += sys;
482		cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
483
484#ifdef CONFIG_SCHED_CORE
485		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
486#endif
487	}
488}
489
490void cgroup_base_stat_cputime_show(struct seq_file *seq)
491{
492	struct cgroup *cgrp = seq_css(seq)->cgroup;
493	u64 usage, utime, stime;
494	struct cgroup_base_stat bstat;
495#ifdef CONFIG_SCHED_CORE
496	u64 forceidle_time;
497#endif
498
499	if (cgroup_parent(cgrp)) {
500		cgroup_rstat_flush_hold(cgrp);
501		usage = cgrp->bstat.cputime.sum_exec_runtime;
502		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
503			       &utime, &stime);
504#ifdef CONFIG_SCHED_CORE
505		forceidle_time = cgrp->bstat.forceidle_sum;
506#endif
507		cgroup_rstat_flush_release();
508	} else {
509		root_cgroup_cputime(&bstat);
510		usage = bstat.cputime.sum_exec_runtime;
511		utime = bstat.cputime.utime;
512		stime = bstat.cputime.stime;
513#ifdef CONFIG_SCHED_CORE
514		forceidle_time = bstat.forceidle_sum;
515#endif
516	}
517
518	do_div(usage, NSEC_PER_USEC);
519	do_div(utime, NSEC_PER_USEC);
520	do_div(stime, NSEC_PER_USEC);
521#ifdef CONFIG_SCHED_CORE
522	do_div(forceidle_time, NSEC_PER_USEC);
523#endif
524
525	seq_printf(seq, "usage_usec %llu\n"
526		   "user_usec %llu\n"
527		   "system_usec %llu\n",
528		   usage, utime, stime);
529
530#ifdef CONFIG_SCHED_CORE
531	seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
532#endif
533}
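
On a kernel with CONFIG_SCHED_CORE, the resulting cpu.stat output reads
(values illustrative):

	usage_usec 114298
	user_usec 56231
	system_usec 58067
	core_sched.force_idle_usec 0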
534
535/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
536BTF_SET8_START(bpf_rstat_kfunc_ids)
537BTF_ID_FLAGS(func, cgroup_rstat_updated)
538BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
539BTF_SET8_END(bpf_rstat_kfunc_ids)
540
541static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
542	.owner          = THIS_MODULE,
543	.set            = &bpf_rstat_kfunc_ids,
544};
545
546static int __init bpf_rstat_kfunc_init(void)
547{
548	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
549					 &bpf_rstat_kfunc_set);
550}
551late_initcall(bpf_rstat_kfunc_init);
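
On the bpf side, the registered kfuncs are declared as ksyms and called
directly from a tracing program. A sketch following the kernel selftests
(tracepoint choice and the elided counter update are illustrative):

	extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym;
	extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym;

	SEC("tp_btf/cgroup_attach_task")
	int BPF_PROG(count_attach, struct cgroup *dst_cgrp,
		     struct task_struct *leader, bool threadgroup)
	{
		/* bump a per-cgroup counter in a map (elided), then mark dirty */
		cgroup_rstat_updated(dst_cgrp, bpf_get_smp_processor_id());
		return 0;
	}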
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2#include "cgroup-internal.h"
  3
  4#include <linux/sched/cputime.h>
  5
  6#include <linux/bpf.h>
  7#include <linux/btf.h>
  8#include <linux/btf_ids.h>
  9
 10static DEFINE_SPINLOCK(cgroup_rstat_lock);
 11static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 12
 13static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
 14
 15static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 16{
 17	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
 18}
 19
 20/**
 21 * cgroup_rstat_updated - keep track of updated rstat_cpu
 22 * @cgrp: target cgroup
 23 * @cpu: cpu on which rstat_cpu was updated
 24 *
 25 * @cgrp's rstat_cpu on @cpu was updated.  Put it on the parent's matching
 26 * rstat_cpu->updated_children list.  See the comment on top of
 27 * cgroup_rstat_cpu definition for details.
 28 */
 29__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
 30{
 31	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 32	unsigned long flags;
 33
 34	/*
 35	 * Speculative already-on-list test. This may race leading to
 36	 * temporary inaccuracies, which is fine.
 37	 *
 38	 * Because @parent's updated_children is terminated with @parent
 39	 * instead of NULL, we can tell whether @cgrp is on the list by
 40	 * testing the next pointer for NULL.
 41	 */
 42	if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
 43		return;
 44
 45	raw_spin_lock_irqsave(cpu_lock, flags);
 46
 47	/* put @cgrp and all ancestors on the corresponding updated lists */
 48	while (true) {
 49		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
 50		struct cgroup *parent = cgroup_parent(cgrp);
 51		struct cgroup_rstat_cpu *prstatc;
 52
 53		/*
 54		 * Both additions and removals are bottom-up.  If a cgroup
 55		 * is already in the tree, all ancestors are.
 56		 */
 57		if (rstatc->updated_next)
 58			break;
 59
 60		/* Root has no parent to link it to, but mark it busy */
 61		if (!parent) {
 62			rstatc->updated_next = cgrp;
 63			break;
 64		}
 65
 66		prstatc = cgroup_rstat_cpu(parent, cpu);
 67		rstatc->updated_next = prstatc->updated_children;
 68		prstatc->updated_children = cgrp;
 69
 70		cgrp = parent;
 71	}
 72
 73	raw_spin_unlock_irqrestore(cpu_lock, flags);
 74}
 75
 76/**
 77 * cgroup_rstat_push_children - push children cgroups into the given list
 78 * @head: current head of the list (= subtree root)
 79 * @child: first child of the root
 80 * @cpu: target cpu
 81 * Return: A new singly linked list of cgroups to be flushed
 82 *
 83 * Iteratively traverse down the cgroup_rstat_cpu updated tree level by
 84 * level, pushing all the parents before their next-level children onto a
 85 * singly linked list built from the tail backward, like "pushing" cgroups
 86 * onto a stack. The root is pushed by the caller.
 87 */
 88static struct cgroup *cgroup_rstat_push_children(struct cgroup *head,
 89						 struct cgroup *child, int cpu)
 90{
 91	struct cgroup *chead = child;	/* Head of child cgroup level */
 92	struct cgroup *ghead = NULL;	/* Head of grandchild cgroup level */
 93	struct cgroup *parent, *grandchild;
 94	struct cgroup_rstat_cpu *crstatc;
 95
 96	child->rstat_flush_next = NULL;
 97
 98next_level:
 99	while (chead) {
100		child = chead;
101		chead = child->rstat_flush_next;
102		parent = cgroup_parent(child);
103
104		/* updated_next is parent cgroup terminated */
105		while (child != parent) {
106			child->rstat_flush_next = head;
107			head = child;
108			crstatc = cgroup_rstat_cpu(child, cpu);
109			grandchild = crstatc->updated_children;
110			if (grandchild != child) {
111				/* Push the grandchild to the next level */
112				crstatc->updated_children = child;
113				grandchild->rstat_flush_next = ghead;
114				ghead = grandchild;
115			}
116			child = crstatc->updated_next;
117			crstatc->updated_next = NULL;
118		}
119	}
120
121	if (ghead) {
122		chead = ghead;
123		ghead = NULL;
124		goto next_level;
125	}
126	return head;
127}
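
A small worked example (assumed updated tree, one cpu) shows the resulting
order:

	/*
	 * Updated tree:      root
	 *                   /    \
	 *                  A      B
	 *                  |
	 *                  A1
	 *
	 * The caller pushes root first (head = root).  The first pass pushes
	 * the children A and B:     head: B -> A -> root
	 * A's child A1 is deferred to ghead and pushed on the next pass:
	 *                           head: A1 -> B -> A -> root
	 *
	 * Every child therefore precedes its parent in the final list.
	 */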
128
129/**
130 * cgroup_rstat_updated_list - return a list of updated cgroups to be flushed
131 * @root: root of the cgroup subtree to traverse
132 * @cpu: target cpu
133 * Return: A singly linked list of cgroups to be flushed
134 *
135 * Walks the updated rstat_cpu tree on @cpu from @root.  During traversal,
136 * each returned cgroup is unlinked from the updated tree.
137 *
138 * The only ordering guarantee is that, for a parent and a child pair
139 * covered by a given traversal, the child is before its parent in
140 * the list.
141 *
142 * Note that updated_children is self terminated and points to a list of
143 * child cgroups if not empty. Whereas updated_next is like a sibling link
144 * within the children list and terminated by the parent cgroup. An exception
145 * here is the cgroup root whose updated_next can be self terminated.
146 */
147static struct cgroup *cgroup_rstat_updated_list(struct cgroup *root, int cpu)
148{
149	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
150	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(root, cpu);
151	struct cgroup *head = NULL, *parent, *child;
152	unsigned long flags;
153
154	/*
155	 * The _irqsave() is needed because cgroup_rstat_lock is
156	 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
157	 * this lock with the _irq() suffix only disables interrupts on
158	 * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
159	 * interrupts on both configurations. The _irqsave() ensures
160	 * that interrupts are always disabled and later restored.
161	 */
162	raw_spin_lock_irqsave(cpu_lock, flags);
163
164	/* Return NULL if this subtree is not on-list */
165	if (!rstatc->updated_next)
166		goto unlock_ret;
167
168	/*
169	 * Unlink @root from its parent. As the updated_children list is
170	 * singly linked, we have to walk it to find the removal point.
171	 */
172	parent = cgroup_parent(root);
173	if (parent) {
174		struct cgroup_rstat_cpu *prstatc;
175		struct cgroup **nextp;
176
177		prstatc = cgroup_rstat_cpu(parent, cpu);
178		nextp = &prstatc->updated_children;
179		while (*nextp != root) {
180			struct cgroup_rstat_cpu *nrstatc;
181
182			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
183			WARN_ON_ONCE(*nextp == parent);
184			nextp = &nrstatc->updated_next;
185		}
186		*nextp = rstatc->updated_next;
187	}
188
189	rstatc->updated_next = NULL;
190
191	/* Push @root to the list first before pushing the children */
192	head = root;
193	root->rstat_flush_next = NULL;
194	child = rstatc->updated_children;
195	rstatc->updated_children = root;
196	if (child != root)
197		head = cgroup_rstat_push_children(head, child, cpu);
198unlock_ret:
199	raw_spin_unlock_irqrestore(cpu_lock, flags);
200	return head;
201}
202
203/*
204 * A hook for bpf stat collectors to attach to and flush their stats.
205 * Together with providing bpf kfuncs for cgroup_rstat_updated() and
206 * cgroup_rstat_flush(), this enables a complete workflow where bpf progs that
207 * collect cgroup stats can integrate with rstat for efficient flushing.
208 *
209 * A static noinline declaration here could cause the compiler to optimize away
210 * the function. A global noinline declaration will keep the definition, but may
211 * optimize away the callsite. Therefore, __weak is needed to ensure that the
212 * call is still emitted, by telling the compiler that we don't know what the
213 * function might eventually be.
214 */
215
216__bpf_hook_start();
217
218__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
219				     struct cgroup *parent, int cpu)
220{
221}
222
223__bpf_hook_end();
224
225/* see cgroup_rstat_flush() */
226static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
227	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
228{
229	int cpu;
230
231	lockdep_assert_held(&cgroup_rstat_lock);
232
233	for_each_possible_cpu(cpu) {
234		struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
235
236		for (; pos; pos = pos->rstat_flush_next) {
237			struct cgroup_subsys_state *css;
238
239			cgroup_base_stat_flush(pos, cpu);
240			bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
241
242			rcu_read_lock();
243			list_for_each_entry_rcu(css, &pos->rstat_css_list,
244						rstat_css_node)
245				css->ss->css_rstat_flush(css, cpu);
246			rcu_read_unlock();
247		}
248
249		/* play nice and yield if necessary */
250		if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
251			spin_unlock_irq(&cgroup_rstat_lock);
252			if (!cond_resched())
253				cpu_relax();
254			spin_lock_irq(&cgroup_rstat_lock);
255		}
256	}
257}
258
259/**
260 * cgroup_rstat_flush - flush stats in @cgrp's subtree
261 * @cgrp: target cgroup
262 *
263 * Collect all per-cpu stats in @cgrp's subtree into the global counters
264 * and propagate them upwards.  After this function returns, all cgroups in
265 * the subtree have up-to-date ->stat.
266 *
267 * This also gets all cgroups in the subtree including @cgrp off the
268 * ->updated_children lists.
269 *
270 * This function may block.
271 */
272__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
273{
274	might_sleep();
275
276	spin_lock_irq(&cgroup_rstat_lock);
277	cgroup_rstat_flush_locked(cgrp);
278	spin_unlock_irq(&cgroup_rstat_lock);
279}
280
281/**
282 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
283 * @cgrp: target cgroup
284 *
285 * Flush stats in @cgrp's subtree and prevent further flushes.  Must be
286 * paired with cgroup_rstat_flush_release().
287 *
288 * This function may block.
289 */
290void cgroup_rstat_flush_hold(struct cgroup *cgrp)
291	__acquires(&cgroup_rstat_lock)
292{
293	might_sleep();
294	spin_lock_irq(&cgroup_rstat_lock);
295	cgroup_rstat_flush_locked(cgrp);
296}
297
298/**
299 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
300 */
301void cgroup_rstat_flush_release(void)
302	__releases(&cgroup_rstat_lock)
303{
304	spin_unlock_irq(&cgroup_rstat_lock);
305}
306
307int cgroup_rstat_init(struct cgroup *cgrp)
308{
309	int cpu;
310
311	/* the root cgrp has rstat_cpu preallocated */
312	if (!cgrp->rstat_cpu) {
313		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
314		if (!cgrp->rstat_cpu)
315			return -ENOMEM;
316	}
317
318	/* ->updated_children list is self terminated */
319	for_each_possible_cpu(cpu) {
320		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
321
322		rstatc->updated_children = cgrp;
323		u64_stats_init(&rstatc->bsync);
324	}
325
326	return 0;
327}
328
329void cgroup_rstat_exit(struct cgroup *cgrp)
330{
331	int cpu;
332
333	cgroup_rstat_flush(cgrp);
334
335	/* sanity check */
336	for_each_possible_cpu(cpu) {
337		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
338
339		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
340		    WARN_ON_ONCE(rstatc->updated_next))
341			return;
342	}
343
344	free_percpu(cgrp->rstat_cpu);
345	cgrp->rstat_cpu = NULL;
346}
347
348void __init cgroup_rstat_boot(void)
349{
350	int cpu;
351
352	for_each_possible_cpu(cpu)
353		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
354}
355
356/*
357 * Functions for cgroup basic resource statistics implemented on top of
358 * rstat.
359 */
360static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
361				 struct cgroup_base_stat *src_bstat)
362{
363	dst_bstat->cputime.utime += src_bstat->cputime.utime;
364	dst_bstat->cputime.stime += src_bstat->cputime.stime;
365	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
366#ifdef CONFIG_SCHED_CORE
367	dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
368#endif
369}
370
371static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
372				 struct cgroup_base_stat *src_bstat)
373{
374	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
375	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
376	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
377#ifdef CONFIG_SCHED_CORE
378	dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
379#endif
380}
381
382static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
383{
384	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
385	struct cgroup *parent = cgroup_parent(cgrp);
386	struct cgroup_rstat_cpu *prstatc;
387	struct cgroup_base_stat delta;
388	unsigned seq;
389
390	/* Root-level stats are sourced from system-wide CPU stats */
391	if (!parent)
392		return;
393
394	/* fetch the current per-cpu values */
395	do {
396		seq = __u64_stats_fetch_begin(&rstatc->bsync);
397		delta = rstatc->bstat;
398	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
399
400	/* propagate per-cpu delta to cgroup and per-cpu global statistics */
401	cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
402	cgroup_base_stat_add(&cgrp->bstat, &delta);
403	cgroup_base_stat_add(&rstatc->last_bstat, &delta);
404	cgroup_base_stat_add(&rstatc->subtree_bstat, &delta);
405
406	/* propagate cgroup and per-cpu global delta to parent (unless that's root) */
407	if (cgroup_parent(parent)) {
408		delta = cgrp->bstat;
409		cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
410		cgroup_base_stat_add(&parent->bstat, &delta);
411		cgroup_base_stat_add(&cgrp->last_bstat, &delta);
412
413		delta = rstatc->subtree_bstat;
414		prstatc = cgroup_rstat_cpu(parent, cpu);
415		cgroup_base_stat_sub(&delta, &rstatc->last_subtree_bstat);
416		cgroup_base_stat_add(&prstatc->subtree_bstat, &delta);
417		cgroup_base_stat_add(&rstatc->last_subtree_bstat, &delta);
418	}
419}
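
Compared with the v6.2 flush above, this version also maintains per-cpu
subtree totals. A hedged summary of the extra counter pair (semantics
inferred from the code above, not from separate documentation):

	/*
	 * (subtree_bstat, last_subtree_bstat) hold, per cpu, the cumulative
	 * stats of the subtree rooted at this cgroup; the delta between them
	 * is folded into the parent's per-cpu subtree_bstat with the same
	 * subtract-then-add step used for (bstat, last_bstat).
	 */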
420
421static struct cgroup_rstat_cpu *
422cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
423{
424	struct cgroup_rstat_cpu *rstatc;
425
426	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
427	*flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
428	return rstatc;
429}
430
431static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
432						 struct cgroup_rstat_cpu *rstatc,
433						 unsigned long flags)
434{
435	u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
436	cgroup_rstat_updated(cgrp, smp_processor_id());
437	put_cpu_ptr(rstatc);
438}
439
440void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
441{
442	struct cgroup_rstat_cpu *rstatc;
443	unsigned long flags;
444
445	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
446	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
447	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
448}
449
450void __cgroup_account_cputime_field(struct cgroup *cgrp,
451				    enum cpu_usage_stat index, u64 delta_exec)
452{
453	struct cgroup_rstat_cpu *rstatc;
454	unsigned long flags;
455
456	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
457
458	switch (index) {
459	case CPUTIME_USER:
460	case CPUTIME_NICE:
461		rstatc->bstat.cputime.utime += delta_exec;
462		break;
463	case CPUTIME_SYSTEM:
464	case CPUTIME_IRQ:
465	case CPUTIME_SOFTIRQ:
466		rstatc->bstat.cputime.stime += delta_exec;
467		break;
468#ifdef CONFIG_SCHED_CORE
469	case CPUTIME_FORCEIDLE:
470		rstatc->bstat.forceidle_sum += delta_exec;
471		break;
472#endif
473	default:
474		break;
475	}
476
477	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
478}
479
480/*
481 * compute the cputime for the root cgroup by getting the per cpu data
482 * at a global level, then categorizing the fields in a manner consistent
483 * with how it is done by __cgroup_account_cputime_field for each bit of
484 * cpu time attributed to a cgroup.
485 */
486static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
487{
488	struct task_cputime *cputime = &bstat->cputime;
489	int i;
490
491	memset(bstat, 0, sizeof(*bstat));
492	for_each_possible_cpu(i) {
493		struct kernel_cpustat kcpustat;
494		u64 *cpustat = kcpustat.cpustat;
495		u64 user = 0;
496		u64 sys = 0;
497
498		kcpustat_cpu_fetch(&kcpustat, i);
499
500		user += cpustat[CPUTIME_USER];
501		user += cpustat[CPUTIME_NICE];
502		cputime->utime += user;
503
504		sys += cpustat[CPUTIME_SYSTEM];
505		sys += cpustat[CPUTIME_IRQ];
506		sys += cpustat[CPUTIME_SOFTIRQ];
507		cputime->stime += sys;
508
509		cputime->sum_exec_runtime += user;
510		cputime->sum_exec_runtime += sys;
511		cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
512
513#ifdef CONFIG_SCHED_CORE
514		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
515#endif
516	}
517}
518
519void cgroup_base_stat_cputime_show(struct seq_file *seq)
520{
521	struct cgroup *cgrp = seq_css(seq)->cgroup;
522	u64 usage, utime, stime;
523	struct cgroup_base_stat bstat;
524#ifdef CONFIG_SCHED_CORE
525	u64 forceidle_time;
526#endif
527
528	if (cgroup_parent(cgrp)) {
529		cgroup_rstat_flush_hold(cgrp);
530		usage = cgrp->bstat.cputime.sum_exec_runtime;
531		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
532			       &utime, &stime);
533#ifdef CONFIG_SCHED_CORE
534		forceidle_time = cgrp->bstat.forceidle_sum;
535#endif
536		cgroup_rstat_flush_release();
537	} else {
538		root_cgroup_cputime(&bstat);
539		usage = bstat.cputime.sum_exec_runtime;
540		utime = bstat.cputime.utime;
541		stime = bstat.cputime.stime;
542#ifdef CONFIG_SCHED_CORE
543		forceidle_time = bstat.forceidle_sum;
544#endif
545	}
546
547	do_div(usage, NSEC_PER_USEC);
548	do_div(utime, NSEC_PER_USEC);
549	do_div(stime, NSEC_PER_USEC);
550#ifdef CONFIG_SCHED_CORE
551	do_div(forceidle_time, NSEC_PER_USEC);
552#endif
553
554	seq_printf(seq, "usage_usec %llu\n"
555		   "user_usec %llu\n"
556		   "system_usec %llu\n",
557		   usage, utime, stime);
558
559#ifdef CONFIG_SCHED_CORE
560	seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
561#endif
562}
563
564/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
565BTF_SET8_START(bpf_rstat_kfunc_ids)
566BTF_ID_FLAGS(func, cgroup_rstat_updated)
567BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
568BTF_SET8_END(bpf_rstat_kfunc_ids)
569
570static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
571	.owner          = THIS_MODULE,
572	.set            = &bpf_rstat_kfunc_ids,
573};
574
575static int __init bpf_rstat_kfunc_init(void)
576{
577	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
578					 &bpf_rstat_kfunc_set);
579}
580late_initcall(bpf_rstat_kfunc_init);