v3.15

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include "sched.h"

/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	kfree(mask_str);
	return 0;
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};

static int schedstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &schedstat_sops);
}

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
subsys_initcall(proc_schedstat_init);
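The file is registered with the full seq_file iterator (start/next/stop/show) because /proc/schedstat emits a header record followed by one record per online CPU, and seq_read() can then resume at any record without regenerating the whole file. For a proc file whose entire contents fit comfortably in a single show() callback, kernels of this era also provide the simpler single_open() helper. The sketch below only illustrates that variant; it is not part of the kernel tree, and the module and the "example_stats" file name are made up:

	/*
	 * Illustrative module (not from the kernel sources): a one-shot
	 * seq_file proc entry using single_open() instead of a full
	 * seq_operations iterator.
	 */
	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *m, void *v)
	{
		/* Everything is emitted in one pass. */
		seq_printf(m, "version %d\n", 1);
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, NULL);
	}

	static const struct file_operations example_fops = {
		.open    = example_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	static int __init example_init(void)
	{
		proc_create("example_stats", 0, NULL, &example_fops);
		return 0;
	}

	static void __exit example_exit(void)
	{
		remove_proc_entry("example_stats", NULL);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

The iterator form used by schedstat trades that simplicity for per-record output, which keeps the seq_file buffer small and lets readers restart at the record boundary encoded in the loff_t offset.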
v4.6

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include "sched.h"

/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};

static int schedstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &schedstat_sops);
}

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
subsys_initcall(proc_schedstat_init);
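Because everything above is exposed through seq_read(), /proc/schedstat is consumed as ordinary line-oriented text: a "version" line, a "timestamp" line, then a "cpuN" line (and, with CONFIG_SMP, "domainN" lines) per online CPU. The short userspace reader below is only an illustration and not part of the kernel sources; interpreting the individual counters still requires matching the SCHEDSTAT_VERSION against the kernel's scheduler statistics documentation.

	/*
	 * Userspace sketch (not kernel code): print the header and the
	 * per-CPU records from /proc/schedstat, skipping domainN lines.
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[4096];
		FILE *f = fopen("/proc/schedstat", "r");

		if (!f) {
			perror("/proc/schedstat");
			return 1;
		}
		while (fgets(line, sizeof(line), f)) {
			if (strncmp(line, "domain", 6) != 0)
				fputs(line, stdout);
		}
		fclose(f);
		return 0;
	}

Since the fields are cumulative counters (the timestamp is jiffies), the usual pattern is to read the file twice and subtract the two samples to obtain per-interval rates.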