v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Data gathering module for Linux-VM Monitor Stream, Stage 1.
 * Collects misc. OS related data (CPU utilization, running processes).
 *
 * Copyright IBM Corp. 2003, 2006
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT	"appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <asm/appldata.h>
#include <asm/smp.h>

#include "appldata.h"

/*
 * OS data
 *
 * This is accessed as binary data by z/VM. If changes to it can't be avoided,
 * the structure version (product ID, see appldata_base.c) needs to be changed
 * as well and all documentation and z/VM applications using it must be
 * updated.
 *
 * The record layout is documented in the Linux for zSeries Device Drivers
 * book:
 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
 */
struct appldata_os_per_cpu {
	u32 per_cpu_user;	/* timer ticks spent in user mode   */
	u32 per_cpu_nice;	/* ... spent with modified priority */
	u32 per_cpu_system;	/* ... spent in kernel mode         */
	u32 per_cpu_idle;	/* ... spent in idle mode           */

	/* New in 2.6 */
	u32 per_cpu_irq;	/* ... spent in interrupts          */
	u32 per_cpu_softirq;	/* ... spent in softirqs            */
	u32 per_cpu_iowait;	/* ... spent while waiting for I/O  */

	/* New in modification level 01 */
	u32 per_cpu_steal;	/* ... stolen by hypervisor	    */
	u32 cpu_id;		/* number of this CPU		    */
} __attribute__((packed));

struct appldata_os_data {
	u64 timestamp;
	u32 sync_count_1;	/* after VM collected the record data, */
	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
				   same. If not, the record has been updated on
				   the Linux side while VM was collecting the
				   (possibly corrupt) data */

	u32 nr_cpus;		/* number of (virtual) CPUs        */
	u32 per_cpu_size;	/* size of the per-cpu data struct */
	u32 cpu_offset;		/* offset of the first per-cpu data struct */

	u32 nr_running;		/* number of runnable threads      */
	u32 nr_threads;		/* number of threads               */
	u32 avenrun[3];		/* average nr. of running processes during */
				/* the last 1, 5 and 15 minutes */

	/* New in 2.6 */
	u32 nr_iowait;		/* number of blocked threads
				   (waiting for I/O)               */

	/* per cpu data */
	struct appldata_os_per_cpu os_cpu[0];
} __attribute__((packed));

static struct appldata_os_data *appldata_os_data;

static struct appldata_ops ops = {
	.name	   = "os",
	.record_nr = APPLDATA_RECORD_OS_ID,
	.owner	   = THIS_MODULE,
	.mod_lvl   = {0xF0, 0xF1},		/* EBCDIC "01" */
};


/*
 * appldata_get_os_data()
 *
 * gather OS data
 */
static void appldata_get_os_data(void *data)
{
	int i, j, rc;
	struct appldata_os_data *os_data;
	unsigned int new_size;

	os_data = data;
	os_data->sync_count_1++;

	os_data->nr_threads = nr_threads;
	os_data->nr_running = nr_running();
	os_data->nr_iowait  = nr_iowait();
	os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
	os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
	os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);

	j = 0;
	for_each_online_cpu(i) {
		os_data->os_cpu[j].per_cpu_user =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
		os_data->os_cpu[j].per_cpu_nice =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
		os_data->os_cpu[j].per_cpu_system =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
		os_data->os_cpu[j].per_cpu_idle =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
		os_data->os_cpu[j].per_cpu_irq =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
		os_data->os_cpu[j].per_cpu_softirq =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
		os_data->os_cpu[j].per_cpu_iowait =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
		os_data->os_cpu[j].per_cpu_steal =
			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
		os_data->os_cpu[j].cpu_id = i;
		j++;
	}

	os_data->nr_cpus = j;

	new_size = sizeof(struct appldata_os_data) +
		   (os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
	if (ops.size != new_size) {
		if (ops.active) {
			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_START_INTERVAL_REC,
					   (unsigned long) ops.data, new_size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Starting a new OS data collection "
				       "failed with rc=%d\n", rc);

			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_STOP_REC,
					   (unsigned long) ops.data, ops.size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Stopping a faulty OS data "
				       "collection failed with rc=%d\n", rc);
		}
		ops.size = new_size;
	}
	os_data->timestamp = get_tod_clock();
	os_data->sync_count_2++;
}

/*
 * appldata_os_init()
 *
 * init data, register ops
 */
static int __init appldata_os_init(void)
{
	int rc, max_size;

	max_size = sizeof(struct appldata_os_data) +
		   (num_possible_cpus() * sizeof(struct appldata_os_per_cpu));
	if (max_size > APPLDATA_MAX_REC_SIZE) {
		pr_err("Maximum OS record size %i exceeds the maximum "
		       "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
		rc = -ENOMEM;
		goto out;
	}

	appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
	if (appldata_os_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
	appldata_os_data->cpu_offset   = offsetof(struct appldata_os_data,
							os_cpu);

	ops.data = appldata_os_data;
	ops.callback  = &appldata_get_os_data;
	rc = appldata_register_ops(&ops);
	if (rc != 0)
		kfree(appldata_os_data);
out:
	return rc;
}

/*
 * appldata_os_exit()
 *
 * unregister ops
 */
static void __exit appldata_os_exit(void)
{
	appldata_unregister_ops(&ops);
	kfree(appldata_os_data);
}


module_init(appldata_os_init);
module_exit(appldata_os_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");
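The sync_count_1/sync_count_2 pair and the nr_cpus/per_cpu_size/cpu_offset fields describe everything a consumer of this binary record needs: if the two counters differ, the record was being updated while it was copied and should be discarded, and the per-CPU entries are located by the offsets in the record rather than by a hard-coded layout. The stand-alone sketch below (not part of the module) illustrates that reader side; the struct mirrors only the leading fields of struct appldata_os_data, the helper name and buffer handling are hypothetical, and all multi-byte fields are big-endian as written on s390.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors only the leading fields of struct appldata_os_data above. */
struct os_record_header {
	uint64_t timestamp;
	uint32_t sync_count_1;	/* must equal sync_count_2, otherwise the  */
	uint32_t sync_count_2;	/* record changed while it was collected   */
	uint32_t nr_cpus;
	uint32_t per_cpu_size;	/* size of one per-CPU entry               */
	uint32_t cpu_offset;	/* offset of the first per-CPU entry       */
	/* nr_running, nr_threads, avenrun[3], nr_iowait and the per-CPU
	   entries follow in the raw buffer */
} __attribute__((packed));

/* Hypothetical helper: validate one raw record and print the first counter
   of every per-CPU entry. Returns -1 for an inconsistent record. */
int dump_os_record(const unsigned char *buf)
{
	const struct os_record_header *hdr =
		(const struct os_record_header *) buf;
	uint32_t i;

	if (hdr->sync_count_1 != hdr->sync_count_2)
		return -1;	/* possibly corrupt, discard */

	for (i = 0; i < hdr->nr_cpus; i++) {
		/* locate entry i via the offsets carried in the record */
		const unsigned char *cpu = buf + hdr->cpu_offset +
					   i * hdr->per_cpu_size;
		uint32_t per_cpu_user;	/* first u32 of each entry */

		memcpy(&per_cpu_user, cpu, sizeof(per_cpu_user));
		printf("cpu %u: %u user ticks\n", i, per_cpu_user);
	}
	return 0;
}

Because per_cpu_size comes from the record itself, the same walk keeps working when later modification levels append fields to the per-CPU entry.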
v3.15
 
/*
 * Data gathering module for Linux-VM Monitor Stream, Stage 1.
 * Collects misc. OS related data (CPU utilization, running processes).
 *
 * Copyright IBM Corp. 2003, 2006
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT	"appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <asm/appldata.h>
#include <asm/smp.h>

#include "appldata.h"


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

/*
 * OS data
 *
 * This is accessed as binary data by z/VM. If changes to it can't be avoided,
 * the structure version (product ID, see appldata_base.c) needs to be changed
 * as well and all documentation and z/VM applications using it must be
 * updated.
 *
 * The record layout is documented in the Linux for zSeries Device Drivers
 * book:
 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
 */
struct appldata_os_per_cpu {
	u32 per_cpu_user;	/* timer ticks spent in user mode   */
	u32 per_cpu_nice;	/* ... spent with modified priority */
	u32 per_cpu_system;	/* ... spent in kernel mode         */
	u32 per_cpu_idle;	/* ... spent in idle mode           */

	/* New in 2.6 */
	u32 per_cpu_irq;	/* ... spent in interrupts          */
	u32 per_cpu_softirq;	/* ... spent in softirqs            */
	u32 per_cpu_iowait;	/* ... spent while waiting for I/O  */

	/* New in modification level 01 */
	u32 per_cpu_steal;	/* ... stolen by hypervisor	    */
	u32 cpu_id;		/* number of this CPU		    */
} __attribute__((packed));

struct appldata_os_data {
	u64 timestamp;
	u32 sync_count_1;	/* after VM collected the record data, */
	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
				   same. If not, the record has been updated on
				   the Linux side while VM was collecting the
				   (possibly corrupt) data */

	u32 nr_cpus;		/* number of (virtual) CPUs        */
	u32 per_cpu_size;	/* size of the per-cpu data struct */
	u32 cpu_offset;		/* offset of the first per-cpu data struct */

	u32 nr_running;		/* number of runnable threads      */
	u32 nr_threads;		/* number of threads               */
	u32 avenrun[3];		/* average nr. of running processes during */
				/* the last 1, 5 and 15 minutes */

	/* New in 2.6 */
	u32 nr_iowait;		/* number of blocked threads
				   (waiting for I/O)               */

	/* per cpu data */
	struct appldata_os_per_cpu os_cpu[0];
} __attribute__((packed));

static struct appldata_os_data *appldata_os_data;

static struct appldata_ops ops = {
	.name	   = "os",
	.record_nr = APPLDATA_RECORD_OS_ID,
	.owner	   = THIS_MODULE,
	.mod_lvl   = {0xF0, 0xF1},		/* EBCDIC "01" */
};


/*
 * appldata_get_os_data()
 *
 * gather OS data
 */
static void appldata_get_os_data(void *data)
{
	int i, j, rc;
	struct appldata_os_data *os_data;
	unsigned int new_size;

	os_data = data;
	os_data->sync_count_1++;

	os_data->nr_threads = nr_threads;
	os_data->nr_running = nr_running();
	os_data->nr_iowait  = nr_iowait();
	os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
	os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
	os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);

	j = 0;
	for_each_online_cpu(i) {
		os_data->os_cpu[j].per_cpu_user =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
		os_data->os_cpu[j].per_cpu_nice =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
		os_data->os_cpu[j].per_cpu_system =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
		os_data->os_cpu[j].per_cpu_idle =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
		os_data->os_cpu[j].per_cpu_irq =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
		os_data->os_cpu[j].per_cpu_softirq =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
		os_data->os_cpu[j].per_cpu_iowait =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
		os_data->os_cpu[j].per_cpu_steal =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
		os_data->os_cpu[j].cpu_id = i;
		j++;
	}

	os_data->nr_cpus = j;

	new_size = sizeof(struct appldata_os_data) +
		   (os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
	if (ops.size != new_size) {
		if (ops.active) {
			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_START_INTERVAL_REC,
					   (unsigned long) ops.data, new_size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Starting a new OS data collection "
				       "failed with rc=%d\n", rc);

			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_STOP_REC,
					   (unsigned long) ops.data, ops.size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Stopping a faulty OS data "
				       "collection failed with rc=%d\n", rc);
		}
		ops.size = new_size;
	}
	os_data->timestamp = get_tod_clock();
	os_data->sync_count_2++;
}

/*
 * appldata_os_init()
 *
 * init data, register ops
 */
static int __init appldata_os_init(void)
{
	int rc, max_size;

	max_size = sizeof(struct appldata_os_data) +
		   (num_possible_cpus() * sizeof(struct appldata_os_per_cpu));
	if (max_size > APPLDATA_MAX_REC_SIZE) {
		pr_err("Maximum OS record size %i exceeds the maximum "
		       "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
		rc = -ENOMEM;
		goto out;
	}

	appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
	if (appldata_os_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
	appldata_os_data->cpu_offset   = offsetof(struct appldata_os_data,
							os_cpu);

	ops.data = appldata_os_data;
	ops.callback  = &appldata_get_os_data;
	rc = appldata_register_ops(&ops);
	if (rc != 0)
		kfree(appldata_os_data);
out:
	return rc;
}

/*
 * appldata_os_exit()
 *
 * unregister ops
 */
static void __exit appldata_os_exit(void)
{
	appldata_unregister_ops(&ops);
	kfree(appldata_os_data);
}


module_init(appldata_os_init);
module_exit(appldata_os_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");
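Both versions export avenrun[] in the scheduler's fixed-point format, with the FIXED_1/200 rounding term already added in appldata_get_os_data(); the v3.15 copy also still defines the matching LOAD_INT/LOAD_FRAC helpers. The stand-alone sketch below shows how a consumer of the record turns those raw u32 values into the familiar "X.YY" load averages. It assumes FSHIFT is 11, as in the kernel's load-average definitions; the sample values in main() are made up.

#include <stdint.h>
#include <stdio.h>

#define FSHIFT	11			/* bits of fractional precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed-point */
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

/* Decode the three avenrun values of one OS record (rounding to two decimal
   places was already applied on the kernel side). */
static void print_loadavg(const uint32_t avenrun[3])
{
	printf("load average: %u.%02u %u.%02u %u.%02u\n",
	       LOAD_INT(avenrun[0]), LOAD_FRAC(avenrun[0]),
	       LOAD_INT(avenrun[1]), LOAD_FRAC(avenrun[1]),
	       LOAD_INT(avenrun[2]), LOAD_FRAC(avenrun[2]));
}

int main(void)
{
	/* made-up sample: 0.50, 1.25 and 2.00 in FSHIFT=11 fixed point */
	uint32_t sample[3] = { FIXED_1 / 2, FIXED_1 + FIXED_1 / 4, 2 * FIXED_1 };

	print_loadavg(sample);
	return 0;
}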