arch/powerpc/kvm/timing.c (v6.8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/time.h>
#include <asm-generic/div64.h>

#include "timing.h"

void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
	int i;

	/* Take a lock to avoid concurrent updates */
	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.last_exit_type = 0xDEAD;
	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		vcpu->arch.timing_count_type[i] = 0;
		vcpu->arch.timing_max_duration[i] = 0;
		vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
		vcpu->arch.timing_sum_duration[i] = 0;
		vcpu->arch.timing_sum_quad_duration[i] = 0;
	}
	vcpu->arch.timing_last_exit = 0;
	vcpu->arch.timing_exit.tv64 = 0;
	vcpu->arch.timing_last_enter.tv64 = 0;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}

static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
	u64 old;

	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.timing_count_type[type]++;

	/* sum */
	old = vcpu->arch.timing_sum_duration[type];
	vcpu->arch.timing_sum_duration[type] += duration;
	if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
		printk(KERN_ERR"%s - wrap adding sum of durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old, vcpu->arch.timing_sum_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* square sum */
	old = vcpu->arch.timing_sum_quad_duration[type];
	vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
	if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
		printk(KERN_ERR"%s - wrap adding sum of squared durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old,
			vcpu->arch.timing_sum_quad_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* set min/max */
	if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
		vcpu->arch.timing_min_duration[type] = duration;
	if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
		vcpu->arch.timing_max_duration[type] = duration;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}

void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
	u64 exit = vcpu->arch.timing_last_exit;
	u64 enter = vcpu->arch.timing_last_enter.tv64;

	/* save exit time, used next exit when the reenter time is known */
	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
		return; /* skip incomplete cycle (e.g. after reset) */

	/* update statistics for average and standard deviation */
	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
	/* enter -> timing_last_exit is time spent in guest - log this too */
	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
			TIMEINGUEST);
}

static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
	[MMIO_EXITS] =              "MMIO",
	[SIGNAL_EXITS] =            "SIGNAL",
	[ITLB_REAL_MISS_EXITS] =    "ITLBREAL",
	[ITLB_VIRT_MISS_EXITS] =    "ITLBVIRT",
	[DTLB_REAL_MISS_EXITS] =    "DTLBREAL",
	[DTLB_VIRT_MISS_EXITS] =    "DTLBVIRT",
	[SYSCALL_EXITS] =           "SYSCALL",
	[ISI_EXITS] =               "ISI",
	[DSI_EXITS] =               "DSI",
	[EMULATED_INST_EXITS] =     "EMULINST",
	[EMULATED_MTMSRWE_EXITS] =  "EMUL_WAIT",
	[EMULATED_WRTEE_EXITS] =    "EMUL_WRTEE",
	[EMULATED_MTSPR_EXITS] =    "EMUL_MTSPR",
	[EMULATED_MFSPR_EXITS] =    "EMUL_MFSPR",
	[EMULATED_MTMSR_EXITS] =    "EMUL_MTMSR",
	[EMULATED_MFMSR_EXITS] =    "EMUL_MFMSR",
	[EMULATED_TLBSX_EXITS] =    "EMUL_TLBSX",
	[EMULATED_TLBWE_EXITS] =    "EMUL_TLBWE",
	[EMULATED_RFI_EXITS] =      "EMUL_RFI",
	[DEC_EXITS] =               "DEC",
	[EXT_INTR_EXITS] =          "EXTINT",
	[HALT_WAKEUP] =             "HALT",
	[USR_PR_INST] =             "USR_PR_INST",
	[FP_UNAVAIL] =              "FP_UNAVAIL",
	[DEBUG_EXITS] =             "DEBUG",
	[TIMEINGUEST] =             "TIMEINGUEST"
};

static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
	struct kvm_vcpu *vcpu = m->private;
	int i;
	u64 min, max, sum, sum_quad;

	seq_puts(m, "type	count	min	max	sum	sum_squared\n");

	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {

		min = vcpu->arch.timing_min_duration[i];
		do_div(min, tb_ticks_per_usec);
		max = vcpu->arch.timing_max_duration[i];
		do_div(max, tb_ticks_per_usec);
		sum = vcpu->arch.timing_sum_duration[i];
		do_div(sum, tb_ticks_per_usec);
		sum_quad = vcpu->arch.timing_sum_quad_duration[i];
		do_div(sum_quad, tb_ticks_per_usec);

		seq_printf(m, "%12s	%10d	%10lld	%10lld	%20lld	%20lld\n",
			kvm_exit_names[i],
			vcpu->arch.timing_count_type[i],
			min,
			max,
			sum,
			sum_quad);

	}
	return 0;
}

/* Write 'c' to clear the timing statistics. */
static ssize_t kvmppc_exit_timing_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	int err = -EINVAL;
	char c;

	if (count > 1) {
		goto done;
	}

	if (get_user(c, user_buf)) {
		err = -EFAULT;
		goto done;
	}

	if (c == 'c') {
		struct seq_file *seqf = file->private_data;
		struct kvm_vcpu *vcpu = seqf->private;
		/* Write does not affect our buffers previously generated with
		 * show. seq_file is locked here to prevent races of init with
		 * a show call */
		mutex_lock(&seqf->lock);
		kvmppc_init_timing_stats(vcpu);
		mutex_unlock(&seqf->lock);
		err = count;
	}

done:
	return err;
}

static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
	return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}

static const struct file_operations kvmppc_exit_timing_fops = {
	.owner   = THIS_MODULE,
	.open    = kvmppc_exit_timing_open,
	.read    = seq_read,
	.write   = kvmppc_exit_timing_write,
	.llseek  = seq_lseek,
	.release = single_release,
};

int kvmppc_create_vcpu_debugfs_e500(struct kvm_vcpu *vcpu,
				    struct dentry *debugfs_dentry)
{
	debugfs_create_file("timing", 0666, debugfs_dentry,
			    vcpu, &kvmppc_exit_timing_fops);
	return 0;
}
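
For reference, a minimal user-space sketch of how the per-vCPU "timing" file created by kvmppc_create_vcpu_debugfs_e500() above might be consumed. The debugfs mount point and the vcpu directory layout are assumptions (they are set up by common KVM code, not by this file), and the file only exists when the kernel is built with the exit-timing support this code depends on. Reading dumps the table produced by kvmppc_exit_timing_show(); writing the single character 'c' clears the statistics via kvmppc_exit_timing_write().

/* Hedged example, not part of the kernel sources above. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	/* e.g. /sys/kernel/debug/kvm/<vm>/vcpu0/timing -- path layout assumed */
	const char *path;
	char line[256];
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <timing-file> [clear]\n", argv[0]);
		return 1;
	}
	path = argv[1];

	if (argc > 2 && strcmp(argv[2], "clear") == 0) {
		/* kvmppc_exit_timing_write() resets the stats when it sees 'c' */
		f = fopen(path, "w");
		if (!f) {
			perror("fopen");
			return 1;
		}
		fputc('c', f);
		fclose(f);
		return 0;
	}

	/* Dump the seq_file output: a header line, then one tab-separated
	 * row per exit type with count, min, max, sum and sum_squared. */
	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}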
arch/powerpc/kvm/timing.c (v3.5.6)
 
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/time.h>
#include <asm-generic/div64.h>

#include "timing.h"

void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
	int i;

	/* Take a lock to avoid concurrent updates */
	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.last_exit_type = 0xDEAD;
	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		vcpu->arch.timing_count_type[i] = 0;
		vcpu->arch.timing_max_duration[i] = 0;
		vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
		vcpu->arch.timing_sum_duration[i] = 0;
		vcpu->arch.timing_sum_quad_duration[i] = 0;
	}
	vcpu->arch.timing_last_exit = 0;
	vcpu->arch.timing_exit.tv64 = 0;
	vcpu->arch.timing_last_enter.tv64 = 0;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}

static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
	u64 old;

	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.timing_count_type[type]++;

	/* sum */
	old = vcpu->arch.timing_sum_duration[type];
	vcpu->arch.timing_sum_duration[type] += duration;
	if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
		printk(KERN_ERR"%s - wrap adding sum of durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old, vcpu->arch.timing_sum_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* square sum */
	old = vcpu->arch.timing_sum_quad_duration[type];
	vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
	if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
		printk(KERN_ERR"%s - wrap adding sum of squared durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old,
			vcpu->arch.timing_sum_quad_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* set min/max */
	if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
		vcpu->arch.timing_min_duration[type] = duration;
	if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
		vcpu->arch.timing_max_duration[type] = duration;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}

void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
	u64 exit = vcpu->arch.timing_last_exit;
	u64 enter = vcpu->arch.timing_last_enter.tv64;

	/* save exit time, used next exit when the reenter time is known */
	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
		return; /* skip incomplete cycle (e.g. after reset) */

	/* update statistics for average and standard deviation */
	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
	/* enter -> timing_last_exit is time spent in guest - log this too */
	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
			TIMEINGUEST);
}

static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
	[MMIO_EXITS] =              "MMIO",
	[DCR_EXITS] =               "DCR",
	[SIGNAL_EXITS] =            "SIGNAL",
	[ITLB_REAL_MISS_EXITS] =    "ITLBREAL",
	[ITLB_VIRT_MISS_EXITS] =    "ITLBVIRT",
	[DTLB_REAL_MISS_EXITS] =    "DTLBREAL",
	[DTLB_VIRT_MISS_EXITS] =    "DTLBVIRT",
	[SYSCALL_EXITS] =           "SYSCALL",
	[ISI_EXITS] =               "ISI",
	[DSI_EXITS] =               "DSI",
	[EMULATED_INST_EXITS] =     "EMULINST",
	[EMULATED_MTMSRWE_EXITS] =  "EMUL_WAIT",
	[EMULATED_WRTEE_EXITS] =    "EMUL_WRTEE",
	[EMULATED_MTSPR_EXITS] =    "EMUL_MTSPR",
	[EMULATED_MFSPR_EXITS] =    "EMUL_MFSPR",
	[EMULATED_MTMSR_EXITS] =    "EMUL_MTMSR",
	[EMULATED_MFMSR_EXITS] =    "EMUL_MFMSR",
	[EMULATED_TLBSX_EXITS] =    "EMUL_TLBSX",
	[EMULATED_TLBWE_EXITS] =    "EMUL_TLBWE",
	[EMULATED_RFI_EXITS] =      "EMUL_RFI",
	[DEC_EXITS] =               "DEC",
	[EXT_INTR_EXITS] =          "EXTINT",
	[HALT_WAKEUP] =             "HALT",
	[USR_PR_INST] =             "USR_PR_INST",
	[FP_UNAVAIL] =              "FP_UNAVAIL",
	[DEBUG_EXITS] =             "DEBUG",
	[TIMEINGUEST] =             "TIMEINGUEST"
};

static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
	struct kvm_vcpu *vcpu = m->private;
	int i;
	u64 min, max, sum, sum_quad;

	seq_printf(m, "%s", "type	count	min	max	sum	sum_squared\n");


	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {

		min = vcpu->arch.timing_min_duration[i];
		do_div(min, tb_ticks_per_usec);
		max = vcpu->arch.timing_max_duration[i];
		do_div(max, tb_ticks_per_usec);
		sum = vcpu->arch.timing_sum_duration[i];
		do_div(sum, tb_ticks_per_usec);
		sum_quad = vcpu->arch.timing_sum_quad_duration[i];
		do_div(sum_quad, tb_ticks_per_usec);

		seq_printf(m, "%12s	%10d	%10lld	%10lld	%20lld	%20lld\n",
			kvm_exit_names[i],
			vcpu->arch.timing_count_type[i],
			min,
			max,
			sum,
			sum_quad);

	}
	return 0;
}

/* Write 'c' to clear the timing statistics. */
static ssize_t kvmppc_exit_timing_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	int err = -EINVAL;
	char c;

	if (count > 1) {
		goto done;
	}

	if (get_user(c, user_buf)) {
		err = -EFAULT;
		goto done;
	}

	if (c == 'c') {
		struct seq_file *seqf = file->private_data;
		struct kvm_vcpu *vcpu = seqf->private;
		/* Write does not affect our buffers previously generated with
		 * show. seq_file is locked here to prevent races of init with
		 * a show call */
		mutex_lock(&seqf->lock);
		kvmppc_init_timing_stats(vcpu);
		mutex_unlock(&seqf->lock);
		err = count;
	}

done:
	return err;
}

static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
	return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}

static const struct file_operations kvmppc_exit_timing_fops = {
	.owner   = THIS_MODULE,
	.open    = kvmppc_exit_timing_open,
	.read    = seq_read,
	.write   = kvmppc_exit_timing_write,
	.llseek  = seq_lseek,
	.release = single_release,
};

void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
{
	static char dbg_fname[50];
	struct dentry *debugfs_file;

	snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
		 current->pid, id);
	debugfs_file = debugfs_create_file(dbg_fname, 0666,
					kvm_debugfs_dir, vcpu,
					&kvmppc_exit_timing_fops);

	if (!debugfs_file) {
		printk(KERN_ERR"%s: error creating debugfs file %s\n",
			__func__, dbg_fname);
		return;
	}

	vcpu->arch.debugfs_exit_timing = debugfs_file;
}

void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.debugfs_exit_timing) {
		debugfs_remove(vcpu->arch.debugfs_exit_timing);
		vcpu->arch.debugfs_exit_timing = NULL;
	}
}
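
In both versions the file only exports raw accumulators per exit type: count, min, max, sum and sum of squares. The mean and standard deviation referred to in the "update statistics for average and standard deviation" comment have to be derived in user space from the count, sum and sum_squared columns. A minimal sketch of that arithmetic follows; the numbers are purely illustrative and no units are assumed beyond the do_div() scaling done in kvmppc_exit_timing_show().

/* Hedged post-processing example, not kernel code. */
#include <math.h>
#include <stdio.h>

static void print_stats(const char *name, unsigned long count,
			double sum, double sum_squared)
{
	double mean, variance;

	if (!count)
		return;

	mean = sum / count;
	/* variance = E[x^2] - E[x]^2; clamp rounding noise below zero */
	variance = sum_squared / count - mean * mean;
	if (variance < 0.0)
		variance = 0.0;

	printf("%-12s mean %.2f stddev %.2f\n", name, mean, sqrt(variance));
}

int main(void)
{
	/* Purely illustrative figures for one exit type. */
	print_stats("DTLBVIRT", 1000, 52000.0, 3100000.0);
	return 0;
}

Link with -lm for sqrt().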