v5.4 (arch/x86/kernel/cpu/aperfmperf.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * x86 APERF/MPERF KHz calculation for
 * /sys/.../cpufreq/scaling_cur_freq
 *
 * Copyright (C) 2017 Intel Corp.
 * Author: Len Brown <len.brown@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/sched/isolation.h>

#include "cpu.h"

struct aperfmperf_sample {
	unsigned int	khz;
	ktime_t	time;
	u64	aperf;
	u64	mperf;
};

static DEFINE_PER_CPU(struct aperfmperf_sample, samples);

#define APERFMPERF_CACHE_THRESHOLD_MS	10
#define APERFMPERF_REFRESH_DELAY_MS	10
#define APERFMPERF_STALE_THRESHOLD_MS	1000

/*
 * aperfmperf_snapshot_khz()
 * On the current CPU, snapshot APERF, MPERF, and jiffies
 * unless we already did it within 10ms
 * calculate kHz, save snapshot
 */
static void aperfmperf_snapshot_khz(void *dummy)
{
	u64 aperf, aperf_delta;
	u64 mperf, mperf_delta;
	struct aperfmperf_sample *s = this_cpu_ptr(&samples);
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	aperf_delta = aperf - s->aperf;
	mperf_delta = mperf - s->mperf;

	/*
	 * There is no architectural guarantee that MPERF
	 * increments faster than we can read it.
	 */
	if (mperf_delta == 0)
		return;

	s->time = ktime_get();
	s->aperf = aperf;
	s->mperf = mperf;
	s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
}

static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
{
	s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));

	/* Don't bother re-computing within the cache threshold time. */
	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
		return true;

	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);

	/* Return false if the previous iteration was too long ago. */
	return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
}

unsigned int aperfmperf_get_khz(int cpu)
{
	if (!cpu_khz)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return 0;

	if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
		return 0;

	aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
	return per_cpu(samples.khz, cpu);
}

void arch_freq_prepare_all(void)
{
	ktime_t now = ktime_get();
	bool wait = false;
	int cpu;

	if (!cpu_khz)
		return;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return;

	for_each_online_cpu(cpu) {
		if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
			continue;
		if (!aperfmperf_snapshot_cpu(cpu, now, false))
			wait = true;
	}

	if (wait)
		msleep(APERFMPERF_REFRESH_DELAY_MS);
}

unsigned int arch_freq_get_on_cpu(int cpu)
{
	if (!cpu_khz)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return 0;

	if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
		return 0;

	if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
		return per_cpu(samples.khz, cpu);

	msleep(APERFMPERF_REFRESH_DELAY_MS);
	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);

	return per_cpu(samples.khz, cpu);
}
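
The heart of both versions is the same arithmetic: APERF and MPERF are free-running MSRs (0xE8 and 0xE7), and over any sampling window the effective frequency is the base (TSC) frequency scaled by the ratio of their deltas, khz = cpu_khz * dAPERF / dMPERF. Below is a minimal userspace sketch of that calculation; the counter values are made up for illustration, and nothing in it is kernel API. On real hardware the snapshots would come from the MSRs, e.g. via /dev/cpu/N/msr.

/*
 * Minimal sketch of the APERF/MPERF kHz math, with hypothetical
 * snapshot values. Variable names mirror the kernel code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_khz = 2400000;	/* base (TSC) frequency in kHz */
	uint64_t aperf_prev = 1000000, aperf_now = 1900000;
	uint64_t mperf_prev = 1000000, mperf_now = 1600000;

	uint64_t aperf_delta = aperf_now - aperf_prev;
	uint64_t mperf_delta = mperf_now - mperf_prev;

	/* Same guard as the kernel: MPERF may not have advanced. */
	if (mperf_delta == 0)
		return 1;

	/* 2400000 * 900000 / 600000 = 3600000 kHz (turbo above base) */
	uint64_t khz = cpu_khz * aperf_delta / mperf_delta;
	printf("effective frequency: %llu kHz\n", (unsigned long long)khz);
	return 0;
}

The kernel uses div64_u64() because a plain 64-bit division of a 64-bit product is not available on all 32-bit configurations; in this sketch the example values fit comfortably in 64 bits, so ordinary C division suffices.
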
v5.14.15 (arch/x86/kernel/cpu/aperfmperf.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * x86 APERF/MPERF KHz calculation for
 * /sys/.../cpufreq/scaling_cur_freq
 *
 * Copyright (C) 2017 Intel Corp.
 * Author: Len Brown <len.brown@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/sched/isolation.h>
#include <linux/rcupdate.h>

#include "cpu.h"

struct aperfmperf_sample {
	unsigned int	khz;
	atomic_t	scfpending;
	ktime_t	time;
	u64	aperf;
	u64	mperf;
};

static DEFINE_PER_CPU(struct aperfmperf_sample, samples);

#define APERFMPERF_CACHE_THRESHOLD_MS	10
#define APERFMPERF_REFRESH_DELAY_MS	10
#define APERFMPERF_STALE_THRESHOLD_MS	1000

/*
 * aperfmperf_snapshot_khz()
 * On the current CPU, snapshot APERF, MPERF, and jiffies
 * unless we already did it within 10ms
 * calculate kHz, save snapshot
 */
static void aperfmperf_snapshot_khz(void *dummy)
{
	u64 aperf, aperf_delta;
	u64 mperf, mperf_delta;
	struct aperfmperf_sample *s = this_cpu_ptr(&samples);
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	aperf_delta = aperf - s->aperf;
	mperf_delta = mperf - s->mperf;

	/*
	 * There is no architectural guarantee that MPERF
	 * increments faster than we can read it.
	 */
	if (mperf_delta == 0)
		return;

	s->time = ktime_get();
	s->aperf = aperf;
	s->mperf = mperf;
	s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
	atomic_set_release(&s->scfpending, 0);
}

static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
{
	s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
	struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);

	/* Don't bother re-computing within the cache threshold time. */
	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
		return true;

	if (!atomic_xchg(&s->scfpending, 1) || wait)
		smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);

	/* Return false if the previous iteration was too long ago. */
	return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
}

unsigned int aperfmperf_get_khz(int cpu)
{
	if (!cpu_khz)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return 0;

	if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
		return 0;

	if (rcu_is_idle_cpu(cpu))
		return 0; /* Idle CPUs are completely uninteresting. */

	aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
	return per_cpu(samples.khz, cpu);
}

void arch_freq_prepare_all(void)
{
	ktime_t now = ktime_get();
	bool wait = false;
	int cpu;

	if (!cpu_khz)
		return;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return;

	for_each_online_cpu(cpu) {
		if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
			continue;
		if (rcu_is_idle_cpu(cpu))
			continue; /* Idle CPUs are completely uninteresting. */
		if (!aperfmperf_snapshot_cpu(cpu, now, false))
			wait = true;
	}

	if (wait)
		msleep(APERFMPERF_REFRESH_DELAY_MS);
}

unsigned int arch_freq_get_on_cpu(int cpu)
{
	struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);

	if (!cpu_khz)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return 0;

	if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
		return 0;

	if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
		return per_cpu(samples.khz, cpu);

	msleep(APERFMPERF_REFRESH_DELAY_MS);
	atomic_set(&s->scfpending, 1);
	smp_mb(); /* ->scfpending before smp_call_function_single(). */
	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);

	return per_cpu(samples.khz, cpu);
}
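
Relative to v5.4, this version adds two refinements: snapshots are skipped for idle CPUs (the rcu_is_idle_cpu() checks), and a per-sample scfpending flag keeps concurrent readers from stacking up redundant smp_call_function_single() IPIs on one target. atomic_xchg() lets exactly one requester (or any caller that must wait for fresh data) send the cross-call, and the release store at the end of aperfmperf_snapshot_khz() re-arms the flag once the new sample is published. Below is a minimal C11 sketch of that flag protocol; request_snapshot() and snapshot_done() are hypothetical stand-ins for the kernel's cross-call and snapshot handler, not kernel API.

/*
 * Minimal C11 sketch of the scfpending dedup pattern, assuming
 * hypothetical request_snapshot()/snapshot_done() helpers.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int scfpending;	/* 0 = idle, 1 = snapshot in flight */

/* Runs on the target CPU: publish results, then clear the flag. */
static void snapshot_done(void)
{
	/* ... store the new sample here ... */
	atomic_store_explicit(&scfpending, 0, memory_order_release);
}

/* Runs on any requesting CPU; returns true if we must send the IPI. */
static bool request_snapshot(bool wait)
{
	/*
	 * Exchange returns the old value, so only the caller that
	 * flips 0 -> 1 (or one that needs to wait for fresh data)
	 * sends the cross-call; everyone else piggybacks on the
	 * snapshot already in flight.
	 */
	return !atomic_exchange(&scfpending, 1) || wait;
}

int main(void)
{
	bool first = request_snapshot(false);	/* true: send the IPI */
	bool second = request_snapshot(false);	/* false: already pending */
	snapshot_done();
	return !(first && !second);
}

The msleep()-then-retry path in arch_freq_get_on_cpu() sets the flag unconditionally before its forced cross-call, with smp_mb() ordering the store against the IPI, which is why the kernel pairs atomic_set() with an explicit barrier there instead of reusing the xchg.
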