/*
 * x86 APERF/MPERF KHz calculation for
 * /sys/.../cpufreq/scaling_cur_freq
 *
 * Copyright (C) 2017 Intel Corp.
 * Author: Len Brown <len.brown@intel.com>
 *
 * This file is licensed under GPLv2.
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/smp.h>

#include "cpu.h"

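/*
 * Most recent APERF/MPERF readings for each CPU, along with the kHz
 * value computed from them, cached so repeated reads stay cheap.
 */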
struct aperfmperf_sample {
	unsigned int khz;
	ktime_t time;
	u64 aperf;
	u64 mperf;
};

static DEFINE_PER_CPU(struct aperfmperf_sample, samples);

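/*
 * A cached sample newer than APERFMPERF_CACHE_THRESHOLD_MS is reused
 * as-is; one older than APERFMPERF_STALE_THRESHOLD_MS forces callers to
 * wait APERFMPERF_REFRESH_DELAY_MS for a fresh snapshot before reading.
 */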
#define APERFMPERF_CACHE_THRESHOLD_MS 10
#define APERFMPERF_REFRESH_DELAY_MS 10
#define APERFMPERF_STALE_THRESHOLD_MS 1000

/*
 * aperfmperf_snapshot_khz()
 * On the current CPU, snapshot APERF, MPERF and the current time,
 * compute the average kHz since the previous snapshot and save it.
 * Callers avoid re-running this within APERFMPERF_CACHE_THRESHOLD_MS
 * of the previous snapshot.
 */
static void aperfmperf_snapshot_khz(void *dummy)
{
	u64 aperf, aperf_delta;
	u64 mperf, mperf_delta;
	struct aperfmperf_sample *s = this_cpu_ptr(&samples);
	unsigned long flags;

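	/*
	 * Read the two MSRs back to back with interrupts disabled so both
	 * deltas describe the same interval.
	 */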
	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	aperf_delta = aperf - s->aperf;
	mperf_delta = mperf - s->mperf;

	/*
	 * There is no architectural guarantee that MPERF
	 * increments faster than we can read it.
	 */
	if (mperf_delta == 0)
		return;

	s->time = ktime_get();
	s->aperf = aperf;
	s->mperf = mperf;
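	/*
	 * MPERF counts at a constant reference rate while APERF counts at
	 * the clock rate actually delivered, so the average effective
	 * frequency over the sampling window is cpu_khz scaled by the
	 * APERF/MPERF ratio.
	 */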
	s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
}

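/*
 * Snapshot @cpu unless its cached sample is still within the cache
 * threshold.  Return false if the cached sample was already stale, in
 * which case the caller should wait for the refresh to complete before
 * trusting the reported kHz.
 */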
static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
{
	s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));

	/* Don't bother re-computing within the cache threshold time. */
	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
		return true;

	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);

	/* Return false if the previous iteration was too long ago. */
	return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
}

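/*
 * Return the cached effective kHz for @cpu, taking a fresh synchronous
 * snapshot first when the cached one is older than the cache threshold.
 * Returns 0 if APERF/MPERF is unsupported or cpu_khz is unknown.
 */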
unsigned int aperfmperf_get_khz(int cpu)
{
	if (!cpu_khz)
		return 0;

	if (!static_cpu_has(X86_FEATURE_APERFMPERF))
		return 0;

	aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
	return per_cpu(samples.khz, cpu);
}

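/*
 * Fire non-blocking snapshot requests at every online CPU, then sleep
 * once if any cached sample was stale so the asynchronous refreshes can
 * complete before the per-CPU values are read back.
 */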
void arch_freq_prepare_all(void)
{
	ktime_t now = ktime_get();
	bool wait = false;
	int cpu;

	if (!cpu_khz)
		return;

	if (!static_cpu_has(X86_FEATURE_APERFMPERF))
		return;

	for_each_online_cpu(cpu)
		if (!aperfmperf_snapshot_cpu(cpu, now, false))
			wait = true;

	if (wait)
		msleep(APERFMPERF_REFRESH_DELAY_MS);
}

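/*
 * Backend for /sys/.../cpufreq/scaling_cur_freq: return the cached kHz
 * for @cpu if it is fresh enough, otherwise wait out the refresh delay,
 * take one more synchronous snapshot and report that.
 */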
unsigned int arch_freq_get_on_cpu(int cpu)
{
	if (!cpu_khz)
		return 0;

	if (!static_cpu_has(X86_FEATURE_APERFMPERF))
		return 0;

	if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
		return per_cpu(samples.khz, cpu);

	msleep(APERFMPERF_REFRESH_DELAY_MS);
	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);

	return per_cpu(samples.khz, cpu);
}